pydocket 0.0.1__tar.gz → 0.0.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydocket might be problematic.
- pydocket-0.0.2/.github/workflows/chaos.yml +30 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/.github/workflows/ci.yml +0 -10
- {pydocket-0.0.1 → pydocket-0.0.2}/PKG-INFO +6 -1
- pydocket-0.0.2/chaos/README.md +6 -0
- pydocket-0.0.2/chaos/driver.py +208 -0
- pydocket-0.0.2/chaos/producer.py +42 -0
- pydocket-0.0.2/chaos/run +8 -0
- pydocket-0.0.2/chaos/tasks.py +25 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/pyproject.toml +16 -1
- {pydocket-0.0.1 → pydocket-0.0.2}/src/docket/__init__.py +15 -1
- pydocket-0.0.2/src/docket/annotations.py +30 -0
- pydocket-0.0.2/src/docket/cli.py +254 -0
- pydocket-0.0.2/src/docket/dependencies.py +155 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/src/docket/docket.py +94 -34
- {pydocket-0.0.1 → pydocket-0.0.2}/src/docket/execution.py +27 -0
- pydocket-0.0.2/src/docket/instrumentation.py +103 -0
- pydocket-0.0.2/src/docket/tasks.py +50 -0
- pydocket-0.0.2/src/docket/worker.py +389 -0
- pydocket-0.0.2/telemetry/.gitignore +1 -0
- pydocket-0.0.2/telemetry/start +9 -0
- pydocket-0.0.2/telemetry/stop +8 -0
- pydocket-0.0.2/tests/cli/__init__.py +0 -0
- pydocket-0.0.2/tests/cli/test_parsing.py +66 -0
- pydocket-0.0.2/tests/cli/test_trace.py +66 -0
- pydocket-0.0.2/tests/cli/test_worker.py +177 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/tests/conftest.py +17 -7
- {pydocket-0.0.1 → pydocket-0.0.2}/tests/test_dependencies.py +3 -3
- pydocket-0.0.2/tests/test_fundamentals.py +607 -0
- pydocket-0.0.2/tests/test_instrumentation.py +456 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/tests/test_worker.py +34 -10
- {pydocket-0.0.1 → pydocket-0.0.2}/uv.lock +309 -1
- pydocket-0.0.1/src/docket/cli.py +0 -23
- pydocket-0.0.1/src/docket/dependencies.py +0 -77
- pydocket-0.0.1/src/docket/worker.py +0 -244
- pydocket-0.0.1/tests/cli/test_worker.py +0 -10
- pydocket-0.0.1/tests/test_fundamentals.py +0 -304
- {pydocket-0.0.1 → pydocket-0.0.2}/.cursorrules +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/.github/workflows/publish.yml +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/.gitignore +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/.pre-commit-config.yaml +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/LICENSE +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/README.md +0 -0
- {pydocket-0.0.1/tests → pydocket-0.0.2/chaos}/__init__.py +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/src/docket/__main__.py +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/src/docket/py.typed +0 -0
- {pydocket-0.0.1/tests/cli → pydocket-0.0.2/tests}/__init__.py +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/tests/cli/conftest.py +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/tests/cli/test_module.py +0 -0
- {pydocket-0.0.1 → pydocket-0.0.2}/tests/cli/test_version.py +0 -0
pydocket-0.0.2/.github/workflows/chaos.yml ADDED

```diff
@@ -0,0 +1,30 @@
+name: Docket Chaos Tests
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+  workflow_call:
+
+jobs:
+  test:
+    name: Chaos tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 2
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install uv and set Python version
+        uses: astral-sh/setup-uv@v5
+        with:
+          python-version: 3.12
+          enable-cache: true
+          cache-dependency-glob: "pyproject.toml"
+
+      - name: Install dependencies
+        run: uv sync --dev
+
+      - name: Run chaos tests
+        run: python -m chaos.driver
```
{pydocket-0.0.1 → pydocket-0.0.2}/.github/workflows/ci.yml CHANGED

```diff
@@ -15,16 +15,6 @@ jobs:
       matrix:
         python-version: ["3.12", "3.13"]
 
-    services:
-      redis:
-        image: redis
-        ports:
-          - 6379:6379
-        options: >-
-          --health-cmd "redis-cli ping"
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
 
     steps:
       - uses: actions/checkout@v4
```
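The Redis service container is gone from CI, presumably because the test suites now provision Redis themselves with testcontainers, as `chaos/driver.py` does below. A minimal sketch of that pattern:

```python
# A sketch (assuming Docker is available) of provisioning a throwaway Redis
# with testcontainers, mirroring chaos/driver.py below.
from testcontainers.redis import RedisContainer

with RedisContainer("redis:7.4.2") as redis_server:
    host = redis_server.get_container_host_ip()
    port = redis_server.get_exposed_port(6379)
    print(f"redis://{host}:{port}")  # the URL a Docket would connect to
```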
{pydocket-0.0.1 → pydocket-0.0.2}/PKG-INFO CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.0.1
+Version: 0.0.2
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
@@ -23,7 +23,12 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.12
 Requires-Dist: cloudpickle>=3.1.1
+Requires-Dist: opentelemetry-api>=1.30.0
+Requires-Dist: opentelemetry-exporter-prometheus>=0.51b0
+Requires-Dist: prometheus-client>=0.21.1
+Requires-Dist: python-json-logger>=3.2.1
 Requires-Dist: redis>=5.2.1
+Requires-Dist: rich>=13.9.4
 Requires-Dist: typer>=0.15.1
 Description-Content-Type: text/markdown
 
```
pydocket-0.0.2/chaos/README.md ADDED

```diff
@@ -0,0 +1,6 @@
+This directory includes a chaos and load testing suite that can verify `docket` under
+more real-world conditions.
+
+This test suite should have a mode that can run in under a minute or two for use in CI,
+but longer tests for performance are totally fine. Everything should be self-contained,
+reproducible, and verifiable.
```
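For orientation, a hedged sketch of driving the suite programmatically, based on `chaos/driver.py` below; CI runs it as `python -m chaos.driver`, and the counts here are illustrative rather than the defaults:

```python
# A sketch of invoking the chaos driver directly (requires Docker, since the
# driver provisions Redis via testcontainers). Mode is one of "performance",
# "chaos" (the default), or "hard".
import asyncio

from chaos.driver import main

asyncio.run(main(mode="chaos", tasks=200, producers=1, workers=2))
```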
pydocket-0.0.2/chaos/driver.py ADDED

```diff
@@ -0,0 +1,208 @@
+import asyncio
+import logging
+import os
+import random
+import sys
+from asyncio import subprocess
+from asyncio.subprocess import Process
+from datetime import timedelta
+from typing import Any, Literal, Sequence
+from uuid import uuid4
+
+import redis.exceptions
+from opentelemetry import trace
+from testcontainers.redis import RedisContainer
+
+from docket import Docket
+
+from .tasks import toxic
+
+logging.getLogger().setLevel(logging.INFO)
+
+# Quiets down the testcontainers logger
+testcontainers_logger = logging.getLogger("testcontainers.core.container")
+testcontainers_logger.setLevel(logging.ERROR)
+testcontainers_logger = logging.getLogger("testcontainers.core.waiting_utils")
+testcontainers_logger.setLevel(logging.ERROR)
+
+console = logging.StreamHandler(stream=sys.stdout)
+console.setFormatter(
+    logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+)
+logging.getLogger().addHandler(console)
+
+
+logger = logging.getLogger("chaos.driver")
+tracer = trace.get_tracer("chaos.driver")
+
+
+def python_entrypoint() -> list[str]:
+    if os.environ.get("OTEL_DISTRO"):
+        return ["opentelemetry-instrument", sys.executable]
+    return [sys.executable]
+
+
+async def main(
+    mode: Literal["performance", "chaos", "hard"] = "chaos",
+    tasks: int = 2000,
+    producers: int = 2,
+    workers: int = 4,
+):
+    with RedisContainer("redis:7.4.2") as redis_server:
+        redis_url = f"redis://{redis_server.get_container_host_ip()}:{redis_server.get_exposed_port(6379)}"
+        docket = Docket(
+            name=f"test-docket-{uuid4()}",
+            url=redis_url,
+        )
+        environment = {
+            **os.environ,
+            "DOCKET_NAME": docket.name,
+            "DOCKET_URL": redis_url,
+        }
+
+        if tasks % producers != 0:
+            raise ValueError("total_tasks must be divisible by total_producers")
+
+        tasks_per_producer = tasks // producers
+
+        logger.info(
+            "Spawning %d producers with %d tasks each...", producers, tasks_per_producer
+        )
+
+        async def spawn_producer() -> Process:
+            return await asyncio.create_subprocess_exec(
+                *python_entrypoint(),
+                "-m",
+                "chaos.producer",
+                str(tasks_per_producer),
+                env=environment | {"OTEL_SERVICE_NAME": "chaos-producer"},
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
+
+        producer_processes: list[Process] = []
+        for _ in range(producers):
+            producer_processes.append(await spawn_producer())
+
+        logger.info("Spawning %d workers...", workers)
+
+        async def spawn_worker() -> Process:
+            return await asyncio.create_subprocess_exec(
+                *python_entrypoint(),
+                "-m",
+                "docket",
+                "worker",
+                "--docket",
+                docket.name,
+                "--url",
+                redis_url,
+                "--tasks",
+                "chaos.tasks:chaos_tasks",
+                env=environment
+                | {
+                    "OTEL_SERVICE_NAME": "chaos-worker",
+                    "DOCKET_WORKER_REDELIVERY_TIMEOUT": "5s",
+                },
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
+
+        worker_processes: list[Process] = []
+        for _ in range(workers):
+            worker_processes.append(await spawn_worker())
+
+        while True:
+            try:
+                async with docket.redis() as r:
+                    info: dict[str, Any] = await r.info()
+                    connected_clients = int(info.get("connected_clients", 0))
+
+                    sent_tasks = await r.zcard("hello:sent")
+                    received_tasks = await r.zcard("hello:received")
+
+                    logger.info(
+                        "sent: %d, received: %d, clients: %d",
+                        sent_tasks,
+                        received_tasks,
+                        connected_clients,
+                    )
+                    if sent_tasks >= tasks:
+                        break
+            except redis.exceptions.ConnectionError as e:
+                logger.error(
+                    "driver: Redis connection error (%s), retrying in 5s...", e
+                )
+                await asyncio.sleep(5)
+
+            # Now apply some chaos to the system:
+
+            if mode in ("chaos", "hard"):
+                chaos_chance = random.random()
+                if chaos_chance < 0.01 and mode == "hard":
+                    logger.warning("CHAOS: Killing redis server...")
+                    redis_server.stop()
+
+                    await asyncio.sleep(5)
+
+                    logger.warning("CHAOS: Starting redis server...")
+                    while True:
+                        try:
+                            redis_server.start()
+                            break
+                        except Exception:
+                            logger.warning(" Redis server failed, retrying in 5s...")
+                            await asyncio.sleep(5)
+
+                elif chaos_chance < 0.10:
+                    worker_index = random.randrange(len(worker_processes))
+                    worker_to_kill = worker_processes.pop(worker_index)
+
+                    logger.warning("CHAOS: Killing worker %d...", worker_index)
+                    try:
+                        worker_to_kill.terminate()
+                    except ProcessLookupError:
+                        logger.warning(" What is dead may never die!")
+
+                    logger.warning("CHAOS: Replacing worker %d...", worker_index)
+                    worker_processes.append(await spawn_worker())
+                elif chaos_chance < 0.15:
+                    logger.warning("CHAOS: Queuing a toxic task...")
+                    try:
+                        async with docket:
+                            await docket.add(toxic)()
+                    except redis.exceptions.ConnectionError:
+                        pass
+
+            await asyncio.sleep(0.25)
+
+        async with docket.redis() as r:
+            first_entries: Sequence[tuple[bytes, float]] = await r.zrange(
+                "hello:received", 0, 0, withscores=True
+            )
+            last_entries: Sequence[tuple[bytes, float]] = await r.zrange(
+                "hello:received", -1, -1, withscores=True
+            )
+
+            _, min_score = first_entries[0]
+            _, max_score = last_entries[0]
+            total_time = timedelta(seconds=max_score - min_score)
+
+            logger.info(
+                "Processed %d tasks in %s, averaging %.2f/s",
+                tasks,
+                total_time,
+                tasks / total_time.total_seconds(),
+            )
+
+        for process in producer_processes + worker_processes:
+            try:
+                process.kill()
+            except ProcessLookupError:
+                continue
+            await process.wait()
+
+
+if __name__ == "__main__":
+    mode = sys.argv[1] if len(sys.argv) > 1 else "chaos"
+    assert mode in ("performance", "chaos", "hard")
+    asyncio.run(main(mode=mode))
```
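The driver's bookkeeping rests on two sorted sets: producers record each scheduled task's key in `hello:sent`, and the `hello` task records its own key in `hello:received`. A hedged sketch of checking a run the same way, with placeholder docket name and URL:

```python
# A sketch of verifying a run by comparing the sorted sets maintained by the
# producers and the hello task; the name and URL are placeholders.
import asyncio

from docket import Docket


async def verify(expected: int) -> bool:
    docket = Docket(name="test-docket", url="redis://localhost:6379")
    async with docket.redis() as r:
        sent = await r.zcard("hello:sent")
        received = await r.zcard("hello:received")
    return sent == received == expected


print(asyncio.run(verify(2000)))
```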
pydocket-0.0.2/chaos/producer.py ADDED

```diff
@@ -0,0 +1,42 @@
+import asyncio
+import logging
+import os
+import sys
+import time
+
+import redis.exceptions
+
+from docket import Docket
+
+from .tasks import hello
+
+logging.getLogger().setLevel(logging.INFO)
+logger = logging.getLogger("chaos.producer")
+
+
+async def main(tasks_to_produce: int):
+    docket = Docket(
+        name=os.environ["DOCKET_NAME"],
+        url=os.environ["DOCKET_URL"],
+    )
+    tasks_sent = 0
+    while tasks_sent < tasks_to_produce:
+        try:
+            async with docket:
+                async with docket.redis() as r:
+                    for _ in range(tasks_sent, tasks_to_produce):
+                        execution = await docket.add(hello)()
+                        await r.zadd("hello:sent", {execution.key: time.time()})
+                        logger.info("Added task %s", execution.key)
+                        tasks_sent += 1
+        except redis.exceptions.ConnectionError:
+            logger.warning(
+                "producer: Redis connection error, retrying in 5s... "
+                f"({tasks_sent}/{tasks_to_produce} tasks sent)"
+            )
+            await asyncio.sleep(5)
+
+
+if __name__ == "__main__":
+    tasks = int(sys.argv[1])
+    asyncio.run(main(tasks))
```
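The core scheduling pattern above is `docket.add(task)()`: `add` returns a callable that schedules the task and yields an `Execution` whose `key` identifies the run. A minimal sketch with a placeholder name and URL:

```python
# A sketch of scheduling a single hello task, following the producer above.
import asyncio

from chaos.tasks import hello
from docket import Docket


async def send_one() -> str:
    docket = Docket(name="example-docket", url="redis://localhost:6379")
    async with docket:
        execution = await docket.add(hello)()
        return execution.key


print(asyncio.run(send_one()))
```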
pydocket-0.0.2/chaos/run ADDED

```diff
@@ -0,0 +1,8 @@
+#!/bin/bash
+export OTEL_SERVICE_NAME=chaos-driver
+export OTEL_DISTRO=otlp
+export OTEL_EXPORTER_OTLP_ENDPOINT=0.0.0.0:4317
+export OTEL_EXPORTER_OTLP_INSECURE=true
+export OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
+
+opentelemetry-instrument python -m chaos.driver "$@"
```
pydocket-0.0.2/chaos/tasks.py ADDED

```diff
@@ -0,0 +1,25 @@
+import logging
+import sys
+import time
+
+from docket import CurrentDocket, Docket, Retry, TaskKey
+
+logger = logging.getLogger(__name__)
+
+
+async def hello(
+    key: str = TaskKey(),
+    docket: Docket = CurrentDocket(),
+    retry: Retry = Retry(attempts=sys.maxsize),
+):
+    logger.info("Starting task %s", key)
+    async with docket.redis() as redis:
+        await redis.zadd("hello:received", {key: time.time()})
+    logger.info("Finished task %s", key)
+
+
+async def toxic():
+    sys.exit(42)
+
+
+chaos_tasks = [hello, toxic]
```
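The parameter defaults above are dependency markers that the worker resolves at execution time: `TaskKey()` becomes the task's key, `CurrentDocket()` the running `Docket`, and `Retry(...)` the retry policy. A hedged sketch of another task in the same style; `goodbye` and its sorted set are hypothetical:

```python
# A hypothetical task using the same dependency markers as hello above.
import logging
import time

from docket import CurrentDocket, Docket, Retry, TaskKey

logger = logging.getLogger(__name__)


async def goodbye(
    key: str = TaskKey(),
    docket: Docket = CurrentDocket(),
    retry: Retry = Retry(attempts=3),
):
    async with docket.redis() as redis:
        await redis.zadd("goodbye:received", {key: time.time()})
    logger.info("Finished task %s", key)
```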
{pydocket-0.0.1 → pydocket-0.0.2}/pyproject.toml CHANGED

```diff
@@ -18,12 +18,27 @@ classifiers = [
     "License :: OSI Approved :: MIT License",
     "Operating System :: OS Independent",
 ]
-dependencies = ["cloudpickle>=3.1.1", "redis>=5.2.1", "typer>=0.15.1"]
+dependencies = [
+    "cloudpickle>=3.1.1",
+    "opentelemetry-api>=1.30.0",
+    "opentelemetry-exporter-prometheus>=0.51b0",
+    "prometheus-client>=0.21.1",
+    "python-json-logger>=3.2.1",
+    "redis>=5.2.1",
+    "rich>=13.9.4",
+    "typer>=0.15.1",
+]
 
 [dependency-groups]
 dev = [
     "codespell>=2.4.1",
     "mypy>=1.14.1",
+    "opentelemetry-distro>=0.51b0",
+    "opentelemetry-exporter-otlp>=1.30.0",
+    "opentelemetry-instrumentation>=0.51b0",
+    "opentelemetry-instrumentation-logging>=0.51b0",
+    "opentelemetry-instrumentation-redis>=0.51b0",
+    "opentelemetry-sdk>=1.30.0",
     "pre-commit>=4.1.0",
     "pyright>=1.1.394",
     "pytest>=8.3.4",
```
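The new runtime dependencies line up with the metrics support added in `src/docket/instrumentation.py`. A hedged sketch of the generic OpenTelemetry-to-Prometheus wiring these packages enable, not necessarily docket's own setup (the SDK pieces are dev dependencies here):

```python
# Generic OpenTelemetry + Prometheus wiring; the meter and counter names are
# illustrative, not docket's.
from opentelemetry import metrics
from opentelemetry.exporter.prometheus import PrometheusMetricReader
from opentelemetry.sdk.metrics import MeterProvider
from prometheus_client import start_http_server

start_http_server(9090)  # serve /metrics for Prometheus to scrape
metrics.set_meter_provider(MeterProvider(metric_readers=[PrometheusMetricReader()]))
counter = metrics.get_meter("example").create_counter("tasks_processed")
counter.add(1)
```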
{pydocket-0.0.1 → pydocket-0.0.2}/src/docket/__init__.py CHANGED

```diff
@@ -8,7 +8,16 @@ from importlib.metadata import version
 
 __version__ = version("pydocket")
 
-from .dependencies import CurrentDocket, CurrentWorker, Retry
+from .annotations import Logged
+from .dependencies import (
+    CurrentDocket,
+    CurrentExecution,
+    CurrentWorker,
+    ExponentialRetry,
+    Retry,
+    TaskKey,
+    TaskLogger,
+)
 from .docket import Docket
 from .execution import Execution
 from .worker import Worker
@@ -19,6 +28,11 @@ __all__ = [
     "Execution",
     "CurrentDocket",
     "CurrentWorker",
+    "CurrentExecution",
+    "TaskKey",
+    "TaskLogger",
     "Retry",
+    "ExponentialRetry",
+    "Logged",
     "__version__",
 ]
```
pydocket-0.0.2/src/docket/annotations.py ADDED

```diff
@@ -0,0 +1,30 @@
+import abc
+import inspect
+from typing import Any, Iterable, Mapping, Self
+
+
+class Annotation(abc.ABC):
+    @classmethod
+    def annotated_parameters(cls, signature: inspect.Signature) -> Mapping[str, Self]:
+        annotated: dict[str, Self] = {}
+
+        for param_name, param in signature.parameters.items():
+            if param.annotation == inspect.Parameter.empty:
+                continue
+
+            try:
+                metadata: Iterable[Any] = param.annotation.__metadata__
+            except AttributeError:
+                continue
+
+            for arg_type in metadata:
+                if isinstance(arg_type, cls):
+                    annotated[param_name] = arg_type
+                elif isinstance(arg_type, type) and issubclass(arg_type, cls):
+                    annotated[param_name] = arg_type()
+
+        return annotated
+
+
+class Logged(Annotation):
+    """Instructs docket to include arguments to this parameter in the log."""
```
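`annotated_parameters` walks a signature's `typing.Annotated` metadata and returns the parameters marked with the given `Annotation` subclass, instantiating bare classes along the way. A small sketch; `greet` and its parameters are illustrative:

```python
# Discovering Logged parameters from a task signature.
import inspect
from typing import Annotated

from docket.annotations import Logged


async def greet(customer_id: Annotated[str, Logged], secret: str = ""):
    ...


params = Logged.annotated_parameters(inspect.signature(greet))
print(list(params))  # ['customer_id']; 'secret' carries no Logged marker
```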