pydocket 0.2.0__tar.gz → 0.2.1__tar.gz
This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- {pydocket-0.2.0 → pydocket-0.2.1}/PKG-INFO +1 -1
- {pydocket-0.2.0 → pydocket-0.2.1}/pyproject.toml +8 -1
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/worker.py +43 -4
- pydocket-0.2.1/tests/conftest.py +167 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/test_fundamentals.py +36 -0
- pydocket-0.2.0/tests/conftest.py +0 -117
- {pydocket-0.2.0 → pydocket-0.2.1}/.cursor/rules/general.mdc +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.cursor/rules/python-style.mdc +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.github/codecov.yml +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.github/workflows/chaos.yml +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.github/workflows/ci.yml +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.github/workflows/publish.yml +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.gitignore +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/.pre-commit-config.yaml +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/LICENSE +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/README.md +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/chaos/README.md +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/chaos/__init__.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/chaos/driver.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/chaos/producer.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/chaos/run +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/chaos/tasks.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/__init__.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/__main__.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/annotations.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/cli.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/dependencies.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/docket.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/execution.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/instrumentation.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/py.typed +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/src/docket/tasks.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/telemetry/.gitignore +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/telemetry/start +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/telemetry/stop +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/__init__.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/__init__.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/conftest.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_module.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_parsing.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_snapshot.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_striking.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_tasks.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_version.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_worker.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/cli/test_workers.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/test_dependencies.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/test_docket.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/test_instrumentation.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/test_striking.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/tests/test_worker.py +0 -0
- {pydocket-0.2.0 → pydocket-0.2.1}/uv.lock +0 -0
{pydocket-0.2.0 → pydocket-0.2.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydocket
-Version: 0.2.0
+Version: 0.2.1
 Summary: A distributed background task system for Python functions
 Project-URL: Homepage, https://github.com/chrisguidry/docket
 Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues
{pydocket-0.2.0 → pydocket-0.2.1}/pyproject.toml

@@ -65,7 +65,14 @@ source = "vcs"
 packages = ["src/docket"]

 [tool.pytest.ini_options]
-addopts =
+addopts = [
+    "--numprocesses=logical",
+    "--maxprocesses=4",
+    "--cov=src/docket",
+    "--cov=tests",
+    "--cov-report=term-missing",
+    "--cov-branch",
+]
 filterwarnings = ["error"]

 [tool.pyright]
{pydocket-0.2.0 → pydocket-0.2.1}/src/docket/worker.py

@@ -7,6 +7,7 @@ from types import TracebackType
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Mapping,
     Protocol,
     Self,

@@ -63,6 +64,11 @@ class _stream_due_tasks(Protocol):
 class Worker:
     docket: Docket
     name: str
+    concurrency: int
+    redelivery_timeout: timedelta
+    reconnection_delay: timedelta
+    minimum_check_interval: timedelta
+    _strike_conditions: list[Callable[[Execution], bool]] = []

     def __init__(
         self,

@@ -80,6 +86,10 @@ class Worker:
         self.reconnection_delay = reconnection_delay
         self.minimum_check_interval = minimum_check_interval

+        self._strike_conditions = [
+            docket.strike_list.is_stricken,
+        ]
+
     async def __aenter__(self) -> Self:
         self._heartbeat_task = asyncio.create_task(self._heartbeat())

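In the hunks above, `_strike_conditions` is declared with a mutable class-level default (`= []`) and then rebound to a fresh list in `__init__`, so each Worker instance keeps its own condition list rather than sharing the class attribute. A minimal standalone sketch of the Python pitfall that per-instance rebinding avoids (illustrative classes, not docket code):

# Why Worker.__init__ rebinds self._strike_conditions: a class-level
# mutable default is shared by every instance unless rebound.

class Shared:
    conditions: list[str] = []  # one list shared by all instances

class PerInstance:
    conditions: list[str] = []

    def __init__(self) -> None:
        self.conditions = []  # fresh list per instance, as Worker does

a, b = Shared(), Shared()
a.conditions.append("leaks")
assert b.conditions == ["leaks"]  # mutation visible on every instance

c, d = PerInstance(), PerInstance()
c.conditions.append("isolated")
assert d.conditions == []  # no cross-instance leakage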
@@ -151,6 +161,35 @@ class Worker:
         """Run the worker indefinitely."""
         return await self._run(forever=True)  # pragma: no cover

+    async def run_at_most(self, iterations_by_key: Mapping[str, int]) -> None:
+        """
+        Run the worker until there are no more tasks to process, but limit specified
+        task keys to a maximum number of iterations.
+
+        This is particularly useful for testing self-perpetuating tasks that would
+        otherwise run indefinitely.
+
+        Args:
+            iterations_by_key: Maps task keys to their maximum allowed executions
+        """
+        execution_counts: dict[str, int] = {key: 0 for key in iterations_by_key}
+
+        def has_reached_max_iterations(execution: Execution) -> bool:
+            if execution.key not in iterations_by_key:
+                return False
+
+            if execution_counts[execution.key] >= iterations_by_key[execution.key]:
+                return True
+
+            execution_counts[execution.key] += 1
+            return False
+
+        self._strike_conditions.insert(0, has_reached_max_iterations)
+        try:
+            await self.run_until_finished()
+        finally:
+            self._strike_conditions.remove(has_reached_max_iterations)
+
     async def _run(self, forever: bool = False) -> None:
         logger.info("Starting worker %r with the following tasks:", self.name)
         for task_name, task in self.docket.tasks.items():
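The new `Worker.run_at_most` temporarily registers a strike condition that counts executions per task key, drains the docket with `run_until_finished`, and always removes the condition afterward. A minimal usage sketch, assuming a locally reachable Redis; the `loop` task, names, and URL are placeholders, not from the package:

import asyncio
from docket import Docket, Worker

async def main() -> None:
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:

        async def loop() -> None:
            # Illustrative task that re-enqueues itself forever under one key.
            await docket.add(loop, key="loop")()

        await docket.add(loop, key="loop")()

        async with Worker(docket) as worker:
            # run_until_finished() would never return here, since "loop"
            # always schedules its successor; cap it at three executions.
            await worker.run_at_most({"loop": 3})

asyncio.run(main())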
@@ -322,7 +361,7 @@ class Worker:
         await process_completed_tasks()

     async def _execute(self, message: RedisMessage) -> None:
-        log_context:
+        log_context: Mapping[str, str | float] = self._log_context()

         function_name = message[b"function"].decode()
         function = self.docket.tasks.get(function_name)

@@ -334,13 +373,13 @@ class Worker:

         execution = Execution.from_message(function, message)

-        log_context
+        log_context = {**log_context, **execution.specific_labels()}
         counter_labels = {**self.labels(), **execution.general_labels()}

         arrow = "↬" if execution.attempt > 1 else "↪"
         call = execution.call_repr()

-        if
+        if any(condition(execution) for condition in self._strike_conditions):
             arrow = "🗙"
             logger.warning("%s %s", arrow, call, extra=log_context)
             TASKS_STRICKEN.add(1, counter_labels | {"docket.where": "worker"})

@@ -354,7 +393,7 @@ class Worker:

         start = datetime.now(timezone.utc)
         punctuality = start - execution.when
-        log_context
+        log_context = {**log_context, "punctuality": punctuality.total_seconds()}
         duration = timedelta(0)

         TASKS_STARTED.add(1, counter_labels)
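Together these hunks replace the single strike-list check with `any()` over `_strike_conditions`, a list of `Callable[[Execution], bool]` predicates evaluated before each execution; returning True strikes (skips) the task. A sketch of the predicate shape, relying only on the `Execution.key` attribute seen in this diff (`_strike_conditions` is private, shown purely for illustration):

def skip_reports(execution) -> bool:
    # True means strike (skip) this execution; False lets it run.
    return execution.key.startswith("report-")

# Hypothetical wiring, mirroring what run_at_most() does internally:
# worker._strike_conditions.insert(0, skip_reports)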
pydocket-0.2.1/tests/conftest.py

@@ -0,0 +1,167 @@
+import fcntl
+import os
+import socket
+import time
+from contextlib import contextmanager
+from datetime import datetime, timedelta, timezone
+from functools import partial
+from typing import AsyncGenerator, Callable, Generator, Iterable, cast
+from unittest.mock import AsyncMock
+from uuid import uuid4
+
+import pytest
+import redis.exceptions
+from docker import DockerClient
+from docker.models.containers import Container
+from redis import ConnectionPool, Redis
+
+from docket import Docket, Worker
+
+REDIS_VERSION = os.environ.get("REDIS_VERSION", "7.4")
+
+
+@pytest.fixture
+def now() -> Callable[[], datetime]:
+    return partial(datetime.now, timezone.utc)
+
+
+@contextmanager
+def _sync_redis(url: str) -> Generator[Redis, None, None]:
+    pool: ConnectionPool | None = None
+    redis = Redis.from_url(url)  # type: ignore
+    try:
+        with redis:
+            pool = redis.connection_pool  # type: ignore
+            yield redis
+    finally:
+        if pool:  # pragma: no branch
+            pool.disconnect()
+
+
+@contextmanager
+def _adminitrative_redis(port: int) -> Generator[Redis, None, None]:
+    with _sync_redis(f"redis://localhost:{port}/15") as r:
+        yield r
+
+
+def _wait_for_redis(port: int) -> None:
+    while True:
+        try:
+            with _adminitrative_redis(port) as r:
+                success = r.ping()  # type: ignore
+                if success:  # pragma: no branch
+                    return
+        except redis.exceptions.ConnectionError:  # pragma: no cover
+            time.sleep(0.1)
+
+
+@pytest.fixture(scope="session")
+def redis_server(testrun_uid: str, worker_id: str) -> Generator[Container, None, None]:
+    client = DockerClient.from_env()
+
+    container: Container | None = None
+    lock_file_name = f"/tmp/docket-unit-tests-{testrun_uid}-startup"
+
+    with open(lock_file_name, "w+") as lock_file:
+        fcntl.flock(lock_file, fcntl.LOCK_EX)
+
+        containers: Iterable[Container] = cast(
+            Iterable[Container],
+            client.containers.list(  # type: ignore
+                all=True,
+                filters={"label": "source=docket-unit-tests"},
+            ),
+        )
+        for c in containers:
+            if c.labels.get("testrun_uid") == testrun_uid:  # type: ignore
+                container = c
+            else:
+                c.remove(force=True)  # pragma: no cover
+
+        if not container:
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.bind(("", 0))
+                redis_port = s.getsockname()[1]
+
+            container = client.containers.run(
+                f"redis:{REDIS_VERSION}",
+                detach=True,
+                ports={"6379/tcp": redis_port},
+                labels={
+                    "source": "docket-unit-tests",
+                    "testrun_uid": testrun_uid,
+                },
+                auto_remove=True,
+            )
+
+            _wait_for_redis(redis_port)
+        else:
+            port_bindings = container.attrs["HostConfig"]["PortBindings"]["6379/tcp"]
+            redis_port = int(port_bindings[0]["HostPort"])
+
+        with _adminitrative_redis(redis_port) as r:
+            r.sadd(f"docket-unit-tests:{testrun_uid}", worker_id)
+
+    try:
+        yield container
+    finally:
+        with _adminitrative_redis(redis_port) as r:
+            with r.pipeline() as pipe:  # type: ignore
+                pipe.srem(f"docket-unit-tests:{testrun_uid}", worker_id)
+                pipe.scard(f"docket-unit-tests:{testrun_uid}")
+                count: int
+                _, count = pipe.execute()  # type: ignore
+
+        if count == 0:
+            container.stop()
+            os.remove(lock_file_name)
+
+
+@pytest.fixture
+def redis_port(redis_server: Container) -> int:
+    port_bindings = redis_server.attrs["HostConfig"]["PortBindings"]["6379/tcp"]
+    return int(port_bindings[0]["HostPort"])
+
+
+@pytest.fixture(scope="session")
+def redis_db(worker_id: str) -> int:
+    if not worker_id or "gw" not in worker_id:
+        return 0  # pragma: no cover
+    else:
+        return 0 + int(worker_id.replace("gw", ""))  # pragma: no cover
+
+
+@pytest.fixture
+def redis_url(redis_port: int, redis_db: int) -> str:
+    url = f"redis://localhost:{redis_port}/{redis_db}"
+    with _sync_redis(url) as r:
+        r.flushdb()  # type: ignore
+    return url
+
+
+@pytest.fixture
+async def docket(redis_url: str, aiolib: str) -> AsyncGenerator[Docket, None]:
+    async with Docket(name=f"test-docket-{uuid4()}", url=redis_url) as docket:
+        yield docket
+
+
+@pytest.fixture
+async def worker(docket: Docket) -> AsyncGenerator[Worker, None]:
+    async with Worker(
+        docket, minimum_check_interval=timedelta(milliseconds=10)
+    ) as worker:
+        yield worker
+
+
+@pytest.fixture
+def the_task() -> AsyncMock:
+    task = AsyncMock()
+    task.__name__ = "the_task"
+    return task
+
+
+@pytest.fixture
+def another_task() -> AsyncMock:
+    task = AsyncMock()
+    task.__name__ = "another_task"
+    return task
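The rewritten conftest lets pytest-xdist worker processes share one Redis container per test run: an exclusive `fcntl.flock` serializes startup, the `testrun_uid` label identifies the run's container, and a Redis set refcounts workers so the last one to leave stops the container. A standalone sketch of just the locking pattern (POSIX-only; the path is a placeholder):

import fcntl

lock_path = "/tmp/example-startup.lock"  # placeholder path

with open(lock_path, "w+") as lock_file:
    # flock() blocks until no other process holds the exclusive lock,
    # so exactly one process runs the critical section at a time.
    fcntl.flock(lock_file, fcntl.LOCK_EX)
    # ... critical section: find or start the shared container ...
# The lock is released when the file object is closed.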
{pydocket-0.2.0 → pydocket-0.2.1}/tests/test_fundamentals.py

@@ -609,6 +609,42 @@ async def test_self_perpetuating_scheduled_tasks(
     assert calls["second"] == [21, 22, 23]


+async def test_infinitely_self_perpetuating_tasks(
+    docket: Docket, worker: Worker, now: Callable[[], datetime]
+):
+    """docket should support testing use cases for infinitely self-perpetuating tasks"""
+
+    calls: dict[str, list[int]] = {
+        "first": [],
+        "second": [],
+        "unaffected": [],
+    }
+
+    async def the_task(start: int, iteration: int, key: str = TaskKey()):
+        calls[key].append(start + iteration)
+        soon = now() + timedelta(milliseconds=100)
+        await docket.add(the_task, key=key, when=soon)(start, iteration + 1)
+
+    async def unaffected_task(start: int, iteration: int, key: str = TaskKey()):
+        calls[key].append(start + iteration)
+        if iteration < 3:
+            await docket.add(unaffected_task, key=key)(start, iteration + 1)
+
+    await docket.add(the_task, key="first")(10, 1)
+    await docket.add(the_task, key="second")(20, 1)
+    await docket.add(unaffected_task, key="unaffected")(30, 1)
+
+    # Using worker.run_until_finished() would hang here because the task is always
+    # queueing up a future run of itself. With worker.run_at_most(),
+    # we can specify task keys that will only be allowed to run a limited number of
+    # times, thus allowing the worker to exit cleanly.
+    await worker.run_at_most({"first": 4, "second": 2})
+
+    assert calls["first"] == [11, 12, 13, 14]
+    assert calls["second"] == [21, 22]
+    assert calls["unaffected"] == [31, 32, 33]
+
+
 async def test_striking_entire_tasks(
     docket: Docket, worker: Worker, the_task: AsyncMock, another_task: AsyncMock
 ):
pydocket-0.2.0/tests/conftest.py
DELETED

@@ -1,117 +0,0 @@
-import os
-import socket
-import time
-from contextlib import contextmanager
-from datetime import datetime, timezone
-from functools import partial
-from typing import AsyncGenerator, Callable, Generator, Iterable, cast
-from unittest.mock import AsyncMock
-from uuid import uuid4
-
-import pytest
-import redis.exceptions
-from docker import DockerClient
-from docker.models.containers import Container
-from redis import ConnectionPool, Redis
-
-from docket import Docket, Worker
-
-REDIS_VERSION = os.environ.get("REDIS_VERSION", "7.4")
-
-
-@pytest.fixture
-def now() -> Callable[[], datetime]:
-    return partial(datetime.now, timezone.utc)
-
-
-@pytest.fixture(scope="session")
-def redis_port() -> int:
-    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-        s.bind(("", 0))
-        return s.getsockname()[1]
-
-
-@contextmanager
-def _sync_redis(url: str) -> Generator[Redis, None, None]:
-    pool: ConnectionPool | None = None
-    redis = Redis.from_url(url)  # type: ignore
-    try:
-        with redis:
-            pool = redis.connection_pool  # type: ignore
-            yield redis
-    finally:
-        if pool:  # pragma: no branch
-            pool.disconnect()
-
-
-@pytest.fixture(scope="session")
-def redis_server(redis_port: int) -> Generator[Container, None, None]:
-    client = DockerClient.from_env()
-
-    container: Container
-
-    # Find and remove any containers from previous test runs
-    containers: Iterable[Container] = cast(
-        Iterable[Container],
-        client.containers.list(all=True, filters={"label": "source=docket-unit-tests"}),  # type: ignore
-    )
-    for container in containers:  # pragma: no cover
-        container.remove(force=True)
-
-    container = client.containers.run(
-        f"redis:{REDIS_VERSION}",
-        detach=True,
-        ports={"6379/tcp": redis_port},
-        labels={"source": "docket-unit-tests"},
-        auto_remove=True,
-    )
-
-    url = f"redis://localhost:{redis_port}/0"
-
-    while True:
-        try:
-            with _sync_redis(url) as r:
-                success = r.ping()  # type: ignore
-                if success:  # pragma: no branch
-                    break
-        except redis.exceptions.ConnectionError:  # pragma: no cover
-            time.sleep(0.1)
-
-    try:
-        yield container
-    finally:
-        container.stop()
-
-
-@pytest.fixture
-def redis_url(redis_server: Container, redis_port: int) -> str:
-    url = f"redis://localhost:{redis_port}/0"
-    with _sync_redis(url) as r:
-        r.flushdb()  # type: ignore
-    return url
-
-
-@pytest.fixture
-async def docket(redis_url: str, aiolib: str) -> AsyncGenerator[Docket, None]:
-    async with Docket(name=f"test-docket-{uuid4()}", url=redis_url) as docket:
-        yield docket
-
-
-@pytest.fixture
-async def worker(docket: Docket) -> AsyncGenerator[Worker, None]:
-    async with Worker(docket) as worker:
-        yield worker
-
-
-@pytest.fixture
-def the_task() -> AsyncMock:
-    task = AsyncMock()
-    task.__name__ = "the_task"
-    return task
-
-
-@pytest.fixture
-def another_task() -> AsyncMock:
-    task = AsyncMock()
-    task.__name__ = "another_task"
-    return task