pydocket 0.1.1__tar.gz → 0.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (51)
  1. pydocket-0.1.1/.cursorrules → pydocket-0.1.3/.cursor/rules/general.mdc +7 -17
  2. pydocket-0.1.3/.cursor/rules/python-style.mdc +22 -0
  3. {pydocket-0.1.1 → pydocket-0.1.3}/.github/workflows/ci.yml +7 -3
  4. {pydocket-0.1.1 → pydocket-0.1.3}/PKG-INFO +1 -1
  5. {pydocket-0.1.1 → pydocket-0.1.3}/pyproject.toml +1 -0
  6. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/cli.py +9 -0
  7. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/docket.py +18 -8
  8. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/worker.py +27 -22
  9. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_snapshot.py +0 -39
  10. {pydocket-0.1.1 → pydocket-0.1.3}/tests/conftest.py +38 -9
  11. {pydocket-0.1.1 → pydocket-0.1.3}/.github/codecov.yml +0 -0
  12. {pydocket-0.1.1 → pydocket-0.1.3}/.github/workflows/chaos.yml +0 -0
  13. {pydocket-0.1.1 → pydocket-0.1.3}/.github/workflows/publish.yml +0 -0
  14. {pydocket-0.1.1 → pydocket-0.1.3}/.gitignore +0 -0
  15. {pydocket-0.1.1 → pydocket-0.1.3}/.pre-commit-config.yaml +0 -0
  16. {pydocket-0.1.1 → pydocket-0.1.3}/LICENSE +0 -0
  17. {pydocket-0.1.1 → pydocket-0.1.3}/README.md +0 -0
  18. {pydocket-0.1.1 → pydocket-0.1.3}/chaos/README.md +0 -0
  19. {pydocket-0.1.1 → pydocket-0.1.3}/chaos/__init__.py +0 -0
  20. {pydocket-0.1.1 → pydocket-0.1.3}/chaos/driver.py +0 -0
  21. {pydocket-0.1.1 → pydocket-0.1.3}/chaos/producer.py +0 -0
  22. {pydocket-0.1.1 → pydocket-0.1.3}/chaos/run +0 -0
  23. {pydocket-0.1.1 → pydocket-0.1.3}/chaos/tasks.py +0 -0
  24. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/__init__.py +0 -0
  25. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/__main__.py +0 -0
  26. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/annotations.py +0 -0
  27. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/dependencies.py +0 -0
  28. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/execution.py +0 -0
  29. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/instrumentation.py +0 -0
  30. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/py.typed +0 -0
  31. {pydocket-0.1.1 → pydocket-0.1.3}/src/docket/tasks.py +0 -0
  32. {pydocket-0.1.1 → pydocket-0.1.3}/telemetry/.gitignore +0 -0
  33. {pydocket-0.1.1 → pydocket-0.1.3}/telemetry/start +0 -0
  34. {pydocket-0.1.1 → pydocket-0.1.3}/telemetry/stop +0 -0
  35. {pydocket-0.1.1 → pydocket-0.1.3}/tests/__init__.py +0 -0
  36. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/__init__.py +0 -0
  37. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/conftest.py +0 -0
  38. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_module.py +0 -0
  39. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_parsing.py +0 -0
  40. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_striking.py +0 -0
  41. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_tasks.py +0 -0
  42. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_version.py +0 -0
  43. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_worker.py +0 -0
  44. {pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_workers.py +0 -0
  45. {pydocket-0.1.1 → pydocket-0.1.3}/tests/test_dependencies.py +0 -0
  46. {pydocket-0.1.1 → pydocket-0.1.3}/tests/test_docket.py +0 -0
  47. {pydocket-0.1.1 → pydocket-0.1.3}/tests/test_fundamentals.py +0 -0
  48. {pydocket-0.1.1 → pydocket-0.1.3}/tests/test_instrumentation.py +0 -0
  49. {pydocket-0.1.1 → pydocket-0.1.3}/tests/test_striking.py +0 -0
  50. {pydocket-0.1.1 → pydocket-0.1.3}/tests/test_worker.py +0 -0
  51. {pydocket-0.1.1 → pydocket-0.1.3}/uv.lock +0 -0

pydocket-0.1.1/.cursorrules → pydocket-0.1.3/.cursor/rules/general.mdc

@@ -1,3 +1,10 @@
+ ---
+ description:
+ globs:
+ alwaysApply: true
+ ---
+
+ # about docket
  docket is a distributed background task system for Python functions with a focus
  on the scheduling of future work as seamlessly and efficiency as immediate work.
 
@@ -22,20 +29,3 @@ idempotency of an execution.
 
  A docket worker should be as easily usable in code as it is from the command line,
  and should be a breeze to use with test suites.
-
- # Code style
-
- When generating production code, always use full parameter and return type hints
- for every function. Never generate useless inline comments that just reiterate
- what the code is doing. It's okay to include comments in the rare case there is
- something tricky going on.
-
- When generating tests, always use parameter type hints, but never include the
- `-> None` return type hint for a test function. For `pytest` fixtures, always
- generate both the parameter and return type hints.
-
- When generating tests, favor smaller, focused tests that use fixtures for reuse.
- Don't include extraneous comments in the test code unless something needs more
- clarity. Always generate a docstring using "should" language to describe the
- aspect of the system the test is checking. Use simple direct language and avoid
- sounding stuffy, but make these complete sentences.

pydocket-0.1.3/.cursor/rules/python-style.mdc

@@ -0,0 +1,22 @@
+ ---
+ description: how to write python
+ globs: *.py
+ alwaysApply: false
+ ---
+
+ # Code style
+
+ When generating production code, always use full parameter and return type hints
+ for every function. Never generate useless inline comments that just reiterate
+ what the code is doing. It's okay to include comments in the rare case there is
+ something tricky going on.
+
+ When generating tests, always use parameter type hints, but never include the
+ `-> None` return type hint for a test function. For `pytest` fixtures, always
+ generate both the parameter and return type hints.
+
+ When generating tests, favor smaller, focused tests that use fixtures for reuse.
+ Don't include extraneous comments in the test code unless something needs more
+ clarity. Always generate a docstring using "should" language to describe the
+ aspect of the system the test is checking. Use simple direct language and avoid
+ sounding stuffy, but make these complete sentences.
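
Purely as an illustration of these conventions, a test written in this style might look like the sketch below (the fixture and test names are invented):

import pytest


@pytest.fixture
def greeting() -> str:
    return "hello"


def test_greets_politely(greeting: str):
    """Should greet the caller with the configured greeting."""
    assert greeting == "hello"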

{pydocket-0.1.1 → pydocket-0.1.3}/.github/workflows/ci.yml

@@ -9,12 +9,14 @@ on:
 
  jobs:
  test:
- name: Test Python ${{ matrix.python-version }}
+ name: Test Python ${{ matrix.python-version }}, Redis ${{ matrix.redis-version }}, redis-py ${{ matrix.redis-py-version }}
  runs-on: ubuntu-latest
  strategy:
+ fail-fast: false
  matrix:
  python-version: ["3.12", "3.13"]
-
+ redis-version: ["6.2", "7.4"]
+ redis-py-version: [">=4.6,<5", ">=5"]
 
  steps:
  - uses: actions/checkout@v4
@@ -27,9 +29,11 @@ jobs:
  cache-dependency-glob: "pyproject.toml"
 
  - name: Install dependencies
- run: uv sync --dev
+ run: uv sync --dev --upgrade-package 'redis${{ matrix.redis-py-version }}'
 
  - name: Run tests
+ env:
+ REDIS_VERSION: ${{ matrix.redis-version }}
  run: uv run pytest --cov-branch --cov-report=xml --cov-report=term-missing:skip-covered
 
  - name: Upload coverage reports to Codecov

{pydocket-0.1.1 → pydocket-0.1.3}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydocket
- Version: 0.1.1
+ Version: 0.1.3
  Summary: A distributed background task system for Python functions
  Project-URL: Homepage, https://github.com/chrisguidry/docket
  Project-URL: Bug Tracker, https://github.com/chrisguidry/docket/issues

{pydocket-0.1.1 → pydocket-0.1.3}/pyproject.toml

@@ -68,6 +68,7 @@ packages = ["src/docket"]
  addopts = "--cov=src/docket --cov=tests --cov-report=term-missing --cov-branch"
  asyncio_mode = "auto"
  asyncio_default_fixture_loop_scope = "session"
+ filterwarnings = ["error"]
 
  [tool.pyright]
  include = ["src", "tests"]
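
With `filterwarnings = ["error"]`, pytest treats any warning raised during the suite as a test failure. A standalone illustration of the same escalation (not docket code, just the equivalent warnings filter):

import warnings

warnings.simplefilter("error")  # the same filter pytest will now apply

try:
    warnings.warn("something is deprecated", DeprecationWarning)
except DeprecationWarning as exc:
    print(f"escalated to an exception: {exc}")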

{pydocket-0.1.1 → pydocket-0.1.3}/src/docket/cli.py

@@ -228,6 +228,14 @@ def worker(
  envvar="DOCKET_WORKER_RECONNECTION_DELAY",
  ),
  ] = timedelta(seconds=5),
+ minimum_check_interval: Annotated[
+ timedelta,
+ typer.Option(
+ parser=duration,
+ help="The minimum interval to check for tasks",
+ envvar="DOCKET_WORKER_MINIMUM_CHECK_INTERVAL",
+ ),
+ ] = timedelta(milliseconds=100),
  until_finished: Annotated[
  bool,
  typer.Option(
@@ -244,6 +252,7 @@ def worker(
  concurrency=concurrency,
  redelivery_timeout=redelivery_timeout,
  reconnection_delay=reconnection_delay,
+ minimum_check_interval=minimum_check_interval,
  until_finished=until_finished,
  tasks=tasks,
  )

{pydocket-0.1.1 → pydocket-0.1.3}/src/docket/docket.py

@@ -147,17 +147,17 @@ class Docket:
  self._monitor_strikes_task = asyncio.create_task(self._monitor_strikes())
 
  # Ensure that the stream and worker group exist
- async with self.redis() as r:
- try:
+ try:
+ async with self.redis() as r:
  await r.xgroup_create(
  groupname=self.worker_group_name,
  name=self.stream_key,
  id="0-0",
  mkstream=True,
  )
- except redis.exceptions.RedisError as e:
- if "BUSYGROUP" not in repr(e):
- raise
+ except redis.exceptions.RedisError as e:
+ if "BUSYGROUP" not in repr(e):
+ raise
 
  return self
 
@@ -178,8 +178,18 @@
 
  @asynccontextmanager
  async def redis(self) -> AsyncGenerator[Redis, None]:
- async with Redis.from_url(self.url) as redis: # type: ignore
- yield redis
+ redis: Redis | None = None
+ try:
+ redis = await Redis.from_url(
+ self.url,
+ single_connection_client=True,
+ )
+ async with redis:
+ yield redis
+ finally:
+ # redis 4.6.0 doesn't automatically disconnect and leaves connections open
+ if redis:
+ await redis.connection_pool.disconnect()
 
  def register(self, function: Callable[..., Awaitable[Any]]) -> None:
  from .dependencies import validate_dependencies
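
A minimal standalone sketch of the same connection-handling pattern, using redis-py's asyncio client (the helper name here is invented for illustration):

from contextlib import asynccontextmanager
from typing import AsyncGenerator

from redis.asyncio import Redis


@asynccontextmanager
async def open_redis(url: str) -> AsyncGenerator[Redis, None]:
    client: Redis | None = None
    try:
        client = Redis.from_url(url, single_connection_client=True)
        async with client:
            yield client
    finally:
        # Older redis-py releases (4.6.x) can leave the underlying connection
        # open, so disconnect the pool explicitly rather than relying on the
        # context manager alone.
        if client:
            await client.connection_pool.disconnect()

Whether the explicit disconnect is strictly needed depends on the redis-py version in use; newer clients close their connections when the context manager exits.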
@@ -513,7 +523,7 @@ class Docket:
  )
  running.append(RunningExecution(execution, worker_name, started))
  else:
- future.append(execution)
+ future.append(execution) # pragma: no cover
 
  for message in queued_messages:
  function = self.tasks[message[b"function"].decode()]

{pydocket-0.1.1 → pydocket-0.1.3}/src/docket/worker.py

@@ -67,7 +67,7 @@ class Worker:
  concurrency: int = 10,
  redelivery_timeout: timedelta = timedelta(minutes=5),
  reconnection_delay: timedelta = timedelta(seconds=5),
- minimum_check_interval: timedelta = timedelta(milliseconds=10),
+ minimum_check_interval: timedelta = timedelta(milliseconds=100),
  ) -> None:
  self.docket = docket
  self.name = name or f"worker:{uuid4()}"
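
A sketch of passing the option when embedding a worker in code. The Worker arguments come from the signature above; the Docket constructor arguments and URL are assumptions for illustration:

import asyncio
from datetime import timedelta

from docket import Docket, Worker


async def main() -> None:
    # "name" and "url" are assumed Docket parameters for this example
    async with Docket(name="example", url="redis://localhost:6379/0") as docket:
        async with Worker(
            docket,
            concurrency=10,
            minimum_check_interval=timedelta(milliseconds=100),
        ) as worker:
            await worker.run_until_finished()


if __name__ == "__main__":
    asyncio.run(main())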
@@ -110,6 +110,7 @@ class Worker:
  concurrency: int = 10,
  redelivery_timeout: timedelta = timedelta(minutes=5),
  reconnection_delay: timedelta = timedelta(seconds=5),
+ minimum_check_interval: timedelta = timedelta(milliseconds=100),
  until_finished: bool = False,
  tasks: list[str] = ["docket.tasks:standard_tasks"],
  ) -> None:
@@ -123,6 +124,7 @@
  concurrency=concurrency,
  redelivery_timeout=redelivery_timeout,
  reconnection_delay=reconnection_delay,
+ minimum_check_interval=minimum_check_interval,
  ) as worker:
  if until_finished:
  await worker.run_until_finished()
@@ -170,28 +172,31 @@
  """
  local total_work = redis.call('ZCARD', KEYS[1])
  local due_work = 0
- local tasks = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1])
 
- for i, key in ipairs(tasks) do
- local hash_key = ARGV[2] .. ":" .. key
- local task_data = redis.call('HGETALL', hash_key)
-
- if #task_data > 0 then
- local task = {}
- for j = 1, #task_data, 2 do
- task[task_data[j]] = task_data[j+1]
+ if total_work > 0 then
+ local tasks = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1])
+
+ for i, key in ipairs(tasks) do
+ local hash_key = ARGV[2] .. ":" .. key
+ local task_data = redis.call('HGETALL', hash_key)
+
+ if #task_data > 0 then
+ local task = {}
+ for j = 1, #task_data, 2 do
+ task[task_data[j]] = task_data[j+1]
+ end
+
+ redis.call('XADD', KEYS[2], '*',
+ 'key', task['key'],
+ 'when', task['when'],
+ 'function', task['function'],
+ 'args', task['args'],
+ 'kwargs', task['kwargs'],
+ 'attempt', task['attempt']
+ )
+ redis.call('DEL', hash_key)
+ due_work = due_work + 1
  end
-
- redis.call('XADD', KEYS[2], '*',
- 'key', task['key'],
- 'when', task['when'],
- 'function', task['function'],
- 'args', task['args'],
- 'kwargs', task['kwargs'],
- 'attempt', task['attempt']
- )
- redis.call('DEL', hash_key)
- due_work = due_work + 1
  end
  end
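
The added guard means the script only issues ZRANGEBYSCORE and the per-task HGETALL/XADD calls when the sorted set is non-empty. Purely as an illustration of driving such a script from redis-py (the key names, argument roles, and helper are assumptions, not docket's actual plumbing):

import time

from redis.asyncio import Redis


async def promote_due_tasks(redis: Redis, lua_source: str) -> None:
    # KEYS[1]: sorted set of scheduled work, KEYS[2]: the worker stream;
    # ARGV[1]: the current timestamp, ARGV[2]: the hash-key prefix.
    script = redis.register_script(lua_source)
    await script(
        keys=["example:queue", "example:stream"],
        args=[time.time(), "example:queue"],
    )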
 
@@ -262,7 +267,7 @@ class Worker:
  )
 
  redeliveries: RedisMessages
- _, redeliveries, _ = await redis.xautoclaim(
+ _, redeliveries, *_ = await redis.xautoclaim(
  name=self.docket.stream_key,
  groupname=self.docket.worker_group_name,
  consumername=self.name,
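
The starred unpacking tolerates both reply shapes of XAUTOCLAIM: Redis 6.2 returns two elements (a cursor and the claimed messages), while Redis 7.x adds a third list of deleted entry IDs. A tiny illustration with made-up reply values:

reply_from_redis_6 = ["0-0", [("1-0", {b"key": b"a"})]]
reply_from_redis_7 = ["0-0", [("1-0", {b"key": b"a"})], []]

for reply in (reply_from_redis_6, reply_from_redis_7):
    _, messages, *_ = reply  # works whether or not the third element exists
    assert messages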

{pydocket-0.1.1 → pydocket-0.1.3}/tests/cli/test_snapshot.py

@@ -91,45 +91,6 @@ async def test_snapshot_with_running_tasks(docket: Docket, runner: CliRunner):
  assert "sleep" in result.output
  assert "test-worker" in result.output
 
- worker_running.cancel()
- await worker_running
-
-
- async def test_snapshot_with_mixed_tasks(docket: Docket, runner: CliRunner):
- """Should show both running and scheduled tasks in the snapshot"""
- heartbeat = timedelta(milliseconds=20)
- docket.heartbeat_interval = heartbeat
-
- future = datetime.now(timezone.utc) + timedelta(seconds=5)
- await docket.add(tasks.trace, when=future)("hi!")
- for _ in range(5): # more than the concurrency allows
- await docket.add(tasks.sleep)(2)
-
- async with Worker(docket, name="test-worker", concurrency=2) as worker:
- worker_running = asyncio.create_task(worker.run_until_finished())
-
- await asyncio.sleep(0.1)
-
- result = await asyncio.get_running_loop().run_in_executor(
- None,
- runner.invoke,
- app,
- [
- "snapshot",
- "--url",
- docket.url,
- "--docket",
- docket.name,
- ],
- )
- assert result.exit_code == 0, result.output
-
- assert "1 workers, 2/6 running" in result.output
- assert "sleep" in result.output
- assert "test-worker" in result.output
- assert "trace" in result.output
-
- worker_running.cancel()
  await worker_running
 
 

{pydocket-0.1.1 → pydocket-0.1.3}/tests/conftest.py

@@ -1,7 +1,9 @@
+ import os
  import time
+ from contextlib import contextmanager
  from datetime import datetime, timezone
  from functools import partial
- from typing import AsyncGenerator, Callable, Generator, Iterable, cast
+ from typing import Any, AsyncGenerator, Callable, Generator, Iterable, cast
  from unittest.mock import AsyncMock
  from uuid import uuid4
 
@@ -9,10 +11,12 @@ import pytest
  import redis.exceptions
  from docker import DockerClient
  from docker.models.containers import Container
- from redis import Redis
+ from redis import ConnectionPool, Redis
 
  from docket import Docket, Worker
 
+ REDIS_VERSION = os.environ.get("REDIS_VERSION", "7.4")
+
 
  @pytest.fixture
  def now() -> Callable[[], datetime]:
@@ -24,6 +28,19 @@ def redis_port(unused_tcp_port_factory: Callable[[], int]) -> int:
  return unused_tcp_port_factory()
 
 
+ @contextmanager
+ def _sync_redis(url: str) -> Generator[Redis, None, None]:
+ pool: ConnectionPool | None = None
+ redis = Redis.from_url(url, single_connection_client=True) # type: ignore
+ try:
+ with redis:
+ pool = redis.connection_pool # type: ignore
+ yield redis
+ finally:
+ if pool: # pragma: no branch
+ pool.disconnect()
+
+
  @pytest.fixture(scope="session")
  def redis_server(redis_port: int) -> Generator[Container, None, None]:
  client = DockerClient.from_env()
@@ -39,19 +56,22 @@ def redis_server(redis_port: int) -> Generator[Container, None, None]:
  container.remove(force=True)
 
  container = client.containers.run(
- "redis:7.4.2",
+ f"redis:{REDIS_VERSION}",
  detach=True,
  ports={"6379/tcp": redis_port},
  labels={"source": "docket-unit-tests"},
  auto_remove=True,
  )
 
+ url = f"redis://localhost:{redis_port}/0"
+
  while True:
  try:
- with Redis.from_url(f"redis://localhost:{redis_port}/0") as r: # type: ignore
- if r.ping(): # type: ignore
+ with _sync_redis(url) as r:
+ success = r.ping() # type: ignore
+ if success: # pragma: no branch
  break
- except redis.exceptions.ConnectionError:
+ except redis.exceptions.ConnectionError: # pragma: no cover
  pass
 
  time.sleep(0.1)
@@ -59,15 +79,24 @@ def redis_server(redis_port: int) -> Generator[Container, None, None]:
  try:
  yield container
  finally:
+ with _sync_redis(url) as r:
+ info: dict[str, Any] = r.info() # type: ignore
+
  container.stop()
 
+ # By the time the test suite finishes, there should have been no more open
+ # Redis connections (just the one that we used to ask about client connections).
+ assert info["connected_clients"] == 1, (
+ f"Expected 1 connected clients, but found {info['connected_clients']}"
+ )
+
 
  @pytest.fixture
  def redis_url(redis_server: Container, redis_port: int) -> str:
- with Redis.from_url(f"redis://localhost:{redis_port}/0") as r: # type: ignore
+ url = f"redis://localhost:{redis_port}/0"
+ with _sync_redis(url) as r:
  r.flushdb() # type: ignore
-
- return f"redis://localhost:{redis_port}/0"
+ return url
 
 
  @pytest.fixture