python-pq 0.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,256 @@
+ Metadata-Version: 2.3
+ Name: python-pq
+ Version: 0.1.1
+ Summary: Postgres-backed job queue for Python
+ Author: ricwo
+ Author-email: ricwo <r@cogram.com>
+ Requires-Dist: alembic>=1.17.2
+ Requires-Dist: click>=8.3.1
+ Requires-Dist: croniter>=6.0.0
+ Requires-Dist: dill>=0.4.0
+ Requires-Dist: loguru>=0.7.3
+ Requires-Dist: psycopg2-binary>=2.9.11
+ Requires-Dist: pydantic>=2.12.5
+ Requires-Dist: pydantic-settings>=2.12.0
+ Requires-Dist: sqlalchemy>=2.0.45
+ Requires-Python: >=3.14
+ Description-Content-Type: text/markdown
+
+ # pq
+
+ Postgres-backed job queue for Python with fork-based worker isolation.
+
+ ## Features
+
+ - **Fork isolation** - Each task runs in a forked process. OOM or crashes don't affect the worker.
+ - **Natural Python API** - Pass `*args, **kwargs` directly. Pydantic models and custom objects work.
+ - **Periodic tasks** - Schedule with intervals or cron expressions.
+ - **Priority queues** - Five priority levels; higher-priority tasks run first.
+ - **Async support** - Async handlers work seamlessly.
+ - **Concurrent workers** - Multiple workers use `FOR UPDATE SKIP LOCKED` to prevent duplicate processing.
+
+ ## Installation
+
+ ```bash
+ uv add python-pq
+ ```
+
+ Requires PostgreSQL and Python 3.14+.
+
+ ## Quick Start
+
+ ```python
+ from pq import PQ
+
+ pq = PQ("postgresql://localhost/mydb")
+ pq.create_tables()
+
+ def send_email(to: str, subject: str, body: str) -> None:
+     print(f"Sending email to {to}: {subject}")
+
+ pq.enqueue(send_email, to="user@example.com", subject="Hello", body="...")
+ pq.run_worker()
+ ```
+
+ ## Enqueueing Tasks
+
+ ```python
+ def greet(name: str) -> None:
+     print(f"Hello, {name}!")
+
+ pq.enqueue(greet, name="World")
+ pq.enqueue(greet, "World")  # Positional args work too
+
+ # Delayed execution
+ from datetime import datetime, timedelta, UTC
+ pq.enqueue(greet, "World", run_at=datetime.now(UTC) + timedelta(hours=1))
+
+ # Priority
+ from pq import Priority
+ pq.enqueue(greet, "World", priority=Priority.CRITICAL)  # 100
+ pq.enqueue(greet, "World", priority=Priority.HIGH)      # 75
+ pq.enqueue(greet, "World", priority=Priority.NORMAL)    # 50 (default)
+ pq.enqueue(greet, "World", priority=Priority.LOW)       # 25
+ pq.enqueue(greet, "World", priority=Priority.BATCH)     # 0
+ ```
+
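+ The numeric values in the comments suggest `Priority` behaves like an integer enum. A minimal sketch of an equivalent definition, inferred from the values above rather than from pq's source:
+
+ ```python
+ from enum import IntEnum
+
+ class Priority(IntEnum):
+     CRITICAL = 100
+     HIGH = 75
+     NORMAL = 50
+     LOW = 25
+     BATCH = 0
+ ```
+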
+ ## Periodic Tasks
+
+ ```python
+ from datetime import timedelta
+
+ def heartbeat() -> None:
+     print("alive")
+
+ def weekly_report() -> None:
+     print("generating report...")
+
+ # Fixed interval
+ pq.schedule(heartbeat, run_every=timedelta(minutes=5))
+
+ # Cron expression (Monday 9am)
+ pq.schedule(weekly_report, cron="0 9 * * 1")
+
+ # With arguments
+ def report(report_type: str) -> None:
+     print(f"generating {report_type} report...")
+
+ pq.schedule(report, run_every=timedelta(hours=1), report_type="hourly")
+
+ # Remove schedule
+ pq.unschedule(heartbeat)
+ ```
+
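+ pq depends on croniter, so cron schedules use the standard five-field syntax. A quick way to sanity-check an expression before scheduling it (this assumes nothing about pq's internals, only croniter's public API):
+
+ ```python
+ from datetime import datetime, UTC
+ from croniter import croniter
+
+ # Next two firings of "Monday 9am" from now
+ it = croniter("0 9 * * 1", datetime.now(UTC))
+ print(it.get_next(datetime))
+ print(it.get_next(datetime))
+ ```
+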
+ ## Serialization
+
+ Arguments are serialized automatically:
+
+ | Type | Method |
+ |------|--------|
+ | JSON-serializable (str, int, list, dict) | JSON |
+ | Pydantic models | `model_dump()` → JSON |
+ | Custom objects, functions | dill (pickle) |
+
+ ```python
+ from pydantic import BaseModel
+
+ class User(BaseModel):
+     id: int
+     email: str
+
+ def process(user: dict, transform: callable) -> None:
+     print(transform(user))
+
+ # Pydantic model → dict, function → pickled
+ pq.enqueue(process, User(id=1, email="a@b.com"), transform=lambda x: x["id"] * 2)
+ ```
+
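+ A minimal sketch of the fallback order the table above implies (illustrative only, not pq's actual serializer):
+
+ ```python
+ import json
+
+ import dill
+ from pydantic import BaseModel
+
+ def serialize(value: object) -> bytes:
+     if isinstance(value, BaseModel):
+         return json.dumps(value.model_dump()).encode()  # Pydantic → dict → JSON
+     try:
+         return json.dumps(value).encode()  # plain JSON types
+     except TypeError:
+         return dill.dumps(value)  # functions, lambdas, custom objects
+ ```
+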
+ ## Async Tasks
+
+ ```python
+ import httpx
+
+ async def fetch(url: str) -> None:
+     async with httpx.AsyncClient() as client:
+         response = await client.get(url)
+         print(response.status_code)
+
+ pq.enqueue(fetch, "https://example.com")
+ ```
+
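+ A sketch of how a worker can run sync and async handlers through one code path (illustrative, not pq's actual dispatch):
+
+ ```python
+ import asyncio
+ import inspect
+
+ def run_handler(fn, *args, **kwargs):
+     if inspect.iscoroutinefunction(fn):
+         # Coroutine functions get their own event loop in the forked child
+         return asyncio.run(fn(*args, **kwargs))
+     return fn(*args, **kwargs)
+ ```
+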
+ ## Worker
+
+ ```python
+ # Run forever (poll every second when idle)
+ pq.run_worker(poll_interval=1.0)
+
+ # Process a single task
+ if pq.run_worker_once():
+     print("Processed a task")
+
+ # Timeout (kill tasks running longer than 5 minutes)
+ pq.run_worker(max_runtime=300)
+
+ # Dedicated worker for specific priorities
+ from pq import Priority
+ pq.run_worker(priorities={Priority.CRITICAL, Priority.HIGH})
+ ```
+
+ ## Dedicated Priority Workers
+
+ Run separate workers for different priority tiers so high-priority tasks aren't blocked behind lower-priority work:
+
+ ```bash
+ # Terminal 1: High-priority worker (CRITICAL + HIGH only)
+ python -c "from myapp import pq; from pq import Priority; pq.run_worker(priorities={Priority.CRITICAL, Priority.HIGH})"
+
+ # Terminals 2-3: General workers (all priorities)
+ python -c "from myapp import pq; pq.run_worker()"
+ ```
+
+ This way critical tasks are picked up promptly even when the queue is busy.
+
+ ## Task Management
+
+ ```python
+ from datetime import datetime, timedelta, UTC
+
+ def my_task() -> None:
+     pass
+
+ # Cancel a pending task
+ task_id = pq.enqueue(my_task)
+ pq.cancel(task_id)
+
+ # Counts
+ pq.pending_count()
+ pq.periodic_count()
+
+ # List failed/completed
+ pq.list_failed(limit=10)
+ pq.list_completed(limit=10)
+
+ # Clear old tasks
+ pq.clear_failed(before=datetime.now(UTC) - timedelta(days=7))
+ pq.clear_completed(before=datetime.now(UTC) - timedelta(days=1))
+ pq.clear_all()
+ ```
+
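+ Since clearing old tasks is itself a recurring chore, one option is to let pq schedule it, using only the APIs shown above:
+
+ ```python
+ from datetime import datetime, timedelta, UTC
+
+ def cleanup() -> None:
+     pq.clear_completed(before=datetime.now(UTC) - timedelta(days=1))
+     pq.clear_failed(before=datetime.now(UTC) - timedelta(days=7))
+
+ pq.schedule(cleanup, run_every=timedelta(hours=6))
+ ```
+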
+ ## Fork Isolation
+
+ Every task runs in a forked child process:
+
+ ```
+ Worker (parent)
+ │
+ ├── fork() → Child executes task → exits
+ │           (OOM/crash only affects child)
+ │
+ └── Continues processing next task
+ ```
+
+ The parent monitors via `os.wait4()` and detects:
+ - **Timeout** - Task exceeded `max_runtime`
+ - **OOM** - Killed by SIGKILL (OOM killer)
+ - **Signals** - Killed by other signals
+
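+ A compact sketch of this fork-and-classify loop (illustrative, not pq's actual worker code; assumes `task` is a zero-argument callable):
+
+ ```python
+ import os
+ import signal
+
+ def run_isolated(task) -> None:
+     pid = os.fork()
+     if pid == 0:
+         # Child: run the task, then exit without returning to the worker loop
+         try:
+             task()
+             os._exit(0)
+         except Exception:
+             os._exit(1)
+     # Parent: block until the child exits, then classify the outcome
+     _, status, _rusage = os.wait4(pid, 0)
+     if os.WIFSIGNALED(status):
+         sig = os.WTERMSIG(status)
+         print("likely OOM-killed" if sig == signal.SIGKILL else f"killed by signal {sig}")
+     elif os.WEXITSTATUS(status) != 0:
+         print("task raised an exception")
+ ```
+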
+ ## Multiple Workers
+
+ Run multiple workers for parallel processing:
+
+ ```bash
+ # Terminal 1
+ python -c "from myapp import pq; pq.run_worker()"
+
+ # Terminal 2
+ python -c "from myapp import pq; pq.run_worker()"
+ ```
+
+ Tasks are claimed with `FOR UPDATE SKIP LOCKED`, so each task is handed to exactly one worker.
+
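+ The claim query behind that guarantee looks roughly like this (hypothetical table and column names; pq's real schema may differ):
+
+ ```python
+ from sqlalchemy import create_engine, text
+
+ engine = create_engine("postgresql://localhost/mydb")
+
+ CLAIM = text("""
+     UPDATE tasks
+        SET status = 'RUNNING'
+      WHERE id = (
+             SELECT id FROM tasks
+              WHERE status = 'PENDING'
+              ORDER BY priority DESC, run_at
+              LIMIT 1
+              FOR UPDATE SKIP LOCKED
+            )
+     RETURNING id
+ """)
+
+ with engine.begin() as conn:
+     row = conn.execute(CLAIM).first()  # None if another worker claimed it first
+ ```
+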
+ ## Error Handling
+
+ Failed tasks are marked with status `FAILED`:
+
+ ```python
+ for task in pq.list_failed():
+     print(f"{task.name}: {task.error}")
+ ```
+
+ ## Development
+
+ Start Postgres:
+
+ ```bash
+ make dev
+ ```
+
+ Run tests:
+
+ ```bash
+ uv run pytest
+ ```
+
+ See [CLAUDE.md](CLAUDE.md) for full development instructions.
+
+ ## License
+
+ MIT
@@ -0,0 +1,46 @@
+ [project]
+ name = "python-pq"
+ version = "0.1.1"
+ description = "Postgres-backed job queue for Python"
+ readme = "README.md"
+ authors = [
+     { name = "ricwo", email = "r@cogram.com" }
+ ]
+ requires-python = ">=3.14"
+ dependencies = [
+     "alembic>=1.17.2",
+     "click>=8.3.1",
+     "croniter>=6.0.0",
+     "dill>=0.4.0",
+     "loguru>=0.7.3",
+     "psycopg2-binary>=2.9.11",
+     "pydantic>=2.12.5",
+     "pydantic-settings>=2.12.0",
+     "sqlalchemy>=2.0.45",
+ ]
+
+ [project.scripts]
+ pq = "pq.cli:main"
+
+ [build-system]
+ requires = ["uv_build>=0.8.12,<0.9.0"]
+ build-backend = "uv_build"
+
+ [tool.uv.build-backend]
+ module-name = "pq"
+
+ [dependency-groups]
+ dev = [
+     "pre-commit>=4.5.1",
+     "pytest>=9.0.2",
+     "ruff>=0.14.10",
+     "ty>=0.0.8",
+ ]
+
+ [tool.pytest.ini_options]
+ filterwarnings = [
+     # Suppress fork+threads warning in tests. This occurs because multiprocessing.Manager
+     # creates background threads for proxy objects, and our worker uses os.fork().
+     # In production, workers run in separate processes (not threads), so this is safe.
+     "ignore:This process .* is multi-threaded, use of fork\\(\\) may lead to deadlocks in the child:DeprecationWarning",
+ ]
@@ -0,0 +1,12 @@
+ """PQ - Postgres-backed task queue."""
+
+ import pq.logging  # noqa: F401 - configures loguru on import
+
+ from pq.client import PQ
+ from pq.models import TaskStatus
+ from pq.priority import Priority
+ from pq.worker import TaskTimeoutError
+
+ __version__ = "0.1.1"
+
+ __all__ = ["PQ", "Priority", "TaskStatus", "TaskTimeoutError"]