python-pq 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pq/__init__.py +12 -0
- pq/client.py +352 -0
- pq/config.py +11 -0
- pq/logging.py +27 -0
- pq/models.py +90 -0
- pq/priority.py +16 -0
- pq/registry.py +74 -0
- pq/serialization.py +120 -0
- pq/worker.py +472 -0
- python_pq-0.1.1.dist-info/METADATA +256 -0
- python_pq-0.1.1.dist-info/RECORD +13 -0
- python_pq-0.1.1.dist-info/WHEEL +4 -0
- python_pq-0.1.1.dist-info/entry_points.txt +3 -0
pq/__init__.py
ADDED
@@ -0,0 +1,12 @@
"""PQ - Postgres-backed task queue."""

import pq.logging  # noqa: F401 - configures loguru on import

from pq.client import PQ
from pq.models import TaskStatus
from pq.priority import Priority
from pq.worker import TaskTimeoutError

__version__ = "0.1.0"

__all__ = ["PQ", "Priority", "TaskStatus", "TaskTimeoutError"]
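The public surface is just these four names. Two things worth noticing in review: importing pq reconfigures loguru globally as a side effect of `import pq.logging`, and `__version__` still reads "0.1.0" although the wheel is versioned 0.1.1. A minimal import sketch:

from pq import PQ, Priority, TaskStatus, TaskTimeoutError  # also triggers pq.logging setup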
pq/client.py
ADDED
@@ -0,0 +1,352 @@
"""PQ client - main interface for task queue."""

from collections.abc import Callable, Set
from contextlib import contextmanager
from datetime import UTC, datetime, timedelta
from typing import Any

from croniter import croniter
from croniter.croniter import CroniterBadCronError
from sqlalchemy import create_engine, delete, func, select
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker

from pq.models import Base, Periodic, Task, TaskStatus
from pq.priority import Priority
from pq.registry import get_function_path
from pq.serialization import serialize


class PQ:
    """Postgres-backed task queue client."""

    def __init__(self, database_url: str) -> None:
        """Initialize PQ with database connection.

        Args:
            database_url: PostgreSQL connection string.
        """
        self._engine: Engine = create_engine(database_url)
        self._session_factory = sessionmaker(bind=self._engine)

    @contextmanager
    def session(self) -> Any:
        """Get a database session context manager."""
        session = self._session_factory()
        try:
            yield session
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def create_tables(self) -> None:
        """Create all tables (for testing)."""
        Base.metadata.create_all(self._engine)

    def drop_tables(self) -> None:
        """Drop all tables (for testing)."""
        Base.metadata.drop_all(self._engine)

    def clear_all(self) -> None:
        """Clear all tasks and periodic schedules."""
        with self.session() as session:
            session.execute(delete(Task))
            session.execute(delete(Periodic))

    def enqueue(
        self,
        task: Callable[..., Any],
        *args: Any,
        run_at: datetime | None = None,
        priority: Priority = Priority.NORMAL,
        **kwargs: Any,
    ) -> int:
        """Enqueue a one-off task.

        Args:
            task: Callable function to execute.
            *args: Positional arguments to pass to the handler.
            run_at: When to run the task. Defaults to now.
            priority: Task priority. Higher = higher priority. Defaults to NORMAL.
            **kwargs: Keyword arguments to pass to the handler.

        Returns:
            Task ID.

        Raises:
            ValueError: If task is a lambda, closure, or cannot be imported.
        """
        name = get_function_path(task)
        payload = serialize(args, kwargs)

        if run_at is None:
            run_at = datetime.now(UTC)

        task_obj = Task(name=name, payload=payload, run_at=run_at, priority=priority)

        with self.session() as session:
            session.add(task_obj)
            session.flush()
            return task_obj.id

    def schedule(
        self,
        task: Callable[..., Any],
        *args: Any,
        run_every: timedelta | None = None,
        cron: str | croniter | None = None,
        priority: Priority = Priority.NORMAL,
        **kwargs: Any,
    ) -> int:
        """Schedule a periodic task.

        If a periodic task with this function already exists, it will be updated.
        Either run_every or cron must be provided, but not both.

        Args:
            task: Callable function to execute.
            *args: Positional arguments to pass to the handler.
            run_every: Interval between executions (e.g., timedelta(hours=1)).
            cron: Cron expression string (e.g., "0 9 * * 1") or croniter object.
            priority: Task priority. Higher = higher priority. Defaults to NORMAL.
            **kwargs: Keyword arguments to pass to the handler.

        Returns:
            Periodic task ID.

        Raises:
            ValueError: If neither run_every nor cron is provided, or if both are.
            ValueError: If cron expression is invalid.
            ValueError: If task is a lambda, closure, or cannot be imported.
        """
        if run_every is None and cron is None:
            raise ValueError("Either run_every or cron must be provided")
        if run_every is not None and cron is not None:
            raise ValueError("Only one of run_every or cron can be provided")

        # Validate and normalize cron expression
        cron_expr: str | None = None
        if cron is not None:
            if isinstance(cron, croniter):
                # Extract expression from croniter object
                cron_expr = " ".join(str(f) for f in cron.expressions)
            else:
                # Validate string expression
                try:
                    croniter(cron)
                except (KeyError, ValueError, CroniterBadCronError) as e:
                    raise ValueError(f"Invalid cron expression '{cron}': {e}") from e
                cron_expr = cron

        name = get_function_path(task)
        payload = serialize(args, kwargs)

        # Calculate next_run based on cron or interval
        now = datetime.now(UTC)
        if cron_expr:
            cron_iter = croniter(cron_expr, now)
            next_run = cron_iter.get_next(datetime)
        else:
            next_run = now

        with self.session() as session:
            stmt = (
                insert(Periodic)
                .values(
                    name=name,
                    payload=payload,
                    priority=priority,
                    run_every=run_every,
                    cron=cron_expr,
                    next_run=next_run,
                )
                .on_conflict_do_update(
                    index_elements=["name"],
                    set_={
                        "payload": payload,
                        "priority": priority,
                        "run_every": run_every,
                        "cron": cron_expr,
                        "next_run": next_run,
                    },
                )
                .returning(Periodic.id)
            )
            result = session.execute(stmt)
            return result.scalar_one()

    def cancel(self, task_id: int) -> bool:
        """Cancel a one-off task by ID.

        Args:
            task_id: Task ID.

        Returns:
            True if task was found and deleted, False otherwise.
        """
        with self.session() as session:
            stmt = delete(Task).where(Task.id == task_id)
            result = session.execute(stmt)
            return result.rowcount > 0

    def unschedule(self, task: Callable[..., Any]) -> bool:
        """Remove a periodic task.

        Args:
            task: The scheduled function to remove.

        Returns:
            True if task was found and deleted, False otherwise.
        """
        name = get_function_path(task)
        with self.session() as session:
            stmt = delete(Periodic).where(Periodic.name == name)
            result = session.execute(stmt)
            return result.rowcount > 0

    def pending_count(self) -> int:
        """Count pending one-off tasks."""
        with self.session() as session:
            result = session.execute(
                select(func.count())
                .select_from(Task)
                .where(Task.status == TaskStatus.PENDING)
            )
            return result.scalar_one()

    def periodic_count(self) -> int:
        """Count periodic task schedules."""
        with self.session() as session:
            result = session.execute(select(func.count()).select_from(Periodic))
            return result.scalar_one()

    def get_task(self, task_id: int) -> Task | None:
        """Get a task by ID.

        Args:
            task_id: Task ID.

        Returns:
            Task object or None if not found.
        """
        with self.session() as session:
            return session.get(Task, task_id)

    def list_failed(self, limit: int = 100) -> list[Task]:
        """List failed tasks.

        Args:
            limit: Maximum number of tasks to return.

        Returns:
            List of failed tasks, most recent first.
        """
        with self.session() as session:
            stmt = (
                select(Task)
                .where(Task.status == TaskStatus.FAILED)
                .order_by(Task.completed_at.desc())
                .limit(limit)
            )
            return list(session.execute(stmt).scalars().all())

    def list_completed(self, limit: int = 100) -> list[Task]:
        """List completed tasks.

        Args:
            limit: Maximum number of tasks to return.

        Returns:
            List of completed tasks, most recent first.
        """
        with self.session() as session:
            stmt = (
                select(Task)
                .where(Task.status == TaskStatus.COMPLETED)
                .order_by(Task.completed_at.desc())
                .limit(limit)
            )
            return list(session.execute(stmt).scalars().all())

    def clear_completed(self, before: datetime | None = None) -> int:
        """Clear completed tasks.

        Args:
            before: Only clear tasks completed before this time. If None, clears all.

        Returns:
            Number of tasks deleted.
        """
        with self.session() as session:
            stmt = delete(Task).where(Task.status == TaskStatus.COMPLETED)
            if before is not None:
                stmt = stmt.where(Task.completed_at < before)
            result = session.execute(stmt)
            return result.rowcount

    def clear_failed(self, before: datetime | None = None) -> int:
        """Clear failed tasks.

        Args:
            before: Only clear tasks failed before this time. If None, clears all.

        Returns:
            Number of tasks deleted.
        """
        with self.session() as session:
            stmt = delete(Task).where(Task.status == TaskStatus.FAILED)
            if before is not None:
                stmt = stmt.where(Task.completed_at < before)
            result = session.execute(stmt)
            return result.rowcount

    def run_worker(
        self,
        *,
        poll_interval: float = 1.0,
        max_runtime: float = 30 * 60,
        priorities: Set[Priority] | None = None,
    ) -> None:
        """Run the worker loop (blocking).

        Each task executes in a forked child process for memory isolation.

        Args:
            poll_interval: Seconds to sleep between polls when idle.
            max_runtime: Maximum execution time per task in seconds. Default: 30 min.
            priorities: If set, only process tasks with these priority levels.
                Use this to dedicate workers to specific priority tiers.
        """
        from pq.worker import run_worker

        run_worker(
            self,
            poll_interval=poll_interval,
            max_runtime=max_runtime,
            priorities=priorities,
        )

    def run_worker_once(
        self,
        *,
        max_runtime: float = 30 * 60,
        priorities: Set[Priority] | None = None,
    ) -> bool:
        """Process a single task if available.

        Each task executes in a forked child process for memory isolation.

        Args:
            max_runtime: Maximum execution time per task in seconds. Default: 30 min.
            priorities: If set, only process tasks with these priority levels.

        Returns:
            True if a task was processed, False if queue was empty.
        """
        from pq.worker import run_worker_once

        return run_worker_once(self, max_runtime=max_runtime, priorities=priorities)
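Taken together, the client API supports a workflow like the minimal sketch below. It is illustrative only: myapp.tasks.send_email is a hypothetical importable function, and arguments must survive pq.serialization.serialize (pq/serialization.py, +120 lines, not shown in this section).

from datetime import timedelta

from pq import PQ, Priority
from myapp.tasks import send_email  # hypothetical top-level task function

queue = PQ("postgresql://postgres:postgres@localhost:5433/postgres")
queue.create_tables()  # for testing; production schemas would be migrated separately

# One-off task, stored as "myapp.tasks:send_email" plus a serialized payload
task_id = queue.enqueue(send_email, "user@example.com", priority=Priority.HIGH)

# Periodic task: runs hourly (alternatively cron="0 * * * *"); re-scheduling
# the same function upserts the existing row rather than adding a duplicate
queue.schedule(send_email, "digest@example.com", run_every=timedelta(hours=1))

# Blocking worker loop; each task runs in a forked child process
queue.run_worker(poll_interval=1.0)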
pq/config.py
ADDED
@@ -0,0 +1,11 @@
"""PQ configuration using pydantic-settings."""

from pydantic_settings import BaseSettings


class PQSettings(BaseSettings):
    """Configuration for PQ task queue."""

    database_url: str = "postgresql://postgres:postgres@localhost:5433/postgres"

    model_config = {"env_prefix": "PQ_"}
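A sketch of how these settings resolve, assuming standard pydantic-settings behavior. Note that PQ itself takes database_url explicitly, so wiring PQSettings into the client is presumably done by the caller or by the console entry point (not shown in this section):

import os

from pq.config import PQSettings

os.environ["PQ_DATABASE_URL"] = "postgresql://app:secret@db.internal:5432/queue"

settings = PQSettings()       # env vars with the PQ_ prefix override the localhost default
print(settings.database_url)  # postgresql://app:secret@db.internal:5432/queue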
pq/logging.py
ADDED
@@ -0,0 +1,27 @@
"""Centralized loguru configuration for PQ."""

import sys

from loguru import logger


def configure_logging() -> None:
    """Configure loguru with a clean, aligned format.

    Called automatically when pq is imported.
    """
    # Remove default handler
    logger.remove()

    # Add handler with clean format
    # Fixed-width level, simple format without variable-length source info
    logger.add(
        sys.stderr,
        format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <level>{message}</level>",
        level="DEBUG",
        colorize=True,
    )


# Auto-configure on import
configure_logging()
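Because configure_logging() runs at import time and logger.remove() with no arguments drops every existing sink, importing pq replaces whatever loguru configuration the host application had. An application that wants its own format can simply reconfigure after the import, a sketch using only standard loguru calls:

from loguru import logger

import pq  # side effect: pq's stderr handler replaces loguru's default sink

logger.remove()                                       # drop pq's handler in turn
logger.add("app.log", level="INFO", serialize=True)   # restore application logging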
pq/models.py
ADDED
@@ -0,0 +1,90 @@
"""SQLAlchemy 2.0 models for PQ task queue."""

from datetime import datetime, timedelta
from enum import StrEnum
from typing import Any

from sqlalchemy import (
    BigInteger,
    DateTime,
    Enum,
    Identity,
    Index,
    Integer,
    Interval,
    SmallInteger,
    String,
    Text,
    func,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class TaskStatus(StrEnum):
    """Task execution status."""

    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"


class Base(DeclarativeBase):
    """Base class for all models."""

    pass


class Task(Base):
    """One-off task with status tracking."""

    __tablename__ = "pq_tasks"
    __table_args__ = (
        Index("ix_pq_tasks_status_priority_run_at", "status", "priority", "run_at"),
    )

    id: Mapped[int] = mapped_column(BigInteger, Identity(), primary_key=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    payload: Mapped[dict[str, Any]] = mapped_column(JSONB, nullable=False, default=dict)
    priority: Mapped[int] = mapped_column(SmallInteger, nullable=False, default=0)
    status: Mapped[TaskStatus] = mapped_column(
        Enum(TaskStatus, name="task_status", create_constraint=True),
        nullable=False,
        default=TaskStatus.PENDING,
    )
    run_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    started_at: Mapped[datetime | None] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    completed_at: Mapped[datetime | None] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    error: Mapped[str | None] = mapped_column(Text, nullable=True)
    attempts: Mapped[int] = mapped_column(Integer, nullable=False, default=0)


class Periodic(Base):
    """Recurring task with interval or cron scheduling."""

    __tablename__ = "pq_periodic"
    __table_args__ = (
        Index("ix_pq_periodic_priority_next_run", "priority", "next_run"),
    )

    id: Mapped[int] = mapped_column(BigInteger, Identity(), primary_key=True)
    name: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
    payload: Mapped[dict[str, Any]] = mapped_column(JSONB, nullable=False, default=dict)
    priority: Mapped[int] = mapped_column(SmallInteger, nullable=False, default=0)
    run_every: Mapped[timedelta | None] = mapped_column(Interval, nullable=True)
    cron: Mapped[str | None] = mapped_column(String(100), nullable=True)
    next_run: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False)
    last_run: Mapped[datetime | None] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
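The composite index on (status, priority, run_at) lines up with a priority-then-due-date dequeue. The actual worker query lives in pq/worker.py (+472 lines, not shown in this section), but the usual Postgres queue idiom over this schema would look roughly like this sketch:

from datetime import UTC, datetime

from sqlalchemy import select

from pq.models import Task, TaskStatus

# Pick the highest-priority due task; SKIP LOCKED lets concurrent workers
# claim different rows without blocking each other
stmt = (
    select(Task)
    .where(Task.status == TaskStatus.PENDING, Task.run_at <= datetime.now(UTC))
    .order_by(Task.priority.desc(), Task.run_at)
    .limit(1)
    .with_for_update(skip_locked=True)
)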
pq/priority.py
ADDED
@@ -0,0 +1,16 @@
"""Priority levels for task ordering."""

from enum import IntEnum


class Priority(IntEnum):
    """Task priority levels.

    Higher values = higher priority (processed first).
    """

    BATCH = 0
    LOW = 25
    NORMAL = 50
    HIGH = 75
    CRITICAL = 100
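Because Priority is an IntEnum stored in a SmallInteger column, values compare and sort as plain integers:

from pq.priority import Priority

assert Priority.CRITICAL > Priority.NORMAL > Priority.BATCH  # ordinary int ordering
assert int(Priority.HIGH) == 75                              # what lands in the column

# run_worker(priorities=...) takes a set, so a dedicated urgent-only worker is just:
urgent = {Priority.HIGH, Priority.CRITICAL}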
pq/registry.py
ADDED
@@ -0,0 +1,74 @@
"""Function path resolution for task handlers."""

import importlib
from collections.abc import Callable
from types import FunctionType
from typing import Any, cast


def get_function_path(func: Callable[..., Any]) -> str:
    """Get the import path for a function as 'module:name'.

    Args:
        func: A callable function (must be a top-level function, not a lambda).

    Returns:
        Import path in 'module:name' format.

    Raises:
        ValueError: If the function cannot be serialized (lambda, closure, etc).
    """
    # Cast to FunctionType for type checker - we verify it has the needed attributes
    fn = cast(FunctionType, func)

    # Check for lambda
    if fn.__name__ == "<lambda>":
        raise ValueError(
            "Cannot enqueue lambda functions - use a named function instead"
        )

    # Check for closure (has free variables)
    if hasattr(fn, "__code__") and fn.__code__.co_freevars:
        raise ValueError(
            f"Cannot enqueue closure '{fn.__name__}' - closures capture local variables "
            "and cannot be imported by the worker"
        )

    module = getattr(fn, "__module__", None)
    name = getattr(fn, "__name__", None)

    if module is None or name is None:
        raise ValueError(f"Cannot determine import path for {func}")

    return f"{module}:{name}"


def resolve_function_path(path: str) -> Callable[..., Any]:
    """Import and return a function from its 'module:name' path.

    Args:
        path: Import path in 'module:name' format.

    Returns:
        The imported callable function.

    Raises:
        ValueError: If the path is invalid or the function cannot be imported.
    """
    if ":" not in path:
        raise ValueError(
            f"Invalid function path: {path} (expected 'module:name' format)"
        )

    module_path, func_name = path.rsplit(":", 1)

    try:
        module = importlib.import_module(module_path)
        func = getattr(module, func_name)
        if not callable(func):
            raise ValueError(f"{path} is not callable")
        return func
    except ImportError as e:
        raise ValueError(f"Cannot import module '{module_path}': {e}") from e
    except AttributeError as e:
        raise ValueError(f"Module '{module_path}' has no function '{func_name}'") from e
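A round-trip sketch of the two helpers; the lambda call at the end raises. Note that a function defined in a script resolves via "__main__" locally but would not be importable by a separate worker process, so real tasks need to live in a proper module:

from pq.registry import get_function_path, resolve_function_path


def greet(name: str) -> str:
    return f"Hello, {name}!"


path = get_function_path(greet)     # e.g. "__main__:greet" when run as a script
func = resolve_function_path(path)  # importlib round trip back to the function
assert func("pq") == "Hello, pq!"

get_function_path(lambda x: x)      # ValueError: cannot enqueue lambda functions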