rrq 0.3.6__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rrq/__init__.py +14 -0
- rrq/cron.py +153 -0
- rrq/settings.py +5 -0
- rrq/worker.py +45 -0
- rrq-0.4.0.dist-info/METADATA +301 -0
- rrq-0.4.0.dist-info/RECORD +16 -0
- rrq-0.3.6.dist-info/METADATA +0 -205
- rrq-0.3.6.dist-info/RECORD +0 -15
- {rrq-0.3.6.dist-info → rrq-0.4.0.dist-info}/WHEEL +0 -0
- {rrq-0.3.6.dist-info → rrq-0.4.0.dist-info}/entry_points.txt +0 -0
- {rrq-0.3.6.dist-info → rrq-0.4.0.dist-info}/licenses/LICENSE +0 -0
rrq/__init__.py
CHANGED
@@ -0,0 +1,14 @@
+from .cron import CronJob, CronSchedule
+from .worker import RRQWorker
+from .client import RRQClient
+from .registry import JobRegistry
+from .settings import RRQSettings
+
+__all__ = [
+    "CronJob",
+    "CronSchedule",
+    "RRQWorker",
+    "RRQClient",
+    "JobRegistry",
+    "RRQSettings",
+]
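With these re-exports, the package's main entry points can be imported from the package root instead of the individual submodules. A minimal sketch of what that enables (the handler name below is a placeholder, not part of the package):

```python
# Illustrative only: these names are re-exported by rrq/__init__.py in 0.4.0.
from rrq import CronJob, CronSchedule, RRQClient, RRQSettings, RRQWorker

settings = RRQSettings()  # picks up RRQ_-prefixed environment variables
nightly = CronJob(function_name="nightly_cleanup", schedule="0 2 * * *")  # placeholder handler name
```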
rrq/cron.py
ADDED
@@ -0,0 +1,153 @@
+from __future__ import annotations
+
+from datetime import UTC, datetime, timedelta
+from typing import Any, Optional, Sequence
+
+from pydantic import BaseModel, Field, PrivateAttr
+
+MONTH_NAMES = {
+    "jan": 1,
+    "feb": 2,
+    "mar": 3,
+    "apr": 4,
+    "may": 5,
+    "jun": 6,
+    "jul": 7,
+    "aug": 8,
+    "sep": 9,
+    "oct": 10,
+    "nov": 11,
+    "dec": 12,
+}
+
+WEEKDAY_NAMES = {
+    "sun": 0,
+    "mon": 1,
+    "tue": 2,
+    "wed": 3,
+    "thu": 4,
+    "fri": 5,
+    "sat": 6,
+}
+
+
+def _parse_value(value: str, names: dict[str, int], min_val: int, max_val: int) -> int:
+    if value.lower() in names:
+        return names[value.lower()]
+    num = int(value)
+    if names is WEEKDAY_NAMES and num == 7:
+        num = 0
+    if not (min_val <= num <= max_val):
+        raise ValueError(f"value {num} out of range {min_val}-{max_val}")
+    return num
+
+
+def _parse_field(field: str, *, names: dict[str, int] | None, min_val: int, max_val: int) -> Sequence[int]:
+    names = names or {}
+    if field == "*":
+        return list(range(min_val, max_val + 1))
+    values: set[int] = set()
+    for part in field.split(','):
+        step = 1
+        if '/' in part:
+            base, step_str = part.split('/', 1)
+            step = int(step_str)
+        else:
+            base = part
+        if base == "*":
+            start, end = min_val, max_val
+        elif '-' in base:
+            a, b = base.split('-', 1)
+            start = _parse_value(a, names, min_val, max_val)
+            end = _parse_value(b, names, min_val, max_val)
+        else:
+            val = _parse_value(base, names, min_val, max_val)
+            start = end = val
+        if start > end:
+            raise ValueError(f"invalid range {base}")
+        for v in range(start, end + 1, step):
+            values.add(v)
+    return sorted(values)
+
+
+class CronSchedule:
+    """Represents a cron schedule expression."""
+
+    def __init__(self, expression: str) -> None:
+        fields = expression.split()
+        if len(fields) != 5:
+            raise ValueError("Cron expression must have 5 fields")
+        minute, hour, dom, month, dow = fields
+        self.minutes = _parse_field(minute, names=None, min_val=0, max_val=59)
+        self.hours = _parse_field(hour, names=None, min_val=0, max_val=23)
+        self.dom = _parse_field(dom, names=None, min_val=1, max_val=31)
+        self.months = _parse_field(month, names=MONTH_NAMES, min_val=1, max_val=12)
+        self.dow = _parse_field(dow, names=WEEKDAY_NAMES, min_val=0, max_val=6)
+        self.dom_all = dom == "*"
+        self.dow_all = dow == "*"
+
+    def next_after(self, dt: datetime) -> datetime:
+        dt = dt.replace(second=0, microsecond=0) + timedelta(minutes=1)
+        while True:
+            if dt.month not in self.months:
+                dt += timedelta(minutes=1)
+                continue
+            if dt.hour not in self.hours or dt.minute not in self.minutes:
+                dt += timedelta(minutes=1)
+                continue
+            dom_match = dt.day in self.dom
+            # Convert Python weekday (Monday=0) to cron weekday (Sunday=0)
+            # Python: Mon=0, Tue=1, Wed=2, Thu=3, Fri=4, Sat=5, Sun=6
+            # Cron: Sun=0, Mon=1, Tue=2, Wed=3, Thu=4, Fri=5, Sat=6
+            python_weekday = dt.weekday()
+            cron_weekday = (python_weekday + 1) % 7
+            dow_match = cron_weekday in self.dow
+
+            if self.dom_all and self.dow_all:
+                condition = True
+            elif self.dom_all:
+                # Only day-of-week constraint
+                condition = dow_match
+            elif self.dow_all:
+                # Only day-of-month constraint
+                condition = dom_match
+            else:
+                # Both constraints specified - use OR logic (standard cron behavior)
+                condition = dom_match or dow_match
+            if condition:
+                return dt
+            dt += timedelta(minutes=1)
+
+
+
+class CronJob(BaseModel):
+    """Simple cron job specification based on a cron schedule."""
+
+    function_name: str
+    schedule: str = Field(
+        description="Cron expression 'm h dom mon dow'. Resolution is one minute."
+    )
+    args: list[Any] = Field(default_factory=list)
+    kwargs: dict[str, Any] = Field(default_factory=dict)
+    queue_name: Optional[str] = None
+    unique: bool = False
+
+    # Next run time and parsed schedule are maintained at runtime
+    next_run_time: Optional[datetime] = Field(default=None, exclude=True)
+    _cron: CronSchedule | None = PrivateAttr(default=None)
+
+    def model_post_init(self, __context: Any) -> None:  # type: ignore[override]
+        self._cron = CronSchedule(self.schedule)
+
+    def schedule_next(self, now: Optional[datetime] = None) -> None:
+        """Compute the next run time strictly after *now*."""
+        now = (now or datetime.now(UTC)).replace(second=0, microsecond=0)
+        if self._cron is None:
+            self._cron = CronSchedule(self.schedule)
+        self.next_run_time = self._cron.next_after(now)
+
+    def due(self, now: Optional[datetime] = None) -> bool:
+        now = now or datetime.now(UTC)
+        if self.next_run_time is None:
+            self.schedule_next(now)
+        return now >= (self.next_run_time or now)
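As a quick check on the parsing and scheduling logic above, the following sketch (not part of the package) runs `CronSchedule.next_after` and `CronJob.due` against fixed timestamps; the expected results follow from the Sunday=0 weekday mapping and the strictly-after semantics in the code:

```python
from datetime import UTC, datetime

from rrq.cron import CronJob, CronSchedule

# Every 15 minutes, during hours 9-17, weekdays only.
sched = CronSchedule("*/15 9-17 * * mon-fri")
# Friday 2024-01-05 17:50 UTC is past the day's last slot (17:45),
# so the next match is Monday 09:00.
print(sched.next_after(datetime(2024, 1, 5, 17, 50, tzinfo=UTC)))
# -> 2024-01-08 09:00:00+00:00

job = CronJob(function_name="demo_job", schedule="30 2 * * *")  # placeholder handler name
job.schedule_next(datetime(2024, 1, 5, 2, 0, tzinfo=UTC))
print(job.next_run_time)                                  # 2024-01-05 02:30:00+00:00
print(job.due(datetime(2024, 1, 5, 2, 31, tzinfo=UTC)))   # True: next_run_time has passed
```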
rrq/settings.py
CHANGED
@@ -21,6 +21,7 @@ from .constants import (
     DEFAULT_UNIQUE_JOB_LOCK_TTL_SECONDS,
 )
 from .registry import JobRegistry
+from .cron import CronJob


 class RRQSettings(BaseSettings):
@@ -97,6 +98,10 @@ class RRQSettings(BaseSettings):
         default=None,
         description="Job registry instance, typically provided by the application.",
     )
+    cron_jobs: list[CronJob] = Field(
+        default_factory=list,
+        description="Optional list of cron job specifications to run periodically.",
+    )
     model_config = SettingsConfigDict(
         env_prefix="RRQ_",
         extra="ignore",
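With this field in place, periodic work can be declared on the same settings object the worker and client already share. A minimal sketch (the handler name is a placeholder and must also be registered in the `job_registry`):

```python
from rrq.cron import CronJob
from rrq.settings import RRQSettings

settings = RRQSettings(
    cron_jobs=[
        # Placeholder handler; register "purge_expired_sessions" in your JobRegistry as usual.
        CronJob(function_name="purge_expired_sessions", schedule="*/30 * * * *", unique=True),
    ],
)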
rrq/worker.py
CHANGED
@@ -28,6 +28,7 @@ from .job import Job, JobStatus
 from .registry import JobRegistry
 from .settings import RRQSettings
 from .store import JobStore
+from .cron import CronJob

 logger = logging.getLogger(__name__)

@@ -77,11 +78,14 @@ class RRQWorker:
         # Burst mode: process existing jobs then exit
         self.burst = burst

+        self.cron_jobs: list[CronJob] = list(self.settings.cron_jobs)
+
         self._semaphore = asyncio.Semaphore(self.settings.worker_concurrency)
         self._running_tasks: set[asyncio.Task] = set()
         self._shutdown_event = asyncio.Event()
         self._loop = None  # Will be set in run()
         self._health_check_task: Optional[asyncio.Task] = None
+        self._cron_task: Optional[asyncio.Task] = None
         self.status: str = "initializing"  # Worker status (e.g., initializing, running, polling, idle, stopped)
         logger.info(
             f"Initializing RRQWorker {self.worker_id} for queues: {self.queues}"
@@ -135,6 +139,10 @@ class RRQWorker:
         """
         logger.info(f"Worker {self.worker_id} starting run loop.")
         self._health_check_task = self._loop.create_task(self._heartbeat_loop())
+        if self.cron_jobs:
+            for cj in self.cron_jobs:
+                cj.schedule_next()
+            self._cron_task = self._loop.create_task(self._cron_loop())

         while not self._shutdown_event.is_set():
             try:
@@ -181,6 +189,10 @@ class RRQWorker:
             self._health_check_task.cancel()
             with suppress(asyncio.CancelledError):
                 await self._health_check_task
+        if self._cron_task:
+            self._cron_task.cancel()
+            with suppress(asyncio.CancelledError):
+                await self._cron_task

     async def _poll_for_jobs(self, count: int) -> None:
         """Polls configured queues round-robin and attempts to start processing jobs.
@@ -781,6 +793,39 @@ class RRQWorker:

         logger.debug(f"Worker {self.worker_id} heartbeat loop finished.")

+    async def _maybe_enqueue_cron_jobs(self) -> None:
+        """Enqueue cron jobs that are due to run."""
+        now = datetime.now(UTC)
+        for cj in self.cron_jobs:
+            if cj.due(now):
+                unique_key = f"cron:{cj.function_name}" if cj.unique else None
+                try:
+                    await self.client.enqueue(
+                        cj.function_name,
+                        *cj.args,
+                        _queue_name=cj.queue_name,
+                        _unique_key=unique_key,
+                        **cj.kwargs,
+                    )
+                finally:
+                    cj.schedule_next(now)
+
+    async def _cron_loop(self) -> None:
+        logger.debug(f"Worker {self.worker_id} starting cron loop.")
+        while not self._shutdown_event.is_set():
+            try:
+                await self._maybe_enqueue_cron_jobs()
+            except Exception as e:
+                logger.error(
+                    f"Worker {self.worker_id} error running cron jobs: {e}",
+                    exc_info=True,
+                )
+            try:
+                await asyncio.wait_for(self._shutdown_event.wait(), timeout=30)
+            except TimeoutError:
+                pass
+        logger.debug(f"Worker {self.worker_id} cron loop finished.")
+
     async def _close_resources(self) -> None:
         """Closes the worker's resources, primarily the JobStore connection."""
         logger.info(f"Worker {self.worker_id} closing resources...")
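Taken together, the worker snapshots `settings.cron_jobs` at startup, seeds each job's `next_run_time`, and then `_cron_loop` wakes roughly every 30 seconds to enqueue whatever has come due. The sketch below (not the worker's actual code) imitates one pass of that cycle with a stub in place of the Redis-backed client:

```python
import asyncio
from datetime import UTC, datetime, timedelta

from rrq.cron import CronJob


class StubClient:
    """Stand-in for RRQClient: just records enqueue calls."""

    async def enqueue(self, function_name, *args, _queue_name=None, _unique_key=None, **kwargs):
        print(f"enqueue {function_name} queue={_queue_name} unique_key={_unique_key}")


async def cron_tick(client, jobs):
    # Mirrors RRQWorker._maybe_enqueue_cron_jobs: enqueue due jobs, then reschedule.
    now = datetime.now(UTC)
    for cj in jobs:
        if cj.due(now):
            unique_key = f"cron:{cj.function_name}" if cj.unique else None
            try:
                await client.enqueue(
                    cj.function_name, *cj.args,
                    _queue_name=cj.queue_name, _unique_key=unique_key, **cj.kwargs,
                )
            finally:
                cj.schedule_next(now)


job = CronJob(function_name="heartbeat", schedule="* * * * *", unique=True)  # placeholder handler
# Pretend the job was scheduled two minutes ago so this single tick finds it due.
job.schedule_next(datetime.now(UTC) - timedelta(minutes=2))
asyncio.run(cron_tick(StubClient(), [job]))
```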
rrq-0.4.0.dist-info/METADATA
ADDED
@@ -0,0 +1,301 @@
+Metadata-Version: 2.4
+Name: rrq
+Version: 0.4.0
+Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
+Project-URL: Homepage, https://github.com/getresq/rrq
+Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
+Author-email: Mazdak Rezvani <mazdak@me.com>
+License-File: LICENSE
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: System :: Monitoring
+Requires-Python: >=3.11
+Requires-Dist: click>=8.1.3
+Requires-Dist: pydantic-settings>=2.9.1
+Requires-Dist: pydantic>=2.11.4
+Requires-Dist: redis[hiredis]<6,>=4.2.0
+Requires-Dist: watchfiles>=0.19.0
+Provides-Extra: dev
+Requires-Dist: pytest-asyncio>=1.0.0; extra == 'dev'
+Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
+Requires-Dist: pytest>=8.3.5; extra == 'dev'
+Description-Content-Type: text/markdown
+
+# RRQ: Reliable Redis Queue
+
+RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
+
+## Key Features
+
+* **At-Least-Once Semantics**: Uses Redis locks to ensure a job is processed by only one worker at a time. If a worker crashes or shuts down mid-processing, the lock expires, and the job *should* be re-processed (though re-queueing on unclean shutdown isn't implemented here yet - graceful shutdown *does* re-queue).
+* **Automatic Retries with Backoff**: Jobs that fail with standard exceptions are automatically retried based on `max_retries` settings, using exponential backoff for delays.
+* **Explicit Retries**: Handlers can raise `RetryJob` to control retry attempts and delays.
+* **Job Timeouts**: Jobs exceeding their configured timeout (`job_timeout_seconds` or `default_job_timeout_seconds`) are terminated and moved to the DLQ.
+* **Dead Letter Queue (DLQ)**: Jobs that fail permanently (max retries reached, fatal error, timeout) are moved to a DLQ list in Redis for inspection.
+* **Job Uniqueness**: The `_unique_key` parameter in `enqueue` prevents duplicate jobs based on a custom key within a specified TTL.
+* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
+* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
+* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
+* **Cron Jobs**: Periodic jobs can be defined in `RRQSettings.cron_jobs` using a simple cron syntax.
+
+- Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.
+
+- To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:
+
+```python
+await client.enqueue(
+    "process_updates",
+    item_id=123,
+    _unique_key="update:123",
+    _defer_by=10,
+)
+```
+
+## Basic Usage
+
+*(See [`rrq_example.py`](https://github.com/GetResQ/rrq/tree/master/example) in the project root for a runnable example)*
+
+**1. Define Handlers:**
+
+```python
+# handlers.py
+import asyncio
+from rrq.exc import RetryJob
+
+async def my_task(ctx, message: str):
+    job_id = ctx['job_id']
+    attempt = ctx['job_try']
+    print(f"Processing job {job_id} (Attempt {attempt}): {message}")
+    await asyncio.sleep(1)
+    if attempt < 3 and message == "retry_me":
+        raise RetryJob("Needs another go!")
+    print(f"Finished job {job_id}")
+    return {"result": f"Processed: {message}"}
+```
+
+**2. Register Handlers:**
+
+```python
+# main_setup.py (or wherever you initialize)
+from rrq.registry import JobRegistry
+from . import handlers # Assuming handlers.py is in the same directory
+
+job_registry = JobRegistry()
+job_registry.register("process_message", handlers.my_task)
+```
+
+**3. Configure Settings:**
+
+```python
+# config.py
+from rrq.settings import RRQSettings
+
+# Loads from environment variables (RRQ_REDIS_DSN, etc.) or uses defaults
+rrq_settings = RRQSettings()
+# Or override directly:
+# rrq_settings = RRQSettings(redis_dsn="redis://localhost:6379/1")
+```
+
+**4. Enqueue Jobs:**
+
+```python
+# enqueue_script.py
+import asyncio
+from rrq.client import RRQClient
+from config import rrq_settings # Import your settings
+
+async def enqueue_jobs():
+    client = RRQClient(settings=rrq_settings)
+    await client.enqueue("process_message", "Hello RRQ!")
+    await client.enqueue("process_message", "retry_me")
+    await client.close()
+
+if __name__ == "__main__":
+    asyncio.run(enqueue_jobs())
+```
+
+**5. Run a Worker:**
+
+Note: You don't need to run a worker as the Command Line Interface `rrq` is used for
+this purpose.
+
+```python
+# worker_script.py
+from rrq.worker import RRQWorker
+from config import rrq_settings # Import your settings
+from main_setup import job_registry # Import your registry
+
+# Create worker instance
+worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)
+
+# Run the worker (blocking)
+if __name__ == "__main__":
+    worker.run()
+```
+
+You can run multiple instances of `worker_script.py` for concurrent processing.
+
+## Cron Jobs
+
+Add instances of `CronJob` to `RRQSettings.cron_jobs` to run periodic jobs. The
+`schedule` string follows the typical five-field cron format `minute hour day-of-month month day-of-week`.
+It supports the most common features from Unix cron:
+
+- numeric values
+- ranges (e.g. `8-11`)
+- lists separated by commas (e.g. `mon,wed,fri`)
+- step values using `/` (e.g. `*/15`)
+- names for months and days (`jan-dec`, `sun-sat`)
+
+Jobs are evaluated in the server's timezone and run with minute resolution.
+
+### Cron Schedule Examples
+
+```python
+# Every minute
+"* * * * *"
+
+# Every hour at minute 30
+"30 * * * *"
+
+# Every day at 2:30 AM
+"30 2 * * *"
+
+# Every Monday at 9:00 AM
+"0 9 * * mon"
+
+# Every 15 minutes
+"*/15 * * * *"
+
+# Every weekday at 6:00 PM
+"0 18 * * mon-fri"
+
+# First day of every month at midnight
+"0 0 1 * *"
+
+# Every 2 hours during business hours on weekdays
+"0 9-17/2 * * mon-fri"
+```
+
+### Defining Cron Jobs
+
+```python
+from rrq.settings import RRQSettings
+from rrq.cron import CronJob
+
+# Define your cron jobs
+cron_jobs = [
+    # Daily cleanup at 2 AM
+    CronJob(
+        function_name="daily_cleanup",
+        schedule="0 2 * * *",
+        args=["temp_files"],
+        kwargs={"max_age_days": 7}
+    ),
+
+    # Weekly report every Monday at 9 AM
+    CronJob(
+        function_name="generate_weekly_report",
+        schedule="0 9 * * mon",
+        unique=True # Prevent duplicate reports if worker restarts
+    ),
+
+    # Health check every 15 minutes on a specific queue
+    CronJob(
+        function_name="system_health_check",
+        schedule="*/15 * * * *",
+        queue_name="monitoring"
+    ),
+
+    # Backup database every night at 1 AM
+    CronJob(
+        function_name="backup_database",
+        schedule="0 1 * * *",
+        kwargs={"backup_type": "incremental"}
+    ),
+]
+
+# Add to your settings
+rrq_settings = RRQSettings(
+    redis_dsn="redis://localhost:6379/0",
+    cron_jobs=cron_jobs
+)
+```
+
+### Cron Job Handlers
+
+Your cron job handlers are regular async functions, just like other job handlers:
+
+```python
+async def daily_cleanup(ctx, file_type: str, max_age_days: int = 7):
+    """Clean up old files."""
+    job_id = ctx['job_id']
+    print(f"Job {job_id}: Cleaning up {file_type} files older than {max_age_days} days")
+    # Your cleanup logic here
+    return {"cleaned_files": 42, "status": "completed"}
+
+async def generate_weekly_report(ctx):
+    """Generate and send weekly report."""
+    job_id = ctx['job_id']
+    print(f"Job {job_id}: Generating weekly report")
+    # Your report generation logic here
+    return {"report_id": "weekly_2024_01", "status": "sent"}
+
+# Register your handlers
+from rrq.registry import JobRegistry
+
+job_registry = JobRegistry()
+job_registry.register("daily_cleanup", daily_cleanup)
+job_registry.register("generate_weekly_report", generate_weekly_report)
+
+# Add the registry to your settings
+rrq_settings.job_registry = job_registry
+```
+
+**Note:** Cron jobs are automatically enqueued by the worker when they become due. The worker checks for due cron jobs every 30 seconds and enqueues them as regular jobs to be processed.
+
+## Command Line Interface
+
+RRQ provides a command-line interface (CLI) for managing workers and performing health checks:
+
+- **`rrq worker run`** - Run an RRQ worker process.
+  - `--settings` (optional): Specify the Python path to your settings object (e.g., `myapp.worker_config.rrq_settings`). If not provided, it will use the `RRQ_SETTINGS` environment variable or default to a basic `RRQSettings` object.
+  - `--queue` (optional, multiple): Specify queue(s) to poll. Defaults to the `default_queue_name` in settings.
+  - `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit.
+- **`rrq worker watch`** - Run an RRQ worker with auto-restart on file changes.
+  - `--path` (optional): Directory path to watch for changes. Defaults to the current directory.
+  - `--settings` (optional): Same as above.
+  - `--queue` (optional, multiple): Same as above.
+- **`rrq check`** - Perform a health check on active RRQ workers.
+  - `--settings` (optional): Same as above.
+- **`rrq dlq requeue`** - Requeue jobs from the dead letter queue back into a live queue.
+  - `--settings` (optional): Same as above.
+  - `--dlq-name` (optional): Name of the DLQ (without prefix). Defaults to `default_dlq_name` in settings.
+  - `--queue` (optional): Target queue name (without prefix). Defaults to `default_queue_name` in settings.
+  - `--limit` (optional): Maximum number of DLQ jobs to requeue; all if not set.
+
+## Configuration
+
+RRQ can be configured in several ways, with the following precedence:
+
+1. **Command-Line Argument (`--settings`)**: Directly specify the settings object path via the CLI. This takes the highest precedence.
+2. **Environment Variable (`RRQ_SETTINGS`)**: Set the `RRQ_SETTINGS` environment variable to point to your settings object path. Used if `--settings` is not provided.
+3. **Default Settings**: If neither of the above is provided, RRQ will instantiate a default `RRQSettings` object, which can still be influenced by environment variables starting with `RRQ_`.
+4. **Environment Variables (Prefix `RRQ_`)**: Individual settings can be overridden by environment variables starting with `RRQ_`, which are automatically picked up by the `RRQSettings` object.
+5. **.env File**: If `python-dotenv` is installed, RRQ will attempt to load a `.env` file from the current working directory or parent directories. System environment variables take precedence over `.env` variables.
+
+**Important Note on `job_registry`**: The `job_registry` attribute in your `RRQSettings` object is **critical** for RRQ to function. It must be an instance of `JobRegistry` and is used to register job handlers. Without a properly configured `job_registry`, workers will not know how to process jobs, and most operations will fail. Ensure it is set in your settings object to map job names to their respective handler functions.
+
+
+## Core Components
+
+* **`RRQClient` (`client.py`)**: Used to enqueue jobs onto specific queues. Supports deferring jobs (by time delta or specific datetime), assigning custom job IDs, and enforcing job uniqueness via keys.
+* **`RRQWorker` (`worker.py`)**: The process that polls queues, fetches jobs, executes the corresponding handler functions, and manages the job lifecycle based on success, failure, retries, or timeouts. Handles graceful shutdown via signals (SIGINT, SIGTERM).
+* **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
+* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
+* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
+* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `
rrq-0.4.0.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+rrq/__init__.py,sha256=3WYv9UkvnCbjKXrvmqiLm7yuVVQiLclbVCOXq5wb6ZM,290
+rrq/cli.py,sha256=_LbaAH_w2a0VNRR0EctuE4afl-wccvMY2w2VbehFDEQ,16980
+rrq/client.py,sha256=5_bmZ05LKIfY9WFSKU-nYawEupsnrnHT2HewXfC2Ahg,7831
+rrq/constants.py,sha256=F_uZgBI3h00MctnEjBjiCGMrg5jUaz5Bz9I1vkyqNrs,1654
+rrq/cron.py,sha256=9lxJ1OnrTbavJvbIdPp6u5ncYgyD35vRPsSulpVrQko,5244
+rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
+rrq/job.py,sha256=eUbl33QDqDMXPKpo-0dl0Mp29LWWmtbBgRw0sclcwJ4,4011
+rrq/registry.py,sha256=E9W_zx3QiKTBwMOGearaNpDKBDB87JIn0RlMQ3sAcP0,2925
+rrq/settings.py,sha256=AxzSe_rw7-yduKST2c9mPunQWqPE2537XcC_XlMoHWM,4535
+rrq/store.py,sha256=teO0Af8hzBiu7-dFn6_2lz5X90LAZXmtg0VDZuQoAwk,24972
+rrq/worker.py,sha256=KspmZOL6i_dfIypcBi0UpQDpz2NrCj3vEl6CwTNlLKo,42479
+rrq-0.4.0.dist-info/METADATA,sha256=2SFZJlfgwFSpmWfylQ6rSV072HGXlA2MBcECJppV_DY,12914
+rrq-0.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+rrq-0.4.0.dist-info/entry_points.txt,sha256=f8eFjk2ygDSyu9USwXGj5IM8xeyQqZgDa1rSrCj4Mis,36
+rrq-0.4.0.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
+rrq-0.4.0.dist-info/RECORD,,
rrq-0.3.6.dist-info/METADATA
DELETED
@@ -1,205 +0,0 @@
-Metadata-Version: 2.4
-Name: rrq
-Version: 0.3.6
-Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
-Project-URL: Homepage, https://github.com/getresq/rrq
-Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
-Author-email: Mazdak Rezvani <mazdak@me.com>
-License-File: LICENSE
-Classifier: Intended Audience :: Developers
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: System :: Distributed Computing
-Classifier: Topic :: System :: Monitoring
-Requires-Python: >=3.11
-Requires-Dist: click>=8.1.3
-Requires-Dist: pydantic-settings>=2.9.1
-Requires-Dist: pydantic>=2.11.4
-Requires-Dist: redis[hiredis]<6,>=4.2.0
-Requires-Dist: watchfiles>=0.19.0
-Provides-Extra: dev
-Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
-Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
-Requires-Dist: pytest>=8.3.5; extra == 'dev'
-Description-Content-Type: text/markdown
-
-# RRQ: Reliable Redis Queue
-
-RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
-
-## Core Components
-
-* **`RRQClient` (`client.py`)**: Used to enqueue jobs onto specific queues. Supports deferring jobs (by time delta or specific datetime), assigning custom job IDs, and enforcing job uniqueness via keys.
-* **`RRQWorker` (`worker.py`)**: The process that polls queues, fetches jobs, executes the corresponding handler functions, and manages the job lifecycle based on success, failure, retries, or timeouts. Handles graceful shutdown via signals (SIGINT, SIGTERM).
-* **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
-* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
-* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
-* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `RETRYING`).
-* **`RRQSettings` (`settings.py`)**: A Pydantic `BaseSettings` model for configuring RRQ behavior (Redis DSN, queue names, timeouts, retry policies, concurrency, etc.). Loadable from environment variables (prefix `RRQ_`).
-* **`constants.py`**: Defines shared constants like Redis key prefixes and default configuration values.
-* **`exc.py`**: Defines custom exceptions, notably `RetryJob` which handlers can raise to explicitly request a retry, potentially with a custom delay.
-
-## Key Features
-
-* **At-Least-Once Semantics**: Uses Redis locks to ensure a job is processed by only one worker at a time. If a worker crashes or shuts down mid-processing, the lock expires, and the job *should* be re-processed (though re-queueing on unclean shutdown isn't implemented here yet - graceful shutdown *does* re-queue).
-* **Automatic Retries with Backoff**: Jobs that fail with standard exceptions are automatically retried based on `max_retries` settings, using exponential backoff for delays.
-* **Explicit Retries**: Handlers can raise `RetryJob` to control retry attempts and delays.
-* **Job Timeouts**: Jobs exceeding their configured timeout (`job_timeout_seconds` or `default_job_timeout_seconds`) are terminated and moved to the DLQ.
-* **Dead Letter Queue (DLQ)**: Jobs that fail permanently (max retries reached, fatal error, timeout) are moved to a DLQ list in Redis for inspection.
-* **Job Uniqueness**: The `_unique_key` parameter in `enqueue` prevents duplicate jobs based on a custom key within a specified TTL.
-* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
-* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
-* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
-*Note: Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.*
-*To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:*
-
-```python
-await client.enqueue(
-    "process_updates",
-    item_id=123,
-    _unique_key="update:123",
-    _defer_by=10,
-)
-```
-
-## Basic Usage
-
-*(See [`rrq_example.py`](https://github.com/GetResQ/rrq/tree/master/example) in the project root for a runnable example)*
-
-**1. Define Handlers:**
-
-```python
-# handlers.py
-import asyncio
-from rrq.exc import RetryJob
-
-async def my_task(ctx, message: str):
-    job_id = ctx['job_id']
-    attempt = ctx['job_try']
-    print(f"Processing job {job_id} (Attempt {attempt}): {message}")
-    await asyncio.sleep(1)
-    if attempt < 3 and message == "retry_me":
-        raise RetryJob("Needs another go!")
-    print(f"Finished job {job_id}")
-    return {"result": f"Processed: {message}"}
-```
-
-**2. Register Handlers:**
-
-```python
-# main_setup.py (or wherever you initialize)
-from rrq.registry import JobRegistry
-from . import handlers # Assuming handlers.py is in the same directory
-
-job_registry = JobRegistry()
-job_registry.register("process_message", handlers.my_task)
-```
-
-**3. Configure Settings:**
-
-```python
-# config.py
-from rrq.settings import RRQSettings
-
-# Loads from environment variables (RRQ_REDIS_DSN, etc.) or uses defaults
-rrq_settings = RRQSettings()
-# Or override directly:
-# rrq_settings = RRQSettings(redis_dsn="redis://localhost:6379/1")
-```
-
-**4. Enqueue Jobs:**
-
-```python
-# enqueue_script.py
-import asyncio
-from rrq.client import RRQClient
-from config import rrq_settings # Import your settings
-
-async def enqueue_jobs():
-    client = RRQClient(settings=rrq_settings)
-    await client.enqueue("process_message", "Hello RRQ!")
-    await client.enqueue("process_message", "retry_me")
-    await client.close()
-
-if __name__ == "__main__":
-    asyncio.run(enqueue_jobs())
-```
-
-**5. Run a Worker:**
-
-```python
-# worker_script.py
-from rrq.worker import RRQWorker
-from config import rrq_settings # Import your settings
-from main_setup import job_registry # Import your registry
-
-# Create worker instance
-worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)
-
-# Run the worker (blocking)
-if __name__ == "__main__":
-    worker.run()
-```
-
-You can run multiple instances of `worker_script.py` for concurrent processing.
-
-## Configuration
-
-RRQ behavior is configured via the `RRQSettings` object, which loads values from environment variables prefixed with `RRQ_` by default. Key settings include:
-
-* `RRQ_REDIS_DSN`: Connection string for Redis.
-* `RRQ_DEFAULT_QUEUE_NAME`: Default queue name.
-* `RRQ_DEFAULT_MAX_RETRIES`: Default retry limit.
-* `RRQ_DEFAULT_JOB_TIMEOUT_SECONDS`: Default job timeout.
-* `RRQ_WORKER_CONCURRENCY`: Max concurrent jobs per worker.
-* ... and others (see `settings.py`).
-
-## RRQ CLI
-
-RRQ provides a command-line interface (CLI) for interacting with the job queue system. The `rrq` CLI allows you to manage workers, check system health, and get statistics about queues and jobs.
-
-### Usage
-
-```bash
-rrq <command> [options]
-```
-
-### Commands
-
-- **`worker run`**: Run an RRQ worker process to process jobs from queues.
-  ```bash
-  rrq worker run [--burst] --settings <settings_path>
-  ```
-  - `--burst`: Run in burst mode (process one job/batch then exit).
-  - `--settings`: Python settings path for application worker settings (e.g., `myapp.worker_config.rrq_settings`).
-
-- **`worker watch`**: Run an RRQ worker with auto-restart on file changes in a specified directory.
-  ```bash
-  rrq worker watch [--path <directory>] --settings <settings_path>
-  ```
-  - `--path`: Directory to watch for changes (default: current directory).
-  - `--settings`: Python settings path for application worker settings.
-
-- **`check`**: Perform a health check on active RRQ workers.
-  ```bash
-  rrq check --settings <settings_path>
-  ```
-  - `--settings`: Python settings path for application settings.
-
-
-### Configuration
-
-The CLI uses the same `RRQSettings` as the library, loading configuration from environment variables prefixed with `RRQ_`. You can also specify the settings via the `--settings` option for commands.
-
-```bash
-rrq worker run --settings myapp.worker_config.rrq_settings
-```
-
-### Help
-
-For detailed help on any command, use:
-```bash
-rrq <command> --help
-```
rrq-0.3.6.dist-info/RECORD
DELETED
@@ -1,15 +0,0 @@
-rrq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rrq/cli.py,sha256=_LbaAH_w2a0VNRR0EctuE4afl-wccvMY2w2VbehFDEQ,16980
-rrq/client.py,sha256=5_bmZ05LKIfY9WFSKU-nYawEupsnrnHT2HewXfC2Ahg,7831
-rrq/constants.py,sha256=F_uZgBI3h00MctnEjBjiCGMrg5jUaz5Bz9I1vkyqNrs,1654
-rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
-rrq/job.py,sha256=eUbl33QDqDMXPKpo-0dl0Mp29LWWmtbBgRw0sclcwJ4,4011
-rrq/registry.py,sha256=E9W_zx3QiKTBwMOGearaNpDKBDB87JIn0RlMQ3sAcP0,2925
-rrq/settings.py,sha256=BPKP4XjG7z475gqRgHZt4-IvvOs8uZefq4fPfD2Bepk,4350
-rrq/store.py,sha256=teO0Af8hzBiu7-dFn6_2lz5X90LAZXmtg0VDZuQoAwk,24972
-rrq/worker.py,sha256=y0UTziZVh4QbOPv24b8cqbm_xDBM0HtJLwPNYsJPWnE,40706
-rrq-0.3.6.dist-info/METADATA,sha256=MKJ-uoveQQVVI4p_RhRA1Kk-KN9_J348gGYY572HUY0,9224
-rrq-0.3.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-rrq-0.3.6.dist-info/entry_points.txt,sha256=f8eFjk2ygDSyu9USwXGj5IM8xeyQqZgDa1rSrCj4Mis,36
-rrq-0.3.6.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
-rrq-0.3.6.dist-info/RECORD,,
{rrq-0.3.6.dist-info → rrq-0.4.0.dist-info}/WHEEL
File without changes

{rrq-0.3.6.dist-info → rrq-0.4.0.dist-info}/entry_points.txt
File without changes

{rrq-0.3.6.dist-info → rrq-0.4.0.dist-info}/licenses/LICENSE
File without changes