rrq 0.3.6__tar.gz → 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rrq-0.4.0/PKG-INFO +301 -0
- rrq-0.4.0/README.md +274 -0
- {rrq-0.3.6 → rrq-0.4.0}/example/example_rrq_settings.py +23 -0
- {rrq-0.3.6 → rrq-0.4.0}/example/rrq_example.py +53 -3
- {rrq-0.3.6 → rrq-0.4.0}/pyproject.toml +7 -9
- rrq-0.4.0/rrq/__init__.py +14 -0
- rrq-0.4.0/rrq/cron.py +153 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/settings.py +5 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/worker.py +45 -0
- {rrq-0.3.6 → rrq-0.4.0}/tests/test_cli.py +0 -7
- rrq-0.4.0/tests/test_cron.py +252 -0
- {rrq-0.3.6 → rrq-0.4.0}/tests/test_worker.py +253 -23
- {rrq-0.3.6 → rrq-0.4.0}/uv.lock +112 -108
- rrq-0.3.6/PKG-INFO +0 -205
- rrq-0.3.6/README.md +0 -178
- rrq-0.3.6/tests/__init__.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/.coverage +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/.gitignore +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/FUTURE.md +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/LICENSE +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/MANIFEST.in +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/cli.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/client.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/constants.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/exc.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/job.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/registry.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/rrq/store.py +0 -0
- {rrq-0.3.6/rrq → rrq-0.4.0/tests}/__init__.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/tests/test_client.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/tests/test_registry.py +0 -0
- {rrq-0.3.6 → rrq-0.4.0}/tests/test_store.py +0 -0
rrq-0.4.0/PKG-INFO
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: rrq
|
|
3
|
+
Version: 0.4.0
|
|
4
|
+
Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
|
|
5
|
+
Project-URL: Homepage, https://github.com/getresq/rrq
|
|
6
|
+
Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
|
|
7
|
+
Author-email: Mazdak Rezvani <mazdak@me.com>
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Classifier: Intended Audience :: Developers
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
13
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
14
|
+
Classifier: Topic :: System :: Distributed Computing
|
|
15
|
+
Classifier: Topic :: System :: Monitoring
|
|
16
|
+
Requires-Python: >=3.11
|
|
17
|
+
Requires-Dist: click>=8.1.3
|
|
18
|
+
Requires-Dist: pydantic-settings>=2.9.1
|
|
19
|
+
Requires-Dist: pydantic>=2.11.4
|
|
20
|
+
Requires-Dist: redis[hiredis]<6,>=4.2.0
|
|
21
|
+
Requires-Dist: watchfiles>=0.19.0
|
|
22
|
+
Provides-Extra: dev
|
|
23
|
+
Requires-Dist: pytest-asyncio>=1.0.0; extra == 'dev'
|
|
24
|
+
Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
|
|
25
|
+
Requires-Dist: pytest>=8.3.5; extra == 'dev'
|
|
26
|
+
Description-Content-Type: text/markdown
|
|
27
|
+
|
|
28
|
+
# RRQ: Reliable Redis Queue
|
|
29
|
+
|
|
30
|
+
RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
|
|
31
|
+
|
|
32
|
+
## Key Features
|
|
33
|
+
|
|
34
|
+
* **At-Least-Once Semantics**: Uses Redis locks to ensure a job is processed by only one worker at a time. If a worker crashes or shuts down mid-processing, the lock expires, and the job *should* be re-processed (though re-queueing on unclean shutdown isn't implemented here yet - graceful shutdown *does* re-queue).
|
|
35
|
+
* **Automatic Retries with Backoff**: Jobs that fail with standard exceptions are automatically retried based on `max_retries` settings, using exponential backoff for delays.
|
|
36
|
+
* **Explicit Retries**: Handlers can raise `RetryJob` to control retry attempts and delays.
|
|
37
|
+
* **Job Timeouts**: Jobs exceeding their configured timeout (`job_timeout_seconds` or `default_job_timeout_seconds`) are terminated and moved to the DLQ.
|
|
38
|
+
* **Dead Letter Queue (DLQ)**: Jobs that fail permanently (max retries reached, fatal error, timeout) are moved to a DLQ list in Redis for inspection.
|
|
39
|
+
* **Job Uniqueness**: The `_unique_key` parameter in `enqueue` prevents duplicate jobs based on a custom key within a specified TTL.
|
|
40
|
+
* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
|
|
41
|
+
* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
|
|
42
|
+
* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
|
|
43
|
+
* **Cron Jobs**: Periodic jobs can be defined in `RRQSettings.cron_jobs` using a simple cron syntax.
|
|
44
|
+
|
|
45
|
+
- Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.
|
|
46
|
+
|
|
47
|
+
- To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
await client.enqueue(
|
|
51
|
+
"process_updates",
|
|
52
|
+
item_id=123,
|
|
53
|
+
_unique_key="update:123",
|
|
54
|
+
_defer_by=10,
|
|
55
|
+
)
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
## Basic Usage
|
|
59
|
+
|
|
60
|
+
*(See [`rrq_example.py`](https://github.com/GetResQ/rrq/tree/master/example) in the project root for a runnable example)*
|
|
61
|
+
|
|
62
|
+
**1. Define Handlers:**
|
|
63
|
+
|
|
64
|
+
```python
|
|
65
|
+
# handlers.py
|
|
66
|
+
import asyncio
|
|
67
|
+
from rrq.exc import RetryJob
|
|
68
|
+
|
|
69
|
+
async def my_task(ctx, message: str):
|
|
70
|
+
job_id = ctx['job_id']
|
|
71
|
+
attempt = ctx['job_try']
|
|
72
|
+
print(f"Processing job {job_id} (Attempt {attempt}): {message}")
|
|
73
|
+
await asyncio.sleep(1)
|
|
74
|
+
if attempt < 3 and message == "retry_me":
|
|
75
|
+
raise RetryJob("Needs another go!")
|
|
76
|
+
print(f"Finished job {job_id}")
|
|
77
|
+
return {"result": f"Processed: {message}"}
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
**2. Register Handlers:**
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
# main_setup.py (or wherever you initialize)
|
|
84
|
+
from rrq.registry import JobRegistry
|
|
85
|
+
from . import handlers # Assuming handlers.py is in the same directory
|
|
86
|
+
|
|
87
|
+
job_registry = JobRegistry()
|
|
88
|
+
job_registry.register("process_message", handlers.my_task)
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
**3. Configure Settings:**
|
|
92
|
+
|
|
93
|
+
```python
|
|
94
|
+
# config.py
|
|
95
|
+
from rrq.settings import RRQSettings
|
|
96
|
+
|
|
97
|
+
# Loads from environment variables (RRQ_REDIS_DSN, etc.) or uses defaults
|
|
98
|
+
rrq_settings = RRQSettings()
|
|
99
|
+
# Or override directly:
|
|
100
|
+
# rrq_settings = RRQSettings(redis_dsn="redis://localhost:6379/1")
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
**4. Enqueue Jobs:**
|
|
104
|
+
|
|
105
|
+
```python
|
|
106
|
+
# enqueue_script.py
|
|
107
|
+
import asyncio
|
|
108
|
+
from rrq.client import RRQClient
|
|
109
|
+
from config import rrq_settings # Import your settings
|
|
110
|
+
|
|
111
|
+
async def enqueue_jobs():
|
|
112
|
+
client = RRQClient(settings=rrq_settings)
|
|
113
|
+
await client.enqueue("process_message", "Hello RRQ!")
|
|
114
|
+
await client.enqueue("process_message", "retry_me")
|
|
115
|
+
await client.close()
|
|
116
|
+
|
|
117
|
+
if __name__ == "__main__":
|
|
118
|
+
asyncio.run(enqueue_jobs())
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
**5. Run a Worker:**
|
|
122
|
+
|
|
123
|
+
Note: You don't need to write a worker script yourself, as the command-line interface `rrq` is used for
|
|
124
|
+
this purpose.
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
# worker_script.py
|
|
128
|
+
from rrq.worker import RRQWorker
|
|
129
|
+
from config import rrq_settings # Import your settings
|
|
130
|
+
from main_setup import job_registry # Import your registry
|
|
131
|
+
|
|
132
|
+
# Create worker instance
|
|
133
|
+
worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)
|
|
134
|
+
|
|
135
|
+
# Run the worker (blocking)
|
|
136
|
+
if __name__ == "__main__":
|
|
137
|
+
worker.run()
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
You can run multiple instances of `worker_script.py` for concurrent processing.
|
|
141
|
+
|
|
142
|
+
## Cron Jobs
|
|
143
|
+
|
|
144
|
+
Add instances of `CronJob` to `RRQSettings.cron_jobs` to run periodic jobs. The
|
|
145
|
+
`schedule` string follows the typical five-field cron format `minute hour day-of-month month day-of-week`.
|
|
146
|
+
It supports the most common features from Unix cron:
|
|
147
|
+
|
|
148
|
+
- numeric values
|
|
149
|
+
- ranges (e.g. `8-11`)
|
|
150
|
+
- lists separated by commas (e.g. `mon,wed,fri`)
|
|
151
|
+
- step values using `/` (e.g. `*/15`)
|
|
152
|
+
- names for months and days (`jan-dec`, `sun-sat`)
|
|
153
|
+
|
|
154
|
+
Jobs are evaluated in the server's timezone and run with minute resolution.
|
|
155
|
+
|
|
156
|
+
### Cron Schedule Examples
|
|
157
|
+
|
|
158
|
+
```python
|
|
159
|
+
# Every minute
|
|
160
|
+
"* * * * *"
|
|
161
|
+
|
|
162
|
+
# Every hour at minute 30
|
|
163
|
+
"30 * * * *"
|
|
164
|
+
|
|
165
|
+
# Every day at 2:30 AM
|
|
166
|
+
"30 2 * * *"
|
|
167
|
+
|
|
168
|
+
# Every Monday at 9:00 AM
|
|
169
|
+
"0 9 * * mon"
|
|
170
|
+
|
|
171
|
+
# Every 15 minutes
|
|
172
|
+
"*/15 * * * *"
|
|
173
|
+
|
|
174
|
+
# Every weekday at 6:00 PM
|
|
175
|
+
"0 18 * * mon-fri"
|
|
176
|
+
|
|
177
|
+
# First day of every month at midnight
|
|
178
|
+
"0 0 1 * *"
|
|
179
|
+
|
|
180
|
+
# Every 2 hours during business hours on weekdays
|
|
181
|
+
"0 9-17/2 * * mon-fri"
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
### Defining Cron Jobs
|
|
185
|
+
|
|
186
|
+
```python
|
|
187
|
+
from rrq.settings import RRQSettings
|
|
188
|
+
from rrq.cron import CronJob
|
|
189
|
+
|
|
190
|
+
# Define your cron jobs
|
|
191
|
+
cron_jobs = [
|
|
192
|
+
# Daily cleanup at 2 AM
|
|
193
|
+
CronJob(
|
|
194
|
+
function_name="daily_cleanup",
|
|
195
|
+
schedule="0 2 * * *",
|
|
196
|
+
args=["temp_files"],
|
|
197
|
+
kwargs={"max_age_days": 7}
|
|
198
|
+
),
|
|
199
|
+
|
|
200
|
+
# Weekly report every Monday at 9 AM
|
|
201
|
+
CronJob(
|
|
202
|
+
function_name="generate_weekly_report",
|
|
203
|
+
schedule="0 9 * * mon",
|
|
204
|
+
unique=True # Prevent duplicate reports if worker restarts
|
|
205
|
+
),
|
|
206
|
+
|
|
207
|
+
# Health check every 15 minutes on a specific queue
|
|
208
|
+
CronJob(
|
|
209
|
+
function_name="system_health_check",
|
|
210
|
+
schedule="*/15 * * * *",
|
|
211
|
+
queue_name="monitoring"
|
|
212
|
+
),
|
|
213
|
+
|
|
214
|
+
# Backup database every night at 1 AM
|
|
215
|
+
CronJob(
|
|
216
|
+
function_name="backup_database",
|
|
217
|
+
schedule="0 1 * * *",
|
|
218
|
+
kwargs={"backup_type": "incremental"}
|
|
219
|
+
),
|
|
220
|
+
]
|
|
221
|
+
|
|
222
|
+
# Add to your settings
|
|
223
|
+
rrq_settings = RRQSettings(
|
|
224
|
+
redis_dsn="redis://localhost:6379/0",
|
|
225
|
+
cron_jobs=cron_jobs
|
|
226
|
+
)
|
|
227
|
+
```
|
|
228
|
+
|
|
229
|
+
### Cron Job Handlers
|
|
230
|
+
|
|
231
|
+
Your cron job handlers are regular async functions, just like other job handlers:
|
|
232
|
+
|
|
233
|
+
```python
|
|
234
|
+
async def daily_cleanup(ctx, file_type: str, max_age_days: int = 7):
|
|
235
|
+
"""Clean up old files."""
|
|
236
|
+
job_id = ctx['job_id']
|
|
237
|
+
print(f"Job {job_id}: Cleaning up {file_type} files older than {max_age_days} days")
|
|
238
|
+
# Your cleanup logic here
|
|
239
|
+
return {"cleaned_files": 42, "status": "completed"}
|
|
240
|
+
|
|
241
|
+
async def generate_weekly_report(ctx):
|
|
242
|
+
"""Generate and send weekly report."""
|
|
243
|
+
job_id = ctx['job_id']
|
|
244
|
+
print(f"Job {job_id}: Generating weekly report")
|
|
245
|
+
# Your report generation logic here
|
|
246
|
+
return {"report_id": "weekly_2024_01", "status": "sent"}
|
|
247
|
+
|
|
248
|
+
# Register your handlers
|
|
249
|
+
from rrq.registry import JobRegistry
|
|
250
|
+
|
|
251
|
+
job_registry = JobRegistry()
|
|
252
|
+
job_registry.register("daily_cleanup", daily_cleanup)
|
|
253
|
+
job_registry.register("generate_weekly_report", generate_weekly_report)
|
|
254
|
+
|
|
255
|
+
# Add the registry to your settings
|
|
256
|
+
rrq_settings.job_registry = job_registry
|
|
257
|
+
```
|
|
258
|
+
|
|
259
|
+
**Note:** Cron jobs are automatically enqueued by the worker when they become due. The worker checks for due cron jobs every 30 seconds and enqueues them as regular jobs to be processed.
|
|
260
|
+
|
|
261
|
+
## Command Line Interface
|
|
262
|
+
|
|
263
|
+
RRQ provides a command-line interface (CLI) for managing workers and performing health checks:
|
|
264
|
+
|
|
265
|
+
- **`rrq worker run`** - Run an RRQ worker process.
|
|
266
|
+
- `--settings` (optional): Specify the Python path to your settings object (e.g., `myapp.worker_config.rrq_settings`). If not provided, it will use the `RRQ_SETTINGS` environment variable or default to a basic `RRQSettings` object.
|
|
267
|
+
- `--queue` (optional, multiple): Specify queue(s) to poll. Defaults to the `default_queue_name` in settings.
|
|
268
|
+
- `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit.
|
|
269
|
+
- **`rrq worker watch`** - Run an RRQ worker with auto-restart on file changes.
|
|
270
|
+
- `--path` (optional): Directory path to watch for changes. Defaults to the current directory.
|
|
271
|
+
- `--settings` (optional): Same as above.
|
|
272
|
+
- `--queue` (optional, multiple): Same as above.
|
|
273
|
+
- **`rrq check`** - Perform a health check on active RRQ workers.
|
|
274
|
+
- `--settings` (optional): Same as above.
|
|
275
|
+
- **`rrq dlq requeue`** - Requeue jobs from the dead letter queue back into a live queue.
|
|
276
|
+
- `--settings` (optional): Same as above.
|
|
277
|
+
- `--dlq-name` (optional): Name of the DLQ (without prefix). Defaults to `default_dlq_name` in settings.
|
|
278
|
+
- `--queue` (optional): Target queue name (without prefix). Defaults to `default_queue_name` in settings.
|
|
279
|
+
- `--limit` (optional): Maximum number of DLQ jobs to requeue; all if not set.
|
|
280
|
+
|
|
281
|
+
## Configuration
|
|
282
|
+
|
|
283
|
+
RRQ can be configured in several ways, with the following precedence:
|
|
284
|
+
|
|
285
|
+
1. **Command-Line Argument (`--settings`)**: Directly specify the settings object path via the CLI. This takes the highest precedence.
|
|
286
|
+
2. **Environment Variable (`RRQ_SETTINGS`)**: Set the `RRQ_SETTINGS` environment variable to point to your settings object path. Used if `--settings` is not provided.
|
|
287
|
+
3. **Default Settings**: If neither of the above is provided, RRQ will instantiate a default `RRQSettings` object, which can still be influenced by environment variables starting with `RRQ_`.
|
|
288
|
+
4. **Environment Variables (Prefix `RRQ_`)**: Individual settings can be overridden by environment variables starting with `RRQ_`, which are automatically picked up by the `RRQSettings` object.
|
|
289
|
+
5. **.env File**: If `python-dotenv` is installed, RRQ will attempt to load a `.env` file from the current working directory or parent directories. System environment variables take precedence over `.env` variables.
|
|
290
|
+
|
|
291
|
+
**Important Note on `job_registry`**: The `job_registry` attribute in your `RRQSettings` object is **critical** for RRQ to function. It must be an instance of `JobRegistry` and is used to register job handlers. Without a properly configured `job_registry`, workers will not know how to process jobs, and most operations will fail. Ensure it is set in your settings object to map job names to their respective handler functions.
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
## Core Components
|
|
295
|
+
|
|
296
|
+
* **`RRQClient` (`client.py`)**: Used to enqueue jobs onto specific queues. Supports deferring jobs (by time delta or specific datetime), assigning custom job IDs, and enforcing job uniqueness via keys.
|
|
297
|
+
* **`RRQWorker` (`worker.py`)**: The process that polls queues, fetches jobs, executes the corresponding handler functions, and manages the job lifecycle based on success, failure, retries, or timeouts. Handles graceful shutdown via signals (SIGINT, SIGTERM).
|
|
298
|
+
* **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
|
|
299
|
+
* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
|
|
300
|
+
* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
|
|
301
|
+
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `
|
rrq-0.4.0/README.md
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
# RRQ: Reliable Redis Queue
|
|
2
|
+
|
|
3
|
+
RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
|
|
4
|
+
|
|
5
|
+
## Key Features
|
|
6
|
+
|
|
7
|
+
* **At-Least-Once Semantics**: Uses Redis locks to ensure a job is processed by only one worker at a time. If a worker crashes or shuts down mid-processing, the lock expires, and the job *should* be re-processed (though re-queueing on unclean shutdown isn't implemented here yet - graceful shutdown *does* re-queue).
|
|
8
|
+
* **Automatic Retries with Backoff**: Jobs that fail with standard exceptions are automatically retried based on `max_retries` settings, using exponential backoff for delays.
|
|
9
|
+
* **Explicit Retries**: Handlers can raise `RetryJob` to control retry attempts and delays.
|
|
10
|
+
* **Job Timeouts**: Jobs exceeding their configured timeout (`job_timeout_seconds` or `default_job_timeout_seconds`) are terminated and moved to the DLQ.
|
|
11
|
+
* **Dead Letter Queue (DLQ)**: Jobs that fail permanently (max retries reached, fatal error, timeout) are moved to a DLQ list in Redis for inspection.
|
|
12
|
+
* **Job Uniqueness**: The `_unique_key` parameter in `enqueue` prevents duplicate jobs based on a custom key within a specified TTL.
|
|
13
|
+
* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
|
|
14
|
+
* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
|
|
15
|
+
* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
|
|
16
|
+
* **Cron Jobs**: Periodic jobs can be defined in `RRQSettings.cron_jobs` using a simple cron syntax.
|
|
17
|
+
|
|
18
|
+
- Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.
|
|
19
|
+
|
|
20
|
+
- To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:
|
|
21
|
+
|
|
22
|
+
```python
|
|
23
|
+
await client.enqueue(
|
|
24
|
+
"process_updates",
|
|
25
|
+
item_id=123,
|
|
26
|
+
_unique_key="update:123",
|
|
27
|
+
_defer_by=10,
|
|
28
|
+
)
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## Basic Usage
|
|
32
|
+
|
|
33
|
+
*(See [`rrq_example.py`](https://github.com/GetResQ/rrq/tree/master/example) in the project root for a runnable example)*
|
|
34
|
+
|
|
35
|
+
**1. Define Handlers:**
|
|
36
|
+
|
|
37
|
+
```python
|
|
38
|
+
# handlers.py
|
|
39
|
+
import asyncio
|
|
40
|
+
from rrq.exc import RetryJob
|
|
41
|
+
|
|
42
|
+
async def my_task(ctx, message: str):
|
|
43
|
+
job_id = ctx['job_id']
|
|
44
|
+
attempt = ctx['job_try']
|
|
45
|
+
print(f"Processing job {job_id} (Attempt {attempt}): {message}")
|
|
46
|
+
await asyncio.sleep(1)
|
|
47
|
+
if attempt < 3 and message == "retry_me":
|
|
48
|
+
raise RetryJob("Needs another go!")
|
|
49
|
+
print(f"Finished job {job_id}")
|
|
50
|
+
return {"result": f"Processed: {message}"}
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
**2. Register Handlers:**
|
|
54
|
+
|
|
55
|
+
```python
|
|
56
|
+
# main_setup.py (or wherever you initialize)
|
|
57
|
+
from rrq.registry import JobRegistry
|
|
58
|
+
from . import handlers # Assuming handlers.py is in the same directory
|
|
59
|
+
|
|
60
|
+
job_registry = JobRegistry()
|
|
61
|
+
job_registry.register("process_message", handlers.my_task)
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
**3. Configure Settings:**
|
|
65
|
+
|
|
66
|
+
```python
|
|
67
|
+
# config.py
|
|
68
|
+
from rrq.settings import RRQSettings
|
|
69
|
+
|
|
70
|
+
# Loads from environment variables (RRQ_REDIS_DSN, etc.) or uses defaults
|
|
71
|
+
rrq_settings = RRQSettings()
|
|
72
|
+
# Or override directly:
|
|
73
|
+
# rrq_settings = RRQSettings(redis_dsn="redis://localhost:6379/1")
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
**4. Enqueue Jobs:**
|
|
77
|
+
|
|
78
|
+
```python
|
|
79
|
+
# enqueue_script.py
|
|
80
|
+
import asyncio
|
|
81
|
+
from rrq.client import RRQClient
|
|
82
|
+
from config import rrq_settings # Import your settings
|
|
83
|
+
|
|
84
|
+
async def enqueue_jobs():
|
|
85
|
+
client = RRQClient(settings=rrq_settings)
|
|
86
|
+
await client.enqueue("process_message", "Hello RRQ!")
|
|
87
|
+
await client.enqueue("process_message", "retry_me")
|
|
88
|
+
await client.close()
|
|
89
|
+
|
|
90
|
+
if __name__ == "__main__":
|
|
91
|
+
asyncio.run(enqueue_jobs())
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
**5. Run a Worker:**
|
|
95
|
+
|
|
96
|
+
Note: You don't need to write a worker script yourself, as the command-line interface `rrq` is used for
|
|
97
|
+
this purpose.
|
|
98
|
+
|
|
99
|
+
```python
|
|
100
|
+
# worker_script.py
|
|
101
|
+
from rrq.worker import RRQWorker
|
|
102
|
+
from config import rrq_settings # Import your settings
|
|
103
|
+
from main_setup import job_registry # Import your registry
|
|
104
|
+
|
|
105
|
+
# Create worker instance
|
|
106
|
+
worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)
|
|
107
|
+
|
|
108
|
+
# Run the worker (blocking)
|
|
109
|
+
if __name__ == "__main__":
|
|
110
|
+
worker.run()
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
You can run multiple instances of `worker_script.py` for concurrent processing.
|
|
114
|
+
|
|
115
|
+
## Cron Jobs
|
|
116
|
+
|
|
117
|
+
Add instances of `CronJob` to `RRQSettings.cron_jobs` to run periodic jobs. The
|
|
118
|
+
`schedule` string follows the typical five-field cron format `minute hour day-of-month month day-of-week`.
|
|
119
|
+
It supports the most common features from Unix cron:
|
|
120
|
+
|
|
121
|
+
- numeric values
|
|
122
|
+
- ranges (e.g. `8-11`)
|
|
123
|
+
- lists separated by commas (e.g. `mon,wed,fri`)
|
|
124
|
+
- step values using `/` (e.g. `*/15`)
|
|
125
|
+
- names for months and days (`jan-dec`, `sun-sat`)
|
|
126
|
+
|
|
127
|
+
Jobs are evaluated in the server's timezone and run with minute resolution.
|
|
128
|
+
|
|
129
|
+
### Cron Schedule Examples
|
|
130
|
+
|
|
131
|
+
```python
|
|
132
|
+
# Every minute
|
|
133
|
+
"* * * * *"
|
|
134
|
+
|
|
135
|
+
# Every hour at minute 30
|
|
136
|
+
"30 * * * *"
|
|
137
|
+
|
|
138
|
+
# Every day at 2:30 AM
|
|
139
|
+
"30 2 * * *"
|
|
140
|
+
|
|
141
|
+
# Every Monday at 9:00 AM
|
|
142
|
+
"0 9 * * mon"
|
|
143
|
+
|
|
144
|
+
# Every 15 minutes
|
|
145
|
+
"*/15 * * * *"
|
|
146
|
+
|
|
147
|
+
# Every weekday at 6:00 PM
|
|
148
|
+
"0 18 * * mon-fri"
|
|
149
|
+
|
|
150
|
+
# First day of every month at midnight
|
|
151
|
+
"0 0 1 * *"
|
|
152
|
+
|
|
153
|
+
# Every 2 hours during business hours on weekdays
|
|
154
|
+
"0 9-17/2 * * mon-fri"
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
### Defining Cron Jobs
|
|
158
|
+
|
|
159
|
+
```python
|
|
160
|
+
from rrq.settings import RRQSettings
|
|
161
|
+
from rrq.cron import CronJob
|
|
162
|
+
|
|
163
|
+
# Define your cron jobs
|
|
164
|
+
cron_jobs = [
|
|
165
|
+
# Daily cleanup at 2 AM
|
|
166
|
+
CronJob(
|
|
167
|
+
function_name="daily_cleanup",
|
|
168
|
+
schedule="0 2 * * *",
|
|
169
|
+
args=["temp_files"],
|
|
170
|
+
kwargs={"max_age_days": 7}
|
|
171
|
+
),
|
|
172
|
+
|
|
173
|
+
# Weekly report every Monday at 9 AM
|
|
174
|
+
CronJob(
|
|
175
|
+
function_name="generate_weekly_report",
|
|
176
|
+
schedule="0 9 * * mon",
|
|
177
|
+
unique=True # Prevent duplicate reports if worker restarts
|
|
178
|
+
),
|
|
179
|
+
|
|
180
|
+
# Health check every 15 minutes on a specific queue
|
|
181
|
+
CronJob(
|
|
182
|
+
function_name="system_health_check",
|
|
183
|
+
schedule="*/15 * * * *",
|
|
184
|
+
queue_name="monitoring"
|
|
185
|
+
),
|
|
186
|
+
|
|
187
|
+
# Backup database every night at 1 AM
|
|
188
|
+
CronJob(
|
|
189
|
+
function_name="backup_database",
|
|
190
|
+
schedule="0 1 * * *",
|
|
191
|
+
kwargs={"backup_type": "incremental"}
|
|
192
|
+
),
|
|
193
|
+
]
|
|
194
|
+
|
|
195
|
+
# Add to your settings
|
|
196
|
+
rrq_settings = RRQSettings(
|
|
197
|
+
redis_dsn="redis://localhost:6379/0",
|
|
198
|
+
cron_jobs=cron_jobs
|
|
199
|
+
)
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
### Cron Job Handlers
|
|
203
|
+
|
|
204
|
+
Your cron job handlers are regular async functions, just like other job handlers:
|
|
205
|
+
|
|
206
|
+
```python
|
|
207
|
+
async def daily_cleanup(ctx, file_type: str, max_age_days: int = 7):
|
|
208
|
+
"""Clean up old files."""
|
|
209
|
+
job_id = ctx['job_id']
|
|
210
|
+
print(f"Job {job_id}: Cleaning up {file_type} files older than {max_age_days} days")
|
|
211
|
+
# Your cleanup logic here
|
|
212
|
+
return {"cleaned_files": 42, "status": "completed"}
|
|
213
|
+
|
|
214
|
+
async def generate_weekly_report(ctx):
|
|
215
|
+
"""Generate and send weekly report."""
|
|
216
|
+
job_id = ctx['job_id']
|
|
217
|
+
print(f"Job {job_id}: Generating weekly report")
|
|
218
|
+
# Your report generation logic here
|
|
219
|
+
return {"report_id": "weekly_2024_01", "status": "sent"}
|
|
220
|
+
|
|
221
|
+
# Register your handlers
|
|
222
|
+
from rrq.registry import JobRegistry
|
|
223
|
+
|
|
224
|
+
job_registry = JobRegistry()
|
|
225
|
+
job_registry.register("daily_cleanup", daily_cleanup)
|
|
226
|
+
job_registry.register("generate_weekly_report", generate_weekly_report)
|
|
227
|
+
|
|
228
|
+
# Add the registry to your settings
|
|
229
|
+
rrq_settings.job_registry = job_registry
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
**Note:** Cron jobs are automatically enqueued by the worker when they become due. The worker checks for due cron jobs every 30 seconds and enqueues them as regular jobs to be processed.
|
|
233
|
+
|
|
234
|
+
## Command Line Interface
|
|
235
|
+
|
|
236
|
+
RRQ provides a command-line interface (CLI) for managing workers and performing health checks:
|
|
237
|
+
|
|
238
|
+
- **`rrq worker run`** - Run an RRQ worker process.
|
|
239
|
+
- `--settings` (optional): Specify the Python path to your settings object (e.g., `myapp.worker_config.rrq_settings`). If not provided, it will use the `RRQ_SETTINGS` environment variable or default to a basic `RRQSettings` object.
|
|
240
|
+
- `--queue` (optional, multiple): Specify queue(s) to poll. Defaults to the `default_queue_name` in settings.
|
|
241
|
+
- `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit.
|
|
242
|
+
- **`rrq worker watch`** - Run an RRQ worker with auto-restart on file changes.
|
|
243
|
+
- `--path` (optional): Directory path to watch for changes. Defaults to the current directory.
|
|
244
|
+
- `--settings` (optional): Same as above.
|
|
245
|
+
- `--queue` (optional, multiple): Same as above.
|
|
246
|
+
- **`rrq check`** - Perform a health check on active RRQ workers.
|
|
247
|
+
- `--settings` (optional): Same as above.
|
|
248
|
+
- **`rrq dlq requeue`** - Requeue jobs from the dead letter queue back into a live queue.
|
|
249
|
+
- `--settings` (optional): Same as above.
|
|
250
|
+
- `--dlq-name` (optional): Name of the DLQ (without prefix). Defaults to `default_dlq_name` in settings.
|
|
251
|
+
- `--queue` (optional): Target queue name (without prefix). Defaults to `default_queue_name` in settings.
|
|
252
|
+
- `--limit` (optional): Maximum number of DLQ jobs to requeue; all if not set.
|
|
253
|
+
|
|
254
|
+
## Configuration
|
|
255
|
+
|
|
256
|
+
RRQ can be configured in several ways, with the following precedence:
|
|
257
|
+
|
|
258
|
+
1. **Command-Line Argument (`--settings`)**: Directly specify the settings object path via the CLI. This takes the highest precedence.
|
|
259
|
+
2. **Environment Variable (`RRQ_SETTINGS`)**: Set the `RRQ_SETTINGS` environment variable to point to your settings object path. Used if `--settings` is not provided.
|
|
260
|
+
3. **Default Settings**: If neither of the above is provided, RRQ will instantiate a default `RRQSettings` object, which can still be influenced by environment variables starting with `RRQ_`.
|
|
261
|
+
4. **Environment Variables (Prefix `RRQ_`)**: Individual settings can be overridden by environment variables starting with `RRQ_`, which are automatically picked up by the `RRQSettings` object.
|
|
262
|
+
5. **.env File**: If `python-dotenv` is installed, RRQ will attempt to load a `.env` file from the current working directory or parent directories. System environment variables take precedence over `.env` variables.
|
|
263
|
+
|
|
264
|
+
**Important Note on `job_registry`**: The `job_registry` attribute in your `RRQSettings` object is **critical** for RRQ to function. It must be an instance of `JobRegistry` and is used to register job handlers. Without a properly configured `job_registry`, workers will not know how to process jobs, and most operations will fail. Ensure it is set in your settings object to map job names to their respective handler functions.
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
## Core Components
|
|
268
|
+
|
|
269
|
+
* **`RRQClient` (`client.py`)**: Used to enqueue jobs onto specific queues. Supports deferring jobs (by time delta or specific datetime), assigning custom job IDs, and enforcing job uniqueness via keys.
|
|
270
|
+
* **`RRQWorker` (`worker.py`)**: The process that polls queues, fetches jobs, executes the corresponding handler functions, and manages the job lifecycle based on success, failure, retries, or timeouts. Handles graceful shutdown via signals (SIGINT, SIGTERM).
|
|
271
|
+
* **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
|
|
272
|
+
* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
|
|
273
|
+
* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
|
|
274
|
+
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `
|
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
import asyncio
|
|
4
4
|
import logging
|
|
5
5
|
|
|
6
|
+
from rrq.cron import CronJob
|
|
6
7
|
from rrq.settings import RRQSettings
|
|
7
8
|
|
|
8
9
|
logger = logging.getLogger("rrq")
|
|
@@ -34,5 +35,27 @@ rrq_settings = RRQSettings(
|
|
|
34
35
|
redis_dsn=redis_dsn,
|
|
35
36
|
on_startup=on_startup_hook,
|
|
36
37
|
on_shutdown=on_shutdown_hook,
|
|
38
|
+
# Example cron jobs - these would run periodically when a worker is running
|
|
39
|
+
cron_jobs=[
|
|
40
|
+
# Run a cleanup task every day at 2 AM
|
|
41
|
+
CronJob(
|
|
42
|
+
function_name="daily_cleanup",
|
|
43
|
+
schedule="0 2 * * *",
|
|
44
|
+
args=["cleanup_logs"],
|
|
45
|
+
kwargs={"max_age_days": 30}
|
|
46
|
+
),
|
|
47
|
+
# Send a status report every Monday at 9 AM
|
|
48
|
+
CronJob(
|
|
49
|
+
function_name="send_status_report",
|
|
50
|
+
schedule="0 9 * * mon",
|
|
51
|
+
unique=True # Prevent duplicate reports if worker restarts
|
|
52
|
+
),
|
|
53
|
+
# Health check every 15 minutes
|
|
54
|
+
CronJob(
|
|
55
|
+
function_name="health_check",
|
|
56
|
+
schedule="*/15 * * * *",
|
|
57
|
+
queue_name="monitoring" # Use a specific queue for monitoring tasks
|
|
58
|
+
),
|
|
59
|
+
]
|
|
37
60
|
)
|
|
38
61
|
|