rrq-0.2.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rrq-0.2.5/.gitignore +3 -0
- rrq-0.2.5/LICENSE +13 -0
- rrq-0.2.5/MANIFEST.in +2 -0
- rrq-0.2.5/PKG-INFO +201 -0
- rrq-0.2.5/README.md +175 -0
- rrq-0.2.5/example/example_rrq_settings.py +38 -0
- rrq-0.2.5/example/rrq_example.py +212 -0
- rrq-0.2.5/pyproject.toml +46 -0
- rrq-0.2.5/rrq/__init__.py +0 -0
- rrq-0.2.5/rrq/client.py +159 -0
- rrq-0.2.5/rrq/constants.py +42 -0
- rrq-0.2.5/rrq/exc.py +46 -0
- rrq-0.2.5/rrq/job.py +133 -0
- rrq-0.2.5/rrq/registry.py +77 -0
- rrq-0.2.5/rrq/rrq.py +328 -0
- rrq-0.2.5/rrq/settings.py +107 -0
- rrq-0.2.5/rrq/store.py +568 -0
- rrq-0.2.5/rrq/worker.py +897 -0
- rrq-0.2.5/tests/__init__.py +0 -0
- rrq-0.2.5/tests/test_cli.py +259 -0
- rrq-0.2.5/tests/test_client.py +214 -0
- rrq-0.2.5/tests/test_registry.py +84 -0
- rrq-0.2.5/tests/test_store.py +362 -0
- rrq-0.2.5/tests/test_worker.py +868 -0
- rrq-0.2.5/uv.lock +409 -0
rrq-0.2.5/.gitignore
ADDED
rrq-0.2.5/LICENSE
ADDED
@@ -0,0 +1,13 @@
Copyright 2025 Mazdak Rezvani

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
rrq-0.2.5/MANIFEST.in
ADDED
rrq-0.2.5/PKG-INFO
ADDED
@@ -0,0 +1,201 @@
Metadata-Version: 2.4
Name: rrq
Version: 0.2.5
Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
Project-URL: Homepage, https://github.com/getresq/rrq
Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
Author-email: Mazdak Rezvani <mazdak@me.com>
License-File: LICENSE
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Distributed Computing
Classifier: Topic :: System :: Monitoring
Requires-Python: >=3.11
Requires-Dist: click>=8.1.3
Requires-Dist: pydantic-settings>=2.9.1
Requires-Dist: pydantic>=2.11.4
Requires-Dist: redis[hiredis]<6,>=4.2.0
Requires-Dist: watchfiles>=0.19.0
Provides-Extra: dev
Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
Requires-Dist: pytest>=8.3.5; extra == 'dev'
Description-Content-Type: text/markdown

# RRQ: Reliable Redis Queue

     ____    ____     ___
    |  _ \  |  _ \   / _ \
    | |_) | | |_) | | | | |
    |  _ <  |  _ <  | |_| |
    |_| \_\ |_| \_\  \__\_\

RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.

## Core Components

* **`RRQClient` (`client.py`)**: Used to enqueue jobs onto specific queues. Supports deferring jobs (by time delta or specific datetime), assigning custom job IDs, and enforcing job uniqueness via keys.
* **`RRQWorker` (`worker.py`)**: The process that polls queues, fetches jobs, executes the corresponding handler functions, and manages the job lifecycle based on success, failure, retries, or timeouts. Handles graceful shutdown via signals (SIGINT, SIGTERM).
* **`JobRegistry` (`registry.py`)**: A simple registry that maps string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `RETRYING`).
* **`RRQSettings` (`settings.py`)**: A Pydantic `BaseSettings` model for configuring RRQ behavior (Redis DSN, queue names, timeouts, retry policies, concurrency, etc.). Loadable from environment variables (prefix `RRQ_`).
* **`constants.py`**: Defines shared constants such as Redis key prefixes and default configuration values.
* **`exc.py`**: Defines custom exceptions, notably `RetryJob`, which handlers can raise to explicitly request a retry, optionally with a custom delay.

## Key Features

* **At-Least-Once Semantics**: Uses Redis locks to ensure a job is processed by only one worker at a time. If a worker crashes mid-processing, the lock expires and the job *should* be re-processed (automatic re-queueing after an unclean shutdown is not yet implemented; a graceful shutdown *does* re-queue interrupted jobs).
* **Automatic Retries with Backoff**: Jobs that fail with standard exceptions are automatically retried up to `max_retries`, with exponentially backed-off delays.
* **Explicit Retries**: Handlers can raise `RetryJob` to control retry attempts and delays.
* **Job Timeouts**: Jobs exceeding their configured timeout (`job_timeout_seconds` or `default_job_timeout_seconds`) are terminated and moved to the DLQ.
* **Dead Letter Queue (DLQ)**: Jobs that fail permanently (max retries reached, fatal error, timeout) are moved to a DLQ list in Redis for inspection.
* **Job Uniqueness**: The `_unique_key` parameter of `enqueue` prevents duplicate jobs based on a custom key within a specified TTL.
* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until` (see the sketch after this list).
  *Note: Using deferral with a specific `_job_id` reschedules the job associated with that ID to the new time, overwriting its previous definition and score; it does not create multiple distinct scheduled jobs with the same ID.*
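
As an illustrative sketch (editorial, not part of the packaged README), the enqueue-time options above could be combined as follows. The parameter names (`_defer_by`, `_defer_until`, `_unique_key`, `_job_id`, `_queue_name`) all come from this README and the bundled example; the handler name and key values are placeholders:

```python
# Illustrative only; handler name and key values are placeholders.
import asyncio
from datetime import datetime, timedelta, timezone

from rrq.client import RRQClient
from rrq.settings import RRQSettings

async def demo():
    client = RRQClient(settings=RRQSettings())

    # Relative deferral: run roughly 30 seconds from now.
    await client.enqueue("process_message", "later", _defer_by=timedelta(seconds=30))

    # Absolute deferral: run at a specific datetime.
    await client.enqueue(
        "process_message",
        "at noon",
        _defer_until=datetime(2026, 1, 1, 12, 0, tzinfo=timezone.utc),
    )

    # Uniqueness: while the unique lock's TTL is live, a second enqueue
    # with the same key is suppressed.
    await client.enqueue("process_message", "once", _unique_key="daily-report")

    # Re-enqueueing with the same _job_id reschedules that job (see note above).
    await client.enqueue(
        "process_message", "v2", _job_id="report-123", _defer_by=timedelta(minutes=5)
    )

    await client.close()

asyncio.run(demo())
```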

## Basic Usage

*(See [`rrq_example.py`](example/rrq_example.py) in the `example` directory for a runnable example.)*

**1. Define Handlers:**

```python
# handlers.py
import asyncio
from rrq.exc import RetryJob

async def my_task(ctx, message: str):
    job_id = ctx['job_id']
    attempt = ctx['job_try']
    print(f"Processing job {job_id} (Attempt {attempt}): {message}")
    await asyncio.sleep(1)
    if attempt < 3 and message == "retry_me":
        raise RetryJob("Needs another go!")
    print(f"Finished job {job_id}")
    return {"result": f"Processed: {message}"}
```

**2. Register Handlers:**

```python
# main_setup.py (or wherever you initialize)
from rrq.registry import JobRegistry
from . import handlers  # Assuming handlers.py is in the same package

job_registry = JobRegistry()
job_registry.register("process_message", handlers.my_task)
```

**3. Configure Settings:**

```python
# config.py
from rrq.settings import RRQSettings

# Loads from environment variables (RRQ_REDIS_DSN, etc.) or uses defaults
rrq_settings = RRQSettings()
# Or override directly:
# rrq_settings = RRQSettings(redis_dsn="redis://localhost:6379/1")
```

**4. Enqueue Jobs:**

```python
# enqueue_script.py
import asyncio
from rrq.client import RRQClient
from config import rrq_settings  # Import your settings

async def enqueue_jobs():
    client = RRQClient(settings=rrq_settings)
    await client.enqueue("process_message", "Hello RRQ!")
    await client.enqueue("process_message", "retry_me")
    await client.close()

if __name__ == "__main__":
    asyncio.run(enqueue_jobs())
```

**5. Run a Worker:**

```python
# worker_script.py
from rrq.worker import RRQWorker
from config import rrq_settings  # Import your settings
from main_setup import job_registry  # Import your registry

# Create worker instance
worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)

# Run the worker (blocking)
if __name__ == "__main__":
    worker.run()
```

You can run multiple instances of `worker_script.py` for concurrent processing.

## Configuration

RRQ behavior is configured via the `RRQSettings` object, which loads values from environment variables prefixed with `RRQ_` by default. Key settings include the following; an illustrative shell sketch follows the list:

* `RRQ_REDIS_DSN`: Connection string for Redis.
* `RRQ_DEFAULT_QUEUE_NAME`: Default queue name.
* `RRQ_DEFAULT_MAX_RETRIES`: Default retry limit.
* `RRQ_DEFAULT_JOB_TIMEOUT_SECONDS`: Default job timeout.
* `RRQ_WORKER_CONCURRENCY`: Maximum concurrent jobs per worker.
* ...and others (see `settings.py`).
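
As an editorial sketch (not part of the packaged README), these variables could be exported before starting a worker. Only the `RRQ_`-prefixed names come from the list above; the values are placeholders, and the settings path reuses the example from the CLI section below:

```bash
# Placeholder values - only the RRQ_-prefixed variable names are documented above.
export RRQ_REDIS_DSN="redis://localhost:6379/0"
export RRQ_DEFAULT_MAX_RETRIES=5
export RRQ_DEFAULT_JOB_TIMEOUT_SECONDS=300
export RRQ_WORKER_CONCURRENCY=10
rrq worker run --settings myapp.worker_config.rrq_settings
```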

## RRQ CLI

RRQ provides a command-line interface (CLI) for interacting with the job queue system. The `rrq` CLI allows you to manage workers, check system health, and get statistics about queues and jobs.

### Usage

```bash
rrq <command> [options]
```

### Commands

- **`worker run`**: Run an RRQ worker process to process jobs from queues.
  ```bash
  rrq worker run [--burst] [--detach] --settings <settings_path>
  ```
  - `--burst`: Run in burst mode (process one job/batch, then exit).
  - `--detach`: Run the worker in the background.
  - `--settings`: Python path to the application's worker settings (e.g., `myapp.worker_config.rrq_settings`).

- **`worker watch`**: Run an RRQ worker with auto-restart on file changes in a specified directory.
  ```bash
  rrq worker watch [--path <directory>] --settings <settings_path>
  ```
  - `--path`: Directory to watch for changes (default: current directory).
  - `--settings`: Python path to the application's worker settings.

- **`check`**: Perform a health check on active RRQ workers.
  ```bash
  rrq check --settings <settings_path>
  ```
  - `--settings`: Python path to the application's settings.

### Configuration

The CLI uses the same `RRQSettings` as the library, loading configuration from environment variables prefixed with `RRQ_`. You can also point a command at a specific settings object via the `--settings` option:

```bash
rrq worker run --settings myapp.worker_config.rrq_settings
```

### Help

For detailed help on any command, use:

```bash
rrq <command> --help
```
rrq-0.2.5/README.md
ADDED

@@ -0,0 +1,175 @@
(The 175 added lines of README.md are identical to the Markdown description embedded in PKG-INFO above and are not repeated here.)
rrq-0.2.5/example/example_rrq_settings.py
ADDED

@@ -0,0 +1,38 @@
'''example_rrq_settings.py: Example RRQ Application Settings'''

import asyncio
import logging

from rrq.settings import RRQSettings

logger = logging.getLogger("rrq")
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)

redis_dsn = "redis://localhost:6379/0"


async def on_startup_hook():
    logger.info("Executing 'on_startup_hook' (application-specific startup)...")
    await asyncio.sleep(0.1)
    logger.info("'on_startup_hook' complete.")


async def on_shutdown_hook():
    logger.info("Executing 'on_shutdown_hook' (application-specific shutdown)...")
    await asyncio.sleep(0.1)
    logger.info("'on_shutdown_hook' complete.")


# RRQ Settings
rrq_settings = RRQSettings(
    redis_dsn=redis_dsn,
    on_startup=on_startup_hook,
    on_shutdown=on_shutdown_hook,
)
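
The `rrq_settings` object above is exactly what the CLI's `--settings` option expects. A plausible invocation (hypothetical: it assumes the module is importable, e.g. you run from the `example/` directory, and that Redis is reachable at `redis://localhost:6379/0`):

```bash
# Hypothetical: requires example_rrq_settings.py to be importable from the
# current directory and a local Redis server at redis://localhost:6379/0.
rrq worker run --settings example_rrq_settings.rrq_settings
```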
rrq-0.2.5/example/rrq_example.py
ADDED

@@ -0,0 +1,212 @@
"""rrq_example.py: A simple example demonstrating the RRQ system."""

import asyncio
import logging
import random
import signal
from contextlib import suppress
from datetime import timedelta

from rrq.client import RRQClient
from rrq.exc import RetryJob
from rrq.registry import JobRegistry
from rrq.settings import RRQSettings
from rrq.store import JobStore
from rrq.worker import RRQWorker

# --- Basic Logging Setup ---
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s"
)
# Set RRQ internal loggers to DEBUG for more detail in the example
logging.getLogger("rrq").setLevel(logging.DEBUG)
logger = logging.getLogger("RRQExample")


# --- Example Job Handlers ---
async def successful_task(ctx, message: str):
    delay = random.uniform(0.1, 0.5)
    logger.info(
        f"SUCCESS_TASK (Job {ctx['job_id']}, Try {ctx['job_try']}): Received '{message}'. Sleeping for {delay:.2f}s..."
    )
    await asyncio.sleep(delay)
    logger.info(f"SUCCESS_TASK (Job {ctx['job_id']}): Finished successfully.")
    return {"status": "success", "processed_message": message, "slept_for": delay}


async def failing_task(ctx, data: dict):
    attempt = ctx["job_try"]
    logger.warning(
        f"FAILING_TASK (Job {ctx['job_id']}, Try {attempt}): Received data {data}. Simulating failure..."
    )
    await asyncio.sleep(0.1)
    # Example: fail permanently once max retries (from Job/Settings) are exhausted
    raise ValueError(f"Simulated failure on attempt {attempt}")


async def retry_task(ctx, counter_limit: int):
    attempt = ctx["job_try"]
    logger.info(
        f"RETRY_TASK (Job {ctx['job_id']}, Try {attempt}): Received limit {counter_limit}. Will retry if attempt < {counter_limit}."
    )
    await asyncio.sleep(0.2)
    if attempt < counter_limit:
        logger.warning(
            f"RETRY_TASK (Job {ctx['job_id']}, Try {attempt}): Raising RetryJob (defer 1s)."
        )
        raise RetryJob(defer_seconds=1.0)  # Request a specific retry delay
    else:
        logger.info(
            f"RETRY_TASK (Job {ctx['job_id']}, Try {attempt}): Reached limit {counter_limit}. Finishing."
        )
        return {"status": "completed_after_retries", "attempts": attempt}


# --- Main Execution ---
async def main():
    logger.info("--- Starting RRQ Example ---")

    # 1. Settings - use a separate Redis DB for the example (e.g., DB 2)
    settings = RRQSettings(
        redis_dsn="redis://localhost:6379/2",
        default_max_retries=3,  # Lower retries for the example
        worker_health_check_interval_seconds=5,  # Frequent health check
        worker_shutdown_grace_period_seconds=5,
    )
    logger.info(f"Using Redis DSN: {settings.redis_dsn}")

    # Ensure the Redis DB is clean before starting (optional, good for examples)
    try:
        temp_store = JobStore(settings=settings)
        await temp_store.redis.flushdb()
        logger.info(f"Flushed Redis DB {settings.redis_dsn.split('/')[-1]}")
        await temp_store.aclose()
    except Exception as e:
        logger.error(f"Could not flush Redis DB: {e}. Please ensure Redis is running.")
        return

    # 2. Registry
    registry = JobRegistry()
    registry.register("handle_success", successful_task)
    registry.register("handle_failure", failing_task)
    registry.register("handle_retry", retry_task)
    logger.info(f"Registered handlers: {registry.get_registered_functions()}")

    # 3. Client
    client = RRQClient(settings=settings)

    # 4. Enqueue Jobs
    logger.info("Enqueueing jobs...")
    job1 = await client.enqueue("handle_success", "Hello World!")
    job2 = await client.enqueue("handle_failure", {"id": 123, "value": "abc"})
    job3 = await client.enqueue(
        "handle_retry", 3
    )  # Expect 3 attempts (1 initial + 2 retries)
    job4 = await client.enqueue(
        "handle_success", "Deferred Message!", _defer_by=timedelta(seconds=5)
    )
    job5 = await client.enqueue(
        "handle_success",
        "Another message",
        _queue_name="high_priority",  # Example custom queue
    )

    if all([job1, job2, job3, job4, job5]):
        logger.info("Jobs enqueued successfully.")
        logger.info(f"  - Job 1 (Success):  {job1.id}")
        logger.info(f"  - Job 2 (Failure):  {job2.id}")
        logger.info(f"  - Job 3 (Retry):    {job3.id}")
        logger.info(f"  - Job 4 (Deferred): {job4.id}")
        logger.info(f"  - Job 5 (CustomQ):  {job5.id}")
    else:
        logger.error("Some jobs failed to enqueue.")
        await client.close()
        return

    await client.close()  # Close client connection if no longer needed

    # 5. Worker Setup
    # Run a worker polling both the default and the custom queue
    worker = RRQWorker(
        settings=settings,
        job_registry=registry,
        queues=[settings.default_queue_name, "high_priority"],
    )

    # 6. Run Worker (with graceful shutdown handling)
    logger.info(f"Starting worker {worker.worker_id}...")
    worker_task = asyncio.create_task(run_worker_async(worker), name="RRQWorkerRunLoop")

    # Keep the main script running until interrupted (Ctrl+C)
    stop_event = asyncio.Event()
    loop = asyncio.get_running_loop()

    def signal_handler():
        logger.info("Shutdown signal received. Setting stop event.")
        if not stop_event.is_set():
            stop_event.set()
        # Allow the worker to handle shutdown via its own signal handlers first.
        # If it doesn't shut down gracefully, worker_task may need cancellation.

    for sig in (signal.SIGINT, signal.SIGTERM):
        try:
            loop.add_signal_handler(sig, signal_handler)
        except NotImplementedError:
            logger.warning(
                f"Signal handler for {sig.name} not supported on this platform."
            )

    logger.info("Example running. Press Ctrl+C to stop.")

    # Wait for the stop event or worker task completion (e.g., if it errors out).
    # Note: on Python >= 3.11 asyncio.wait() no longer accepts bare coroutines,
    # so wrap stop_event.wait() in a task first.
    stop_task = asyncio.create_task(stop_event.wait(), name="StopEventWait")
    done, pending = await asyncio.wait(
        [worker_task, stop_task], return_when=asyncio.FIRST_COMPLETED
    )

    logger.info("Stop event triggered or worker task finished.")

    # Initiate worker shutdown if it hasn't stopped itself
    if not worker_task.done():
        logger.info("Requesting worker shutdown...")
        worker._request_shutdown()  # Ask the worker to shut down gracefully
        try:
            await asyncio.wait_for(
                worker_task, timeout=settings.worker_shutdown_grace_period_seconds + 5
            )
            logger.info("Worker task completed after shutdown request.")
        except TimeoutError:
            logger.warning(
                "Worker did not shut down gracefully within extended timeout. Cancelling task."
            )
            worker_task.cancel()
            with suppress(asyncio.CancelledError):
                await worker_task
        except Exception as e:
            logger.error(f"Error waiting for worker shutdown: {e}", exc_info=True)
            worker_task.cancel()  # Ensure cancellation on other errors
            with suppress(asyncio.CancelledError):
                await worker_task

    logger.info("--- RRQ Example Finished ---")


async def run_worker_async(worker: RRQWorker):
    """Helper function to run the worker's main loop asynchronously."""
    # We don't use worker.run() here because it's synchronous.
    # Instead, we directly await the async _run_loop method.
    try:
        await worker._run_loop()
    except Exception as e:
        logger.error(
            f"Worker {worker.worker_id} _run_loop exited with error: {e}", exc_info=True
        )
    finally:
        # Ensure resources are closed even if _run_loop fails unexpectedly
        await worker._close_resources()


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        logger.info("KeyboardInterrupt caught in __main__. Exiting.")