rrq 0.3.7__tar.gz → 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rrq-0.3.7 → rrq-0.4.0}/PKG-INFO +123 -6
- {rrq-0.3.7 → rrq-0.4.0}/README.md +121 -4
- {rrq-0.3.7 → rrq-0.4.0}/example/example_rrq_settings.py +23 -0
- {rrq-0.3.7 → rrq-0.4.0}/example/rrq_example.py +49 -1
- {rrq-0.3.7 → rrq-0.4.0}/pyproject.toml +7 -9
- rrq-0.4.0/rrq/__init__.py +14 -0
- rrq-0.4.0/rrq/cron.py +153 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/settings.py +5 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/worker.py +45 -0
- rrq-0.4.0/tests/test_cron.py +252 -0
- {rrq-0.3.7 → rrq-0.4.0}/tests/test_worker.py +242 -1
- {rrq-0.3.7 → rrq-0.4.0}/uv.lock +111 -107
- rrq-0.3.7/tests/__init__.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/.coverage +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/.gitignore +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/FUTURE.md +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/LICENSE +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/MANIFEST.in +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/cli.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/client.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/constants.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/exc.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/job.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/registry.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/rrq/store.py +0 -0
- {rrq-0.3.7/rrq → rrq-0.4.0/tests}/__init__.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/tests/test_cli.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/tests/test_client.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/tests/test_registry.py +0 -0
- {rrq-0.3.7 → rrq-0.4.0}/tests/test_store.py +0 -0
{rrq-0.3.7 → rrq-0.4.0}/PKG-INFO
RENAMED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: rrq
|
|
3
|
-
Version: 0.3.7
|
|
3
|
+
Version: 0.4.0
|
|
4
4
|
Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
|
|
5
5
|
Project-URL: Homepage, https://github.com/getresq/rrq
|
|
6
6
|
Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
|
|
@@ -20,7 +20,7 @@ Requires-Dist: pydantic>=2.11.4
|
|
|
20
20
|
Requires-Dist: redis[hiredis]<6,>=4.2.0
|
|
21
21
|
Requires-Dist: watchfiles>=0.19.0
|
|
22
22
|
Provides-Extra: dev
|
|
23
|
-
Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
|
|
23
|
+
Requires-Dist: pytest-asyncio>=1.0.0; extra == 'dev'
|
|
24
24
|
Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
|
|
25
25
|
Requires-Dist: pytest>=8.3.5; extra == 'dev'
|
|
26
26
|
Description-Content-Type: text/markdown
|
|
@@ -40,6 +40,7 @@ RRQ is a Python library for creating reliable job queues using Redis and `asynci
|
|
|
40
40
|
* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
|
|
41
41
|
* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
|
|
42
42
|
* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
|
|
43
|
+
* **Cron Jobs**: Periodic jobs can be defined in `RRQSettings.cron_jobs` using a simple cron syntax.
|
|
43
44
|
|
|
44
45
|
- Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.
|
|
45
46
|
|
|
@@ -138,6 +139,125 @@ if __name__ == "__main__":
|
|
|
138
139
|
|
|
139
140
|
You can run multiple instances of `worker_script.py` for concurrent processing.
|
|
140
141
|
|
|
142
|
+
## Cron Jobs
|
|
143
|
+
|
|
144
|
+
Add instances of `CronJob` to `RRQSettings.cron_jobs` to run periodic jobs. The
|
|
145
|
+
`schedule` string follows the typical five-field cron format `minute hour day-of-month month day-of-week`.
|
|
146
|
+
It supports the most common features from Unix cron:
|
|
147
|
+
|
|
148
|
+
- numeric values
|
|
149
|
+
- ranges (e.g. `8-11`)
|
|
150
|
+
- lists separated by commas (e.g. `mon,wed,fri`)
|
|
151
|
+
- step values using `/` (e.g. `*/15`)
|
|
152
|
+
- names for months and days (`jan-dec`, `sun-sat`)
|
|
153
|
+
|
|
154
|
+
Jobs are evaluated in UTC and run with minute resolution.
|
|
155
|
+
|
|
156
|
+
### Cron Schedule Examples
|
|
157
|
+
|
|
158
|
+
```python
|
|
159
|
+
# Every minute
|
|
160
|
+
"* * * * *"
|
|
161
|
+
|
|
162
|
+
# Every hour at minute 30
|
|
163
|
+
"30 * * * *"
|
|
164
|
+
|
|
165
|
+
# Every day at 2:30 AM
|
|
166
|
+
"30 2 * * *"
|
|
167
|
+
|
|
168
|
+
# Every Monday at 9:00 AM
|
|
169
|
+
"0 9 * * mon"
|
|
170
|
+
|
|
171
|
+
# Every 15 minutes
|
|
172
|
+
"*/15 * * * *"
|
|
173
|
+
|
|
174
|
+
# Every weekday at 6:00 PM
|
|
175
|
+
"0 18 * * mon-fri"
|
|
176
|
+
|
|
177
|
+
# First day of every month at midnight
|
|
178
|
+
"0 0 1 * *"
|
|
179
|
+
|
|
180
|
+
# Every 2 hours during business hours on weekdays
|
|
181
|
+
"0 9-17/2 * * mon-fri"
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
### Defining Cron Jobs
|
|
185
|
+
|
|
186
|
+
```python
|
|
187
|
+
from rrq.settings import RRQSettings
|
|
188
|
+
from rrq.cron import CronJob
|
|
189
|
+
|
|
190
|
+
# Define your cron jobs
|
|
191
|
+
cron_jobs = [
|
|
192
|
+
# Daily cleanup at 2 AM
|
|
193
|
+
CronJob(
|
|
194
|
+
function_name="daily_cleanup",
|
|
195
|
+
schedule="0 2 * * *",
|
|
196
|
+
args=["temp_files"],
|
|
197
|
+
kwargs={"max_age_days": 7}
|
|
198
|
+
),
|
|
199
|
+
|
|
200
|
+
# Weekly report every Monday at 9 AM
|
|
201
|
+
CronJob(
|
|
202
|
+
function_name="generate_weekly_report",
|
|
203
|
+
schedule="0 9 * * mon",
|
|
204
|
+
unique=True # Prevent duplicate reports if worker restarts
|
|
205
|
+
),
|
|
206
|
+
|
|
207
|
+
# Health check every 15 minutes on a specific queue
|
|
208
|
+
CronJob(
|
|
209
|
+
function_name="system_health_check",
|
|
210
|
+
schedule="*/15 * * * *",
|
|
211
|
+
queue_name="monitoring"
|
|
212
|
+
),
|
|
213
|
+
|
|
214
|
+
# Backup database every night at 1 AM
|
|
215
|
+
CronJob(
|
|
216
|
+
function_name="backup_database",
|
|
217
|
+
schedule="0 1 * * *",
|
|
218
|
+
kwargs={"backup_type": "incremental"}
|
|
219
|
+
),
|
|
220
|
+
]
|
|
221
|
+
|
|
222
|
+
# Add to your settings
|
|
223
|
+
rrq_settings = RRQSettings(
|
|
224
|
+
redis_dsn="redis://localhost:6379/0",
|
|
225
|
+
cron_jobs=cron_jobs
|
|
226
|
+
)
|
|
227
|
+
```
|
|
228
|
+
|
|
229
|
+
### Cron Job Handlers
|
|
230
|
+
|
|
231
|
+
Your cron job handlers are regular async functions, just like other job handlers:
|
|
232
|
+
|
|
233
|
+
```python
|
|
234
|
+
async def daily_cleanup(ctx, file_type: str, max_age_days: int = 7):
|
|
235
|
+
"""Clean up old files."""
|
|
236
|
+
job_id = ctx['job_id']
|
|
237
|
+
print(f"Job {job_id}: Cleaning up {file_type} files older than {max_age_days} days")
|
|
238
|
+
# Your cleanup logic here
|
|
239
|
+
return {"cleaned_files": 42, "status": "completed"}
|
|
240
|
+
|
|
241
|
+
async def generate_weekly_report(ctx):
|
|
242
|
+
"""Generate and send weekly report."""
|
|
243
|
+
job_id = ctx['job_id']
|
|
244
|
+
print(f"Job {job_id}: Generating weekly report")
|
|
245
|
+
# Your report generation logic here
|
|
246
|
+
return {"report_id": "weekly_2024_01", "status": "sent"}
|
|
247
|
+
|
|
248
|
+
# Register your handlers
|
|
249
|
+
from rrq.registry import JobRegistry
|
|
250
|
+
|
|
251
|
+
job_registry = JobRegistry()
|
|
252
|
+
job_registry.register("daily_cleanup", daily_cleanup)
|
|
253
|
+
job_registry.register("generate_weekly_report", generate_weekly_report)
|
|
254
|
+
|
|
255
|
+
# Add the registry to your settings
|
|
256
|
+
rrq_settings.job_registry = job_registry
|
|
257
|
+
```
|
|
258
|
+
|
|
259
|
+
**Note:** Cron jobs are automatically enqueued by the worker when they become due. The worker checks for due cron jobs every 30 seconds and enqueues them as regular jobs to be processed.
|
|
260
|
+
|
|
141
261
|
## Command Line Interface
|
|
142
262
|
|
|
143
263
|
RRQ provides a command-line interface (CLI) for managing workers and performing health checks:
|
|
@@ -178,7 +298,4 @@ RRQ can be configured in several ways, with the following precedence:
|
|
|
178
298
|
* **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
|
|
179
299
|
* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
|
|
180
300
|
* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
|
|
181
|
-
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `
|
|
182
|
-
* **`RRQSettings` (`settings.py`)**: A Pydantic `BaseSettings` model for configuring RRQ behavior (Redis DSN, queue names, timeouts, retry policies, concurrency, etc.). Loadable from environment variables (prefix `RRQ_`).
|
|
183
|
-
* **`constants.py`**: Defines shared constants like Redis key prefixes and default configuration values.
|
|
184
|
-
* **`exc.py`**: Defines custom exceptions, notably `RetryJob` which handlers can raise to explicitly request a retry, potentially with a custom delay.
|
|
301
|
+
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `RETRYING`).
|
|
@@ -13,6 +13,7 @@ RRQ is a Python library for creating reliable job queues using Redis and `asynci
|
|
|
13
13
|
* **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
|
|
14
14
|
* **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
|
|
15
15
|
* **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
|
|
16
|
+
* **Cron Jobs**: Periodic jobs can be defined in `RRQSettings.cron_jobs` using a simple cron syntax.
|
|
16
17
|
|
|
17
18
|
- Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.
|
|
18
19
|
|
|
@@ -111,6 +112,125 @@ if __name__ == "__main__":
|
|
|
111
112
|
|
|
112
113
|
You can run multiple instances of `worker_script.py` for concurrent processing.
|
|
113
114
|
|
|
115
|
+
## Cron Jobs
|
|
116
|
+
|
|
117
|
+
Add instances of `CronJob` to `RRQSettings.cron_jobs` to run periodic jobs. The
|
|
118
|
+
`schedule` string follows the typical five-field cron format `minute hour day-of-month month day-of-week`.
|
|
119
|
+
It supports the most common features from Unix cron:
|
|
120
|
+
|
|
121
|
+
- numeric values
|
|
122
|
+
- ranges (e.g. `8-11`)
|
|
123
|
+
- lists separated by commas (e.g. `mon,wed,fri`)
|
|
124
|
+
- step values using `/` (e.g. `*/15`)
|
|
125
|
+
- names for months and days (`jan-dec`, `sun-sat`)
|
|
126
|
+
|
|
127
|
+
Jobs are evaluated in UTC and run with minute resolution.
|
|
128
|
+
|
|
129
|
+
### Cron Schedule Examples
|
|
130
|
+
|
|
131
|
+
```python
|
|
132
|
+
# Every minute
|
|
133
|
+
"* * * * *"
|
|
134
|
+
|
|
135
|
+
# Every hour at minute 30
|
|
136
|
+
"30 * * * *"
|
|
137
|
+
|
|
138
|
+
# Every day at 2:30 AM
|
|
139
|
+
"30 2 * * *"
|
|
140
|
+
|
|
141
|
+
# Every Monday at 9:00 AM
|
|
142
|
+
"0 9 * * mon"
|
|
143
|
+
|
|
144
|
+
# Every 15 minutes
|
|
145
|
+
"*/15 * * * *"
|
|
146
|
+
|
|
147
|
+
# Every weekday at 6:00 PM
|
|
148
|
+
"0 18 * * mon-fri"
|
|
149
|
+
|
|
150
|
+
# First day of every month at midnight
|
|
151
|
+
"0 0 1 * *"
|
|
152
|
+
|
|
153
|
+
# Every 2 hours during business hours on weekdays
|
|
154
|
+
"0 9-17/2 * * mon-fri"
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
### Defining Cron Jobs
|
|
158
|
+
|
|
159
|
+
```python
|
|
160
|
+
from rrq.settings import RRQSettings
|
|
161
|
+
from rrq.cron import CronJob
|
|
162
|
+
|
|
163
|
+
# Define your cron jobs
|
|
164
|
+
cron_jobs = [
|
|
165
|
+
# Daily cleanup at 2 AM
|
|
166
|
+
CronJob(
|
|
167
|
+
function_name="daily_cleanup",
|
|
168
|
+
schedule="0 2 * * *",
|
|
169
|
+
args=["temp_files"],
|
|
170
|
+
kwargs={"max_age_days": 7}
|
|
171
|
+
),
|
|
172
|
+
|
|
173
|
+
# Weekly report every Monday at 9 AM
|
|
174
|
+
CronJob(
|
|
175
|
+
function_name="generate_weekly_report",
|
|
176
|
+
schedule="0 9 * * mon",
|
|
177
|
+
unique=True # Prevent duplicate reports if worker restarts
|
|
178
|
+
),
|
|
179
|
+
|
|
180
|
+
# Health check every 15 minutes on a specific queue
|
|
181
|
+
CronJob(
|
|
182
|
+
function_name="system_health_check",
|
|
183
|
+
schedule="*/15 * * * *",
|
|
184
|
+
queue_name="monitoring"
|
|
185
|
+
),
|
|
186
|
+
|
|
187
|
+
# Backup database every night at 1 AM
|
|
188
|
+
CronJob(
|
|
189
|
+
function_name="backup_database",
|
|
190
|
+
schedule="0 1 * * *",
|
|
191
|
+
kwargs={"backup_type": "incremental"}
|
|
192
|
+
),
|
|
193
|
+
]
|
|
194
|
+
|
|
195
|
+
# Add to your settings
|
|
196
|
+
rrq_settings = RRQSettings(
|
|
197
|
+
redis_dsn="redis://localhost:6379/0",
|
|
198
|
+
cron_jobs=cron_jobs
|
|
199
|
+
)
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
### Cron Job Handlers
|
|
203
|
+
|
|
204
|
+
Your cron job handlers are regular async functions, just like other job handlers:
|
|
205
|
+
|
|
206
|
+
```python
|
|
207
|
+
async def daily_cleanup(ctx, file_type: str, max_age_days: int = 7):
|
|
208
|
+
"""Clean up old files."""
|
|
209
|
+
job_id = ctx['job_id']
|
|
210
|
+
print(f"Job {job_id}: Cleaning up {file_type} files older than {max_age_days} days")
|
|
211
|
+
# Your cleanup logic here
|
|
212
|
+
return {"cleaned_files": 42, "status": "completed"}
|
|
213
|
+
|
|
214
|
+
async def generate_weekly_report(ctx):
|
|
215
|
+
"""Generate and send weekly report."""
|
|
216
|
+
job_id = ctx['job_id']
|
|
217
|
+
print(f"Job {job_id}: Generating weekly report")
|
|
218
|
+
# Your report generation logic here
|
|
219
|
+
return {"report_id": "weekly_2024_01", "status": "sent"}
|
|
220
|
+
|
|
221
|
+
# Register your handlers
|
|
222
|
+
from rrq.registry import JobRegistry
|
|
223
|
+
|
|
224
|
+
job_registry = JobRegistry()
|
|
225
|
+
job_registry.register("daily_cleanup", daily_cleanup)
|
|
226
|
+
job_registry.register("generate_weekly_report", generate_weekly_report)
|
|
227
|
+
|
|
228
|
+
# Add the registry to your settings
|
|
229
|
+
rrq_settings.job_registry = job_registry
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
**Note:** Cron jobs are automatically enqueued by the worker when they become due. The worker checks for due cron jobs every 30 seconds and enqueues them as regular jobs to be processed.
|
|
233
|
+
|
|
114
234
|
## Command Line Interface
|
|
115
235
|
|
|
116
236
|
RRQ provides a command-line interface (CLI) for managing workers and performing health checks:
|
|
@@ -151,7 +271,4 @@ RRQ can be configured in several ways, with the following precedence:
|
|
|
151
271
|
* **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
|
|
152
272
|
* **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
|
|
153
273
|
* **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
|
|
154
|
-
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `
|
|
155
|
-
* **`RRQSettings` (`settings.py`)**: A Pydantic `BaseSettings` model for configuring RRQ behavior (Redis DSN, queue names, timeouts, retry policies, concurrency, etc.). Loadable from environment variables (prefix `RRQ_`).
|
|
156
|
-
* **`constants.py`**: Defines shared constants like Redis key prefixes and default configuration values.
|
|
157
|
-
* **`exc.py`**: Defines custom exceptions, notably `RetryJob` which handlers can raise to explicitly request a retry, potentially with a custom delay.
|
|
274
|
+
* **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `RETRYING`).
|
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
import asyncio
|
|
4
4
|
import logging
|
|
5
5
|
|
|
6
|
+
from rrq.cron import CronJob
|
|
6
7
|
from rrq.settings import RRQSettings
|
|
7
8
|
|
|
8
9
|
logger = logging.getLogger("rrq")
|
|
@@ -34,5 +35,27 @@ rrq_settings = RRQSettings(
|
|
|
34
35
|
redis_dsn=redis_dsn,
|
|
35
36
|
on_startup=on_startup_hook,
|
|
36
37
|
on_shutdown=on_shutdown_hook,
|
|
38
|
+
# Example cron jobs - these would run periodically when a worker is running
|
|
39
|
+
cron_jobs=[
|
|
40
|
+
# Run a cleanup task every day at 2 AM
|
|
41
|
+
CronJob(
|
|
42
|
+
function_name="daily_cleanup",
|
|
43
|
+
schedule="0 2 * * *",
|
|
44
|
+
args=["cleanup_logs"],
|
|
45
|
+
kwargs={"max_age_days": 30}
|
|
46
|
+
),
|
|
47
|
+
# Send a status report every Monday at 9 AM
|
|
48
|
+
CronJob(
|
|
49
|
+
function_name="send_status_report",
|
|
50
|
+
schedule="0 9 * * mon",
|
|
51
|
+
unique=True # Prevent duplicate reports if worker restarts
|
|
52
|
+
),
|
|
53
|
+
# Health check every 15 minutes
|
|
54
|
+
CronJob(
|
|
55
|
+
function_name="health_check",
|
|
56
|
+
schedule="*/15 * * * *",
|
|
57
|
+
queue_name="monitoring" # Use a specific queue for monitoring tasks
|
|
58
|
+
),
|
|
59
|
+
]
|
|
37
60
|
)
|
|
38
61
|
|
|
@@ -8,6 +8,7 @@ from contextlib import suppress
|
|
|
8
8
|
from datetime import timedelta
|
|
9
9
|
|
|
10
10
|
from rrq.client import RRQClient
|
|
11
|
+
from rrq.cron import CronJob
|
|
11
12
|
from rrq.exc import RetryJob
|
|
12
13
|
from rrq.registry import JobRegistry
|
|
13
14
|
from rrq.settings import RRQSettings
|
|
@@ -62,6 +63,33 @@ async def retry_task(ctx, counter_limit: int):
|
|
|
62
63
|
return {"status": "completed_after_retries", "attempts": attempt}
|
|
63
64
|
|
|
64
65
|
|
|
66
|
+
# --- Example Cron Job Handlers ---
|
|
67
|
+
async def daily_cleanup(ctx, task_type: str, max_age_days: int = 30):
    """Example cron job handler for daily cleanup tasks."""
    job_id = ctx['job_id']
    logger.info(
        f"DAILY_CLEANUP (Job {job_id}): Running {task_type} cleanup, max age: {max_age_days} days"
    )
    # Pretend to do the actual cleanup work.
    await asyncio.sleep(0.5)
    logger.info(f"DAILY_CLEANUP (Job {job_id}): Cleanup completed")
    outcome = {
        "task_type": task_type,
        "max_age_days": max_age_days,
        "status": "completed",
    }
    return outcome
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
async def send_status_report(ctx):
    """Example cron job handler for sending status reports."""
    job_id = ctx['job_id']
    logger.info(f"STATUS_REPORT (Job {job_id}): Generating and sending status report")
    # Pretend the report takes a moment to build and deliver.
    await asyncio.sleep(0.3)
    logger.info(f"STATUS_REPORT (Job {job_id}): Status report sent")
    return {"report_type": "weekly", "status": "sent"}
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
async def health_check(ctx):
    """Example cron job handler for health checks."""
    job_id = ctx['job_id']
    logger.info(f"HEALTH_CHECK (Job {job_id}): Running system health check")
    # Pretend to probe the system.
    await asyncio.sleep(0.1)
    logger.info(f"HEALTH_CHECK (Job {job_id}): Health check completed - all systems OK")
    return {"status": "healthy", "timestamp": ctx.get("job_start_time")}
|
|
91
|
+
|
|
92
|
+
|
|
65
93
|
# --- Main Execution ---
|
|
66
94
|
async def main():
|
|
67
95
|
logger.info("--- Starting RRQ Example ---")
|
|
@@ -72,8 +100,24 @@ async def main():
|
|
|
72
100
|
default_max_retries=3, # Lower retries for example
|
|
73
101
|
worker_health_check_interval_seconds=5, # Frequent health check
|
|
74
102
|
worker_shutdown_grace_period_seconds=5,
|
|
103
|
+
# Example cron jobs - these will run periodically when the worker is running
|
|
104
|
+
cron_jobs=[
|
|
105
|
+
# Run a health check every 2 minutes (for demo purposes)
|
|
106
|
+
CronJob(
|
|
107
|
+
function_name="health_check",
|
|
108
|
+
schedule="*/2 * * * *", # Every 2 minutes
|
|
109
|
+
queue_name="monitoring"
|
|
110
|
+
),
|
|
111
|
+
# Send a status report every 5 minutes (for demo purposes)
|
|
112
|
+
CronJob(
|
|
113
|
+
function_name="send_status_report",
|
|
114
|
+
schedule="*/5 * * * *", # Every 5 minutes
|
|
115
|
+
unique=True # Prevent duplicate reports
|
|
116
|
+
),
|
|
117
|
+
]
|
|
75
118
|
)
|
|
76
119
|
logger.info(f"Using Redis DB: {settings.redis_dsn}")
|
|
120
|
+
logger.info(f"Configured {len(settings.cron_jobs)} cron jobs")
|
|
77
121
|
|
|
78
122
|
# Ensure Redis DB is clean before starting (optional, good for examples)
|
|
79
123
|
try:
|
|
@@ -90,6 +134,10 @@ async def main():
|
|
|
90
134
|
registry.register("handle_success", successful_task)
|
|
91
135
|
registry.register("handle_failure", failing_task)
|
|
92
136
|
registry.register("handle_retry", retry_task)
|
|
137
|
+
# Register cron job handlers
|
|
138
|
+
registry.register("daily_cleanup", daily_cleanup)
|
|
139
|
+
registry.register("send_status_report", send_status_report)
|
|
140
|
+
registry.register("health_check", health_check)
|
|
93
141
|
logger.info(f"Registered handlers: {registry.get_registered_functions()}")
|
|
94
142
|
|
|
95
143
|
# 3. Client
|
|
@@ -131,7 +179,7 @@ async def main():
|
|
|
131
179
|
worker = RRQWorker(
|
|
132
180
|
settings=settings,
|
|
133
181
|
job_registry=registry,
|
|
134
|
-
queues=[settings.default_queue_name, "high_priority"],
|
|
182
|
+
queues=[settings.default_queue_name, "high_priority", "monitoring"],
|
|
135
183
|
)
|
|
136
184
|
|
|
137
185
|
# 6. Run Worker (with graceful shutdown handling)
|
|
@@ -4,10 +4,8 @@ build-backend = "hatchling.build"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "rrq"
|
|
7
|
-
version = "0.3.7"
|
|
8
|
-
authors = [
|
|
9
|
-
{ name = "Mazdak Rezvani", email = "mazdak@me.com" },
|
|
10
|
-
]
|
|
7
|
+
version = "0.4.0"
|
|
8
|
+
authors = [{ name = "Mazdak Rezvani", email = "mazdak@me.com" }]
|
|
11
9
|
description = "RRQ is a Python library for creating reliable job queues using Redis and asyncio"
|
|
12
10
|
readme = "README.md"
|
|
13
11
|
requires-python = ">=3.11"
|
|
@@ -30,11 +28,7 @@ dependencies = [
|
|
|
30
28
|
]
|
|
31
29
|
|
|
32
30
|
[project.optional-dependencies]
|
|
33
|
-
dev = [
|
|
34
|
-
"pytest>=8.3.5",
|
|
35
|
-
"pytest-asyncio>=0.26.0",
|
|
36
|
-
"pytest-cov>=6.0.0",
|
|
37
|
-
]
|
|
31
|
+
dev = ["pytest>=8.3.5", "pytest-asyncio>=1.0.0", "pytest-cov>=6.0.0"]
|
|
38
32
|
|
|
39
33
|
[project.urls]
|
|
40
34
|
"Homepage" = "https://github.com/getresq/rrq"
|
|
@@ -44,4 +38,8 @@ dev = [
|
|
|
44
38
|
rrq = "rrq.cli:rrq"
|
|
45
39
|
|
|
46
40
|
[tool.pytest.ini_options]
|
|
41
|
+
asyncio_mode = "strict"
|
|
47
42
|
asyncio_default_fixture_loop_scope = "function"
|
|
43
|
+
|
|
44
|
+
[tool.pyrefly]
|
|
45
|
+
python_interpreter = ".venv/bin/python"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from .cron import CronJob, CronSchedule
|
|
2
|
+
from .worker import RRQWorker
|
|
3
|
+
from .client import RRQClient
|
|
4
|
+
from .registry import JobRegistry
|
|
5
|
+
from .settings import RRQSettings
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"CronJob",
|
|
9
|
+
"CronSchedule",
|
|
10
|
+
"RRQWorker",
|
|
11
|
+
"RRQClient",
|
|
12
|
+
"JobRegistry",
|
|
13
|
+
"RRQSettings",
|
|
14
|
+
]
|
rrq-0.4.0/rrq/cron.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from datetime import UTC, datetime, timedelta
|
|
4
|
+
from typing import Any, Optional, Sequence
|
|
5
|
+
|
|
6
|
+
from pydantic import BaseModel, Field, PrivateAttr
|
|
7
|
+
|
|
8
|
+
MONTH_NAMES = {
|
|
9
|
+
"jan": 1,
|
|
10
|
+
"feb": 2,
|
|
11
|
+
"mar": 3,
|
|
12
|
+
"apr": 4,
|
|
13
|
+
"may": 5,
|
|
14
|
+
"jun": 6,
|
|
15
|
+
"jul": 7,
|
|
16
|
+
"aug": 8,
|
|
17
|
+
"sep": 9,
|
|
18
|
+
"oct": 10,
|
|
19
|
+
"nov": 11,
|
|
20
|
+
"dec": 12,
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
WEEKDAY_NAMES = {
|
|
24
|
+
"sun": 0,
|
|
25
|
+
"mon": 1,
|
|
26
|
+
"tue": 2,
|
|
27
|
+
"wed": 3,
|
|
28
|
+
"thu": 4,
|
|
29
|
+
"fri": 5,
|
|
30
|
+
"sat": 6,
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _parse_value(value: str, names: dict[str, int], min_val: int, max_val: int) -> int:
|
|
35
|
+
if value.lower() in names:
|
|
36
|
+
return names[value.lower()]
|
|
37
|
+
num = int(value)
|
|
38
|
+
if names is WEEKDAY_NAMES and num == 7:
|
|
39
|
+
num = 0
|
|
40
|
+
if not (min_val <= num <= max_val):
|
|
41
|
+
raise ValueError(f"value {num} out of range {min_val}-{max_val}")
|
|
42
|
+
return num
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _parse_field(field: str, *, names: dict[str, int] | None, min_val: int, max_val: int) -> Sequence[int]:
|
|
46
|
+
names = names or {}
|
|
47
|
+
if field == "*":
|
|
48
|
+
return list(range(min_val, max_val + 1))
|
|
49
|
+
values: set[int] = set()
|
|
50
|
+
for part in field.split(','):
|
|
51
|
+
step = 1
|
|
52
|
+
if '/' in part:
|
|
53
|
+
base, step_str = part.split('/', 1)
|
|
54
|
+
step = int(step_str)
|
|
55
|
+
else:
|
|
56
|
+
base = part
|
|
57
|
+
if base == "*":
|
|
58
|
+
start, end = min_val, max_val
|
|
59
|
+
elif '-' in base:
|
|
60
|
+
a, b = base.split('-', 1)
|
|
61
|
+
start = _parse_value(a, names, min_val, max_val)
|
|
62
|
+
end = _parse_value(b, names, min_val, max_val)
|
|
63
|
+
else:
|
|
64
|
+
val = _parse_value(base, names, min_val, max_val)
|
|
65
|
+
start = end = val
|
|
66
|
+
if start > end:
|
|
67
|
+
raise ValueError(f"invalid range {base}")
|
|
68
|
+
for v in range(start, end + 1, step):
|
|
69
|
+
values.add(v)
|
|
70
|
+
return sorted(values)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class CronSchedule:
    """A parsed five-field cron expression, evaluated with minute resolution."""

    def __init__(self, expression: str) -> None:
        parts = expression.split()
        if len(parts) != 5:
            raise ValueError("Cron expression must have 5 fields")
        minute_f, hour_f, dom_f, month_f, dow_f = parts
        self.minutes = _parse_field(minute_f, names=None, min_val=0, max_val=59)
        self.hours = _parse_field(hour_f, names=None, min_val=0, max_val=23)
        self.dom = _parse_field(dom_f, names=None, min_val=1, max_val=31)
        self.months = _parse_field(month_f, names=MONTH_NAMES, min_val=1, max_val=12)
        self.dow = _parse_field(dow_f, names=WEEKDAY_NAMES, min_val=0, max_val=6)
        # Exact "*" day fields are remembered: they change how the two day
        # constraints combine (see _day_matches).
        self.dom_all = dom_f == "*"
        self.dow_all = dow_f == "*"

    def _day_matches(self, moment: datetime) -> bool:
        """Apply standard cron day semantics: OR when both day fields are restricted."""
        in_dom = moment.day in self.dom
        # datetime.weekday() counts Monday=0; cron counts Sunday=0.
        in_dow = (moment.weekday() + 1) % 7 in self.dow
        if self.dom_all and self.dow_all:
            return True
        if self.dom_all:
            # Only the day-of-week constraint applies.
            return in_dow
        if self.dow_all:
            # Only the day-of-month constraint applies.
            return in_dom
        # Both restricted: standard cron uses OR logic.
        return in_dom or in_dow

    def next_after(self, dt: datetime) -> datetime:
        """Return the first matching minute strictly after *dt*.

        Scans forward one minute at a time, so a schedule that can never
        match (e.g. Feb 30) would loop indefinitely.
        """
        candidate = dt.replace(second=0, microsecond=0) + timedelta(minutes=1)
        one_minute = timedelta(minutes=1)
        while not (
            candidate.month in self.months
            and candidate.hour in self.hours
            and candidate.minute in self.minutes
            and self._day_matches(candidate)
        ):
            candidate += one_minute
        return candidate
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class CronJob(BaseModel):
    """Simple cron job specification based on a cron schedule."""

    # Name of the registered handler to enqueue when the schedule fires.
    function_name: str
    schedule: str = Field(
        description="Cron expression 'm h dom mon dow'. Resolution is one minute."
    )
    # Positional and keyword arguments passed to the handler on every run.
    args: list[Any] = Field(default_factory=list)
    kwargs: dict[str, Any] = Field(default_factory=dict)
    # Target queue; None presumably means the worker's default queue — confirm in worker.
    queue_name: Optional[str] = None
    # When True, the job is enqueued as a unique job (per README: prevents
    # duplicate runs, e.g. after a worker restart).
    unique: bool = False

    # Next run time and parsed schedule are maintained at runtime
    next_run_time: Optional[datetime] = Field(default=None, exclude=True)
    _cron: CronSchedule | None = PrivateAttr(default=None)

    def model_post_init(self, __context: Any) -> None:  # type: ignore[override]
        # Parse eagerly so an invalid schedule fails at model construction time.
        self._cron = CronSchedule(self.schedule)

    def schedule_next(self, now: Optional[datetime] = None) -> None:
        """Compute the next run time strictly after *now*."""
        # Truncate to the minute so next_after() advances to the following minute.
        now = (now or datetime.now(UTC)).replace(second=0, microsecond=0)
        if self._cron is None:
            # Defensive re-parse (private attrs may be absent after copy/deserialization).
            self._cron = CronSchedule(self.schedule)
        self.next_run_time = self._cron.next_after(now)

    def due(self, now: Optional[datetime] = None) -> bool:
        """Return True once *now* has reached the scheduled next run time.

        Lazily initializes next_run_time on first call; after initialization
        the first due() in the same minute window returns False until the
        schedule's next matching minute arrives.
        """
        now = now or datetime.now(UTC)
        if self.next_run_time is None:
            self.schedule_next(now)
        # Fallback `or now` makes the comparison total even if next_run_time
        # is somehow still None (then now >= now is trivially True).
        return now >= (self.next_run_time or now)
|
|
@@ -21,6 +21,7 @@ from .constants import (
|
|
|
21
21
|
DEFAULT_UNIQUE_JOB_LOCK_TTL_SECONDS,
|
|
22
22
|
)
|
|
23
23
|
from .registry import JobRegistry
|
|
24
|
+
from .cron import CronJob
|
|
24
25
|
|
|
25
26
|
|
|
26
27
|
class RRQSettings(BaseSettings):
|
|
@@ -97,6 +98,10 @@ class RRQSettings(BaseSettings):
|
|
|
97
98
|
default=None,
|
|
98
99
|
description="Job registry instance, typically provided by the application.",
|
|
99
100
|
)
|
|
101
|
+
cron_jobs: list[CronJob] = Field(
|
|
102
|
+
default_factory=list,
|
|
103
|
+
description="Optional list of cron job specifications to run periodically.",
|
|
104
|
+
)
|
|
100
105
|
model_config = SettingsConfigDict(
|
|
101
106
|
env_prefix="RRQ_",
|
|
102
107
|
extra="ignore",
|