rrq 0.8.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. rrq-0.8.0/.coverage +0 -0
  2. rrq-0.8.0/.github/workflows/ci.yml +37 -0
  3. rrq-0.8.0/.gitignore +4 -0
  4. rrq-0.8.0/AGENTS.md +32 -0
  5. rrq-0.8.0/CLAUDE.md +32 -0
  6. rrq-0.8.0/LICENSE +13 -0
  7. rrq-0.8.0/MANIFEST.in +2 -0
  8. rrq-0.8.0/PKG-INFO +524 -0
  9. rrq-0.8.0/README.md +492 -0
  10. rrq-0.8.0/docs/CLI_REFERENCE.md +240 -0
  11. rrq-0.8.0/example/example_rrq_settings.py +58 -0
  12. rrq-0.8.0/example/rrq_example.py +262 -0
  13. rrq-0.8.0/pyproject.toml +51 -0
  14. rrq-0.8.0/rrq/__init__.py +14 -0
  15. rrq-0.8.0/rrq/cli.py +783 -0
  16. rrq-0.8.0/rrq/cli_commands/__init__.py +1 -0
  17. rrq-0.8.0/rrq/cli_commands/base.py +105 -0
  18. rrq-0.8.0/rrq/cli_commands/commands/__init__.py +1 -0
  19. rrq-0.8.0/rrq/cli_commands/commands/debug.py +551 -0
  20. rrq-0.8.0/rrq/cli_commands/commands/dlq.py +853 -0
  21. rrq-0.8.0/rrq/cli_commands/commands/jobs.py +516 -0
  22. rrq-0.8.0/rrq/cli_commands/commands/monitor.py +808 -0
  23. rrq-0.8.0/rrq/cli_commands/commands/queues.py +539 -0
  24. rrq-0.8.0/rrq/cli_commands/utils.py +162 -0
  25. rrq-0.8.0/rrq/client.py +196 -0
  26. rrq-0.8.0/rrq/constants.py +53 -0
  27. rrq-0.8.0/rrq/cron.py +213 -0
  28. rrq-0.8.0/rrq/exc.py +46 -0
  29. rrq-0.8.0/rrq/exporters/__init__.py +1 -0
  30. rrq-0.8.0/rrq/exporters/prometheus.py +90 -0
  31. rrq-0.8.0/rrq/exporters/statsd.py +60 -0
  32. rrq-0.8.0/rrq/hooks.py +250 -0
  33. rrq-0.8.0/rrq/integrations/__init__.py +1 -0
  34. rrq-0.8.0/rrq/integrations/ddtrace.py +456 -0
  35. rrq-0.8.0/rrq/integrations/logfire.py +23 -0
  36. rrq-0.8.0/rrq/integrations/otel.py +325 -0
  37. rrq-0.8.0/rrq/job.py +113 -0
  38. rrq-0.8.0/rrq/registry.py +71 -0
  39. rrq-0.8.0/rrq/settings.py +123 -0
  40. rrq-0.8.0/rrq/store.py +924 -0
  41. rrq-0.8.0/rrq/telemetry.py +129 -0
  42. rrq-0.8.0/rrq/worker.py +1181 -0
  43. rrq-0.8.0/tests/CLAUDE.md +115 -0
  44. rrq-0.8.0/tests/__init__.py +0 -0
  45. rrq-0.8.0/tests/cli_commands/__init__.py +1 -0
  46. rrq-0.8.0/tests/cli_commands/conftest.py +340 -0
  47. rrq-0.8.0/tests/cli_commands/test_debug_commands.py +504 -0
  48. rrq-0.8.0/tests/cli_commands/test_dlq_commands.py +722 -0
  49. rrq-0.8.0/tests/cli_commands/test_integration.py +436 -0
  50. rrq-0.8.0/tests/cli_commands/test_job_commands.py +566 -0
  51. rrq-0.8.0/tests/cli_commands/test_monitor_commands.py +819 -0
  52. rrq-0.8.0/tests/cli_commands/test_monitor_dlq_integration.py +372 -0
  53. rrq-0.8.0/tests/cli_commands/test_queue_commands.py +391 -0
  54. rrq-0.8.0/tests/cli_commands/test_queue_dlq_integration.py +462 -0
  55. rrq-0.8.0/tests/test_cli.py +785 -0
  56. rrq-0.8.0/tests/test_client.py +416 -0
  57. rrq-0.8.0/tests/test_cron.py +256 -0
  58. rrq-0.8.0/tests/test_registry.py +83 -0
  59. rrq-0.8.0/tests/test_store.py +1038 -0
  60. rrq-0.8.0/tests/test_worker.py +1192 -0
  61. rrq-0.8.0/uv.lock +740 -0
rrq-0.8.0/.coverage ADDED
Binary file
@@ -0,0 +1,37 @@
1
+ name: CI
2
+
3
+ on:
4
+ pull_request:
5
+
6
+ jobs:
7
+ test:
8
+ runs-on: ubuntu-latest
9
+ services:
10
+ redis:
11
+ image: redis:7
12
+ ports:
13
+ - 6379:6379
14
+ options: >-
15
+ --health-cmd "redis-cli ping"
16
+ --health-interval 10s
17
+ --health-timeout 5s
18
+ --health-retries 5
19
+ steps:
20
+ - name: Checkout code
21
+ uses: actions/checkout@v4
22
+
23
+ - name: Set up Python
24
+ uses: actions/setup-python@v4
25
+ with:
26
+ python-version: '3.11'
27
+ cache: 'pip'
28
+
29
+ - name: Install uv CLI
30
+ run: |
31
+ python -m pip install --upgrade pip uv
32
+
33
+ - name: Sync dependencies
34
+ run: uv sync --extra dev
35
+
36
+ - name: Run tests
37
+ run: uv run pytest --disable-warnings -q --maxfail=1
rrq-0.8.0/.gitignore ADDED
@@ -0,0 +1,4 @@
1
+ .venv/
2
+ __pycache__/
3
+ .git/
4
+ .vscode/
rrq-0.8.0/AGENTS.md ADDED
@@ -0,0 +1,32 @@
1
+ # RRQ
2
+
3
+ Redis-based async job queue library for Python.
4
+
5
+ See @tests/CLAUDE.md for testing guidelines.
6
+
7
+ ## Commands
8
+ ```bash
9
+ uv run pytest # Run tests
10
+ uv run pytest --maxfail=1 # Debug failing tests
11
+ uv run ruff format && uv run ruff check --fix # Format and lint (run before commits)
12
+ uv run ty check # Type check (must pass before commits)
13
+ uv add <package> # Add dependency
14
+ ```
15
+
16
+ ## Code Style
17
+ - Python 3.11+, double quotes, 88 char lines
18
+ - Type hints on all functions, Pydantic V2 for validation
19
+ - `snake_case` functions, `PascalCase` classes
20
+ - Import order: stdlib → third-party → local
21
+ - Early returns, `match/case` for complex conditionals
22
+ - No blocking I/O in async contexts
23
+
24
+ ## Code References
25
+ Use VS Code clickable format: `rrq/queue.py:45` or `rrq/worker.py:120-135`
26
+
27
+ ## Rules
28
+ - Never commit broken tests
29
+ - Use `uv` for all Python operations
30
+ - Follow existing patterns in codebase
31
+ - No sensitive data in logs
32
+ - Ask before large cross-domain changes
rrq-0.8.0/CLAUDE.md ADDED
@@ -0,0 +1,32 @@
1
+ # RRQ
2
+
3
+ Redis-based async job queue library for Python.
4
+
5
+ See @tests/CLAUDE.md for testing guidelines.
6
+
7
+ ## Commands
8
+ ```bash
9
+ uv run pytest # Run tests
10
+ uv run pytest --maxfail=1 # Debug failing tests
11
+ uv run ruff format && uv run ruff check --fix # Format and lint (run before commits)
12
+ uv run ty check # Type check (must pass before commits)
13
+ uv add <package> # Add dependency
14
+ ```
15
+
16
+ ## Code Style
17
+ - Python 3.11+, double quotes, 88 char lines
18
+ - Type hints on all functions, Pydantic V2 for validation
19
+ - `snake_case` functions, `PascalCase` classes
20
+ - Import order: stdlib → third-party → local
21
+ - Early returns, `match/case` for complex conditionals
22
+ - No blocking I/O in async contexts
23
+
24
+ ## Code References
25
+ Use VS Code clickable format: `rrq/queue.py:45` or `rrq/worker.py:120-135`
26
+
27
+ ## Rules
28
+ - Never commit broken tests
29
+ - Use `uv` for all Python operations
30
+ - Follow existing patterns in codebase
31
+ - No sensitive data in logs
32
+ - Ask before large cross-domain changes
rrq-0.8.0/LICENSE ADDED
@@ -0,0 +1,13 @@
1
+ Copyright 2025 Mazdak Rezvani
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
rrq-0.8.0/MANIFEST.in ADDED
@@ -0,0 +1,2 @@
1
+ README.md
2
+ LICENSE
rrq-0.8.0/PKG-INFO ADDED
@@ -0,0 +1,524 @@
1
+ Metadata-Version: 2.4
2
+ Name: rrq
3
+ Version: 0.8.0
4
+ Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
5
+ Project-URL: Homepage, https://github.com/getresq/rrq
6
+ Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
7
+ Author-email: Mazdak Rezvani <mazdak@me.com>
8
+ License-File: LICENSE
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.11
12
+ Classifier: Programming Language :: Python :: 3.12
13
+ Classifier: Programming Language :: Python :: 3.13
14
+ Classifier: Programming Language :: Python :: 3.14
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Classifier: Topic :: System :: Distributed Computing
17
+ Classifier: Topic :: System :: Monitoring
18
+ Requires-Python: >=3.11
19
+ Requires-Dist: click>=8.1.3
20
+ Requires-Dist: pydantic-settings>=2.9.1
21
+ Requires-Dist: pydantic>=2.11.4
22
+ Requires-Dist: redis[hiredis]>=4.2.0
23
+ Requires-Dist: rich>=14.0.0
24
+ Requires-Dist: watchfiles>=0.19.0
25
+ Provides-Extra: dev
26
+ Requires-Dist: pytest-asyncio>=1.0.0; extra == 'dev'
27
+ Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
28
+ Requires-Dist: pytest>=8.3.5; extra == 'dev'
29
+ Requires-Dist: ruff==0.14.9; extra == 'dev'
30
+ Requires-Dist: ty==0.0.1-alpha.26; extra == 'dev'
31
+ Description-Content-Type: text/markdown
32
+
33
+ # RRQ: Reliable Redis Queue
34
+
35
+ RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
36
+
37
+ ## 🆕 What's New in v0.7.1
38
+
39
+ - **Comprehensive CLI Tools**: 15+ new commands for monitoring, debugging, and management
40
+ - **Real-time Monitoring Dashboard**: Interactive dashboard with `rrq monitor`
41
+ - **Enhanced DLQ Management**: Sophisticated filtering and requeuing capabilities
42
+ - **Bug Fixes**: Critical fix for unique job enqueue failures with proper deferral
43
+
44
+ ## Requirements
45
+
46
+ - Python 3.11 or higher
47
+ - Redis 5.0 or higher
48
+ - asyncio-compatible environment
49
+
50
+ ## Key Features
51
+
52
+ * **At-Least-Once Semantics**: Uses Redis locks to ensure a job is processed by only one worker at a time. If a worker crashes or shuts down mid-processing, the lock expires, and the job *should* be re-processed (though re-queueing on unclean shutdown isn't implemented here yet - graceful shutdown *does* re-queue).
53
+ * **Automatic Retries with Backoff**: Jobs that fail with standard exceptions are automatically retried based on `max_retries` settings, using exponential backoff for delays.
54
+ * **Explicit Retries**: Handlers can raise `RetryJob` to control retry attempts and delays.
55
+ * **Job Timeouts**: Jobs exceeding their configured timeout (`job_timeout_seconds` or `default_job_timeout_seconds`) are terminated and moved to the DLQ.
56
+ * **Dead Letter Queue (DLQ)**: Jobs that fail permanently (max retries reached, fatal error, timeout) are moved to a single global DLQ list in Redis. Each failed job retains its original queue information, allowing for filtered inspection and selective requeuing.
57
+ * **Job Uniqueness**: The `_unique_key` parameter in `enqueue` prevents duplicate jobs based on a custom key within a specified TTL.
58
+ * **Graceful Shutdown**: Workers listen for SIGINT/SIGTERM and attempt to finish active jobs within a grace period before exiting. Interrupted jobs are re-queued.
59
+ * **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
60
+ * **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
61
+ * **Cron Jobs**: Periodic jobs can be defined in `RRQSettings.cron_jobs` using a simple cron syntax.
62
+ * **Comprehensive Monitoring**: Built-in CLI tools for monitoring queues, inspecting jobs, and debugging with real-time dashboards and beautiful table output.
63
+ * **Development Tools**: Debug commands for generating test data, stress testing, and cleaning up development environments.
64
+
65
+ - Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.
66
+
67
+ - To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:
68
+
69
+ ```python
70
+ await client.enqueue(
71
+ "process_updates",
72
+ item_id=123,
73
+ _unique_key="update:123",
74
+ _defer_by=10,
75
+ )
76
+ ```
77
+
78
+ ## Basic Usage
79
+
80
+ *(See [`rrq_example.py`](https://github.com/GetResQ/rrq/tree/master/example) in the project root for a runnable example)*
81
+
82
+ **1. Define Handlers:**
83
+
84
+ ```python
85
+ # handlers.py
86
+ import asyncio
87
+ from rrq.exc import RetryJob
88
+
89
+ async def my_task(ctx, message: str):
90
+ job_id = ctx['job_id']
91
+ attempt = ctx['job_try']
92
+ print(f"Processing job {job_id} (Attempt {attempt}): {message}")
93
+ await asyncio.sleep(1)
94
+ if attempt < 3 and message == "retry_me":
95
+ raise RetryJob("Needs another go!")
96
+ print(f"Finished job {job_id}")
97
+ return {"result": f"Processed: {message}"}
98
+ ```
99
+
100
+ **2. Register Handlers:**
101
+
102
+ ```python
103
+ # main_setup.py (or wherever you initialize)
104
+ from rrq.registry import JobRegistry
105
+ from . import handlers # Assuming handlers.py is in the same directory
106
+
107
+ job_registry = JobRegistry()
108
+ job_registry.register("process_message", handlers.my_task)
109
+ ```
110
+
111
+ **3. Configure Settings:**
112
+
113
+ ```python
114
+ # config.py
115
+ from rrq.settings import RRQSettings
116
+
117
+ # Loads from environment variables (RRQ_REDIS_DSN, etc.) or uses defaults
118
+ rrq_settings = RRQSettings()
119
+ # Or override directly:
120
+ # rrq_settings = RRQSettings(redis_dsn="redis://localhost:6379/1")
121
+ ```
122
+
123
+ **4. Enqueue Jobs:**
124
+
125
+ ```python
126
+ # enqueue_script.py
127
+ import asyncio
128
+ from rrq.client import RRQClient
129
+ from config import rrq_settings # Import your settings
130
+
131
+ async def enqueue_jobs():
132
+ client = RRQClient(settings=rrq_settings)
133
+ await client.enqueue("process_message", "Hello RRQ!")
134
+ await client.enqueue("process_message", "retry_me")
135
+ await client.close()
136
+
137
+ if __name__ == "__main__":
138
+ asyncio.run(enqueue_jobs())
139
+ ```
140
+
141
+ **5. Run a Worker:**
142
+
143
+ Note: You typically don't need to write your own worker script; the `rrq`
144
+ command-line interface can run workers for you.
145
+
146
+ ```python
147
+ # worker_script.py
148
+ import asyncio
149
+
150
+ from rrq.worker import RRQWorker
151
+ from config import rrq_settings # Import your settings
152
+ from main_setup import job_registry # Import your registry
153
+
154
+ # Create worker instance
155
+ worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)
156
+
157
+ # Run the worker (blocking)
158
+ if __name__ == "__main__":
159
+ asyncio.run(worker.run())
160
+ ```
161
+
162
+ You can run multiple instances of `worker_script.py` for concurrent processing.
163
+
164
+ ## Cron Jobs
165
+
166
+ Add instances of `CronJob` to `RRQSettings.cron_jobs` to run periodic jobs. The
167
+ `schedule` string follows the typical five-field cron format `minute hour day-of-month month day-of-week`.
168
+ It supports the most common features from Unix cron:
169
+
170
+ - numeric values
171
+ - ranges (e.g. `8-11`)
172
+ - lists separated by commas (e.g. `mon,wed,fri`)
173
+ - step values using `/` (e.g. `*/15`)
174
+ - names for months and days (`jan-dec`, `sun-sat`)
175
+
176
+ Jobs are evaluated in the server's timezone and run with minute resolution.
177
+
178
+ ### Cron Schedule Examples
179
+
180
+ ```python
181
+ # Every minute
182
+ "* * * * *"
183
+
184
+ # Every hour at minute 30
185
+ "30 * * * *"
186
+
187
+ # Every day at 2:30 AM
188
+ "30 2 * * *"
189
+
190
+ # Every Monday at 9:00 AM
191
+ "0 9 * * mon"
192
+
193
+ # Every 15 minutes
194
+ "*/15 * * * *"
195
+
196
+ # Every weekday at 6:00 PM
197
+ "0 18 * * mon-fri"
198
+
199
+ # First day of every month at midnight
200
+ "0 0 1 * *"
201
+
202
+ # Every 2 hours during business hours on weekdays
203
+ "0 9-17/2 * * mon-fri"
204
+ ```
205
+
206
+ ### Defining Cron Jobs
207
+
208
+ ```python
209
+ from rrq.settings import RRQSettings
210
+ from rrq.cron import CronJob
211
+
212
+ # Define your cron jobs
213
+ cron_jobs = [
214
+ # Daily cleanup at 2 AM
215
+ CronJob(
216
+ function_name="daily_cleanup",
217
+ schedule="0 2 * * *",
218
+ args=["temp_files"],
219
+ kwargs={"max_age_days": 7}
220
+ ),
221
+
222
+ # Weekly report every Monday at 9 AM
223
+ CronJob(
224
+ function_name="generate_weekly_report",
225
+ schedule="0 9 * * mon",
226
+ unique=True # Prevent duplicate reports if worker restarts
227
+ ),
228
+
229
+ # Health check every 15 minutes on a specific queue
230
+ CronJob(
231
+ function_name="system_health_check",
232
+ schedule="*/15 * * * *",
233
+ queue_name="monitoring"
234
+ ),
235
+
236
+ # Backup database every night at 1 AM
237
+ CronJob(
238
+ function_name="backup_database",
239
+ schedule="0 1 * * *",
240
+ kwargs={"backup_type": "incremental"}
241
+ ),
242
+ ]
243
+
244
+ # Add to your settings
245
+ rrq_settings = RRQSettings(
246
+ redis_dsn="redis://localhost:6379/0",
247
+ cron_jobs=cron_jobs
248
+ )
249
+ ```
250
+
251
+ ### Cron Job Handlers
252
+
253
+ Your cron job handlers are regular async functions, just like other job handlers:
254
+
255
+ ```python
256
+ async def daily_cleanup(ctx, file_type: str, max_age_days: int = 7):
257
+ """Clean up old files."""
258
+ job_id = ctx['job_id']
259
+ print(f"Job {job_id}: Cleaning up {file_type} files older than {max_age_days} days")
260
+ # Your cleanup logic here
261
+ return {"cleaned_files": 42, "status": "completed"}
262
+
263
+ async def generate_weekly_report(ctx):
264
+ """Generate and send weekly report."""
265
+ job_id = ctx['job_id']
266
+ print(f"Job {job_id}: Generating weekly report")
267
+ # Your report generation logic here
268
+ return {"report_id": "weekly_2024_01", "status": "sent"}
269
+
270
+ # Register your handlers
271
+ from rrq.registry import JobRegistry
272
+
273
+ job_registry = JobRegistry()
274
+ job_registry.register("daily_cleanup", daily_cleanup)
275
+ job_registry.register("generate_weekly_report", generate_weekly_report)
276
+
277
+ # Add the registry to your settings
278
+ rrq_settings.job_registry = job_registry
279
+ ```
280
+
281
+ **Note:** Cron jobs are automatically enqueued by the worker when they become due. The worker checks for due cron jobs every 30 seconds and enqueues them as regular jobs to be processed.
282
+
283
+ ## Dead Letter Queue (DLQ) Management
284
+
285
+ RRQ uses a single global Dead Letter Queue to store jobs that have failed permanently. Jobs in the DLQ retain their original queue information, allowing for sophisticated filtering and management.
286
+
287
+ ### DLQ Structure
288
+
289
+ - **Global DLQ**: One DLQ per RRQ instance (configurable via `default_dlq_name`)
290
+ - **Queue Preservation**: Each failed job remembers its original queue name
291
+ - **Filtering**: Jobs can be filtered by original queue, function name, error patterns, and time ranges
292
+ - **Inspection**: Full job details including arguments, errors, and execution timeline
293
+
294
+ ### Common DLQ Workflows
295
+
296
+ #### Investigating Failures
297
+ ```bash
298
+ # Get overall DLQ statistics
299
+ rrq dlq stats
300
+
301
+ # List recent failures from a specific queue
302
+ rrq dlq list --queue urgent --limit 10
303
+
304
+ # Group failures by function
305
+ rrq dlq list --function send_email
306
+
307
+ # Inspect a specific failed job
308
+ rrq dlq inspect job_abc123
309
+ ```
310
+
311
+ #### Requeuing Failed Jobs
312
+ ```bash
313
+ # Preview what would be requeued (dry run)
314
+ rrq dlq requeue --queue urgent --dry-run
315
+
316
+ # Requeue all failures from urgent queue
317
+ rrq dlq requeue --queue urgent --all
318
+
319
+ # Requeue specific function failures with limit
320
+ rrq dlq requeue --function send_email --limit 10
321
+
322
+ # Requeue single job to different queue
323
+ rrq dlq requeue --job-id abc123 --target-queue retry_queue
324
+ ```
325
+
326
+ #### Monitoring DLQ in Real-time
327
+ ```bash
328
+ # Monitor includes DLQ statistics panel
329
+ rrq monitor
330
+
331
+ # Queue stats show DLQ count per original queue
332
+ rrq queue stats
333
+ ```
334
+
335
+ ## Command Line Interface
336
+
337
+ RRQ provides a comprehensive command-line interface (CLI) for managing workers, monitoring queues, and debugging.
338
+
339
+ 📖 **[Full CLI Reference Documentation](docs/CLI_REFERENCE.md)**
340
+
341
+ ### Quick Examples
342
+ ```bash
343
+ # Use default settings (localhost Redis)
344
+ rrq queue list
345
+
346
+ # Use custom settings
347
+ rrq queue list --settings myapp.config.rrq_settings
348
+
349
+ # Use environment variable
350
+ export RRQ_SETTINGS=myapp.config.rrq_settings
351
+ rrq monitor
352
+
353
+ # Debug workflow
354
+ rrq debug generate-jobs --count 100 --queue urgent
355
+ rrq queue inspect urgent --limit 10
356
+ rrq monitor --queues urgent --refresh 0.5
357
+
358
+ # DLQ management workflow
359
+ rrq dlq list --queue urgent --limit 10 # List failed jobs from urgent queue
360
+ rrq dlq stats # Show DLQ statistics and error patterns
361
+ rrq dlq inspect <job_id> # Inspect specific failed job
362
+ rrq dlq requeue --queue urgent --dry-run # Preview requeue of urgent queue jobs
363
+ rrq dlq requeue --queue urgent --limit 5 # Requeue 5 jobs from urgent queue
364
+
365
+ # Advanced DLQ filtering and management
366
+ rrq dlq list --function send_email --limit 20 # List failed email jobs
367
+ rrq dlq list --queue urgent --function process_data # Filter by queue AND function
368
+ rrq dlq requeue --function send_email --all # Requeue all failed email jobs
369
+ rrq dlq requeue --job-id abc123 --target-queue retry # Requeue specific job to retry queue
370
+ ```
371
+
372
+ ## Performance and Limitations
373
+
374
+ ### Monitoring Performance Considerations
375
+
376
+ RRQ's monitoring and statistics commands are designed for operational visibility but have some performance considerations for large-scale deployments:
377
+
378
+ #### Queue Statistics (`rrq queue stats`)
379
+ - **Pending Job Counts**: Very fast, uses Redis `ZCARD` operation
380
+ - **Active/Completed/Failed Counts**: Requires scanning job records in Redis which can be slow for large datasets
381
+ - **Optimization**: Use `--max-scan` parameter to limit scanning (default: 1,000 jobs)
382
+ ```bash
383
+ # Fast scan for quick overview
384
+ rrq queue stats --max-scan 500
385
+
386
+ # Complete scan (may be slow)
387
+ rrq queue stats --max-scan 0
388
+ ```
389
+
390
+ #### DLQ Operations (`rrq dlq`)
391
+ - **Job Listing**: Uses batch fetching with Redis pipelines for efficiency
392
+ - **Optimization**: Use `--batch-size` parameter to control memory vs. performance trade-offs
393
+ ```bash
394
+ # Smaller batches for memory-constrained environments
395
+ rrq dlq list --batch-size 50
396
+
397
+ # Larger batches for better performance
398
+ rrq dlq list --batch-size 200
399
+ ```
400
+
401
+ #### Real-time Monitoring (`rrq monitor`)
402
+ - **Error Message Truncation**: Newest errors truncated to 50 characters, error patterns to 50 characters for display consistency
403
+ - **DLQ Statistics**: Updates in real-time but may impact Redis performance with very large DLQs
404
+
405
+ ### Full Metrics Requirements
406
+
407
+ For comprehensive job lifecycle tracking and historical analytics, consider these architectural additions:
408
+
409
+ 1. **Job History Tracking**:
410
+ - Store completed/failed job summaries in a separate Redis structure or external database
411
+ - Implement job completion event logging for time-series analytics
412
+
413
+ 2. **Active Job Monitoring**:
414
+ - Enhanced worker health tracking with job-level visibility
415
+ - Real-time active job registry for immediate status reporting
416
+
417
+ 3. **Throughput Calculation**:
418
+ - Time-series data collection for accurate throughput metrics
419
+ - Queue-specific performance trend tracking
420
+
421
+ 4. **Scalable Statistics**:
422
+ - Consider Redis Streams or time-series databases for high-frequency job event tracking
423
+ - Implement sampling strategies for large-scale deployments
424
+
425
+ The current implementation prioritizes operational simplicity and immediate visibility over comprehensive historical analytics. For production monitoring at scale, complement RRQ's built-in tools with external monitoring systems.
426
+
427
+ ## Configuration
428
+
429
+ RRQ can be configured in several ways, with the following precedence:
430
+
431
+ 1. **Command-Line Argument (`--settings`)**: Directly specify the settings object path via the CLI. This takes the highest precedence.
432
+ 2. **Environment Variable (`RRQ_SETTINGS`)**: Set the `RRQ_SETTINGS` environment variable to point to your settings object path. Used if `--settings` is not provided.
433
+ 3. **Default Settings**: If neither of the above is provided, RRQ will instantiate a default `RRQSettings` object, which can still be influenced by environment variables starting with `RRQ_`.
434
+ 4. **Environment Variables (Prefix `RRQ_`)**: Individual settings can be overridden by environment variables starting with `RRQ_`, which are automatically picked up by the `RRQSettings` object.
435
+ 5. **.env File**: If `python-dotenv` is installed, RRQ will attempt to load a `.env` file from the current working directory or parent directories. System environment variables take precedence over `.env` variables.
436
+
437
+ **Important Note on `job_registry`**: The `job_registry` attribute in your `RRQSettings` object is **critical** for RRQ to function. It must be an instance of `JobRegistry` and is used to register job handlers. Without a properly configured `job_registry`, workers will not know how to process jobs, and most operations will fail. Ensure it is set in your settings object to map job names to their respective handler functions.
438
+
439
+ ## Telemetry (Datadog / OTEL / Logfire)
440
+
441
+ RRQ supports optional distributed tracing for enqueue and job execution. Enable the
442
+ integration in both the producer and worker processes to get end-to-end traces
443
+ across the Redis queue.
444
+
445
+ ### Datadog (ddtrace)
446
+
447
+ ```python
448
+ from rrq.integrations.ddtrace import enable as enable_rrq_ddtrace
449
+
450
+ enable_rrq_ddtrace(service="myapp-rrq")
451
+ ```
452
+
453
+ This only instruments RRQ spans + propagation; it does **not** call
454
+ `ddtrace.patch_all()`. Configure `ddtrace` in your app as you already do.
455
+
456
+ ### Logfire
457
+
458
+ ```python
459
+ import logfire
460
+ from rrq.integrations.logfire import enable as enable_rrq_logfire
461
+
462
+ logfire.configure(service_name="myapp-rrq")
463
+ enable_rrq_logfire(service_name="myapp-rrq")
464
+ ```
465
+
466
+ ### OpenTelemetry (generic)
467
+
468
+ ```python
469
+ from rrq.integrations.otel import enable as enable_rrq_otel
470
+
471
+ enable_rrq_otel(service_name="myapp-rrq")
472
+ ```
473
+
474
+ ### Comprehensive CLI Command System
475
+ - **New modular CLI architecture** with dedicated command modules for better organization
476
+ - **Enhanced monitoring capabilities** with real-time dashboards and beautiful table output
477
+ - **Extensive DLQ management** commands for inspecting, filtering, and requeuing failed jobs
478
+ - **Job lifecycle management** with detailed inspection and control commands
479
+ - **Queue management** with statistics, purging, and migration capabilities
480
+ - **Debug utilities** for development and testing including stress testing and data generation
481
+
482
+ ## 📚 New CLI Commands
483
+
484
+ ### Monitor Commands
485
+ - `rrq monitor` - Real-time dashboard with queue stats, worker health, and DLQ monitoring
486
+ - `rrq monitor workers` - Detailed worker status and health monitoring
487
+ - `rrq monitor jobs` - Active job tracking and monitoring
488
+
489
+ ### DLQ Commands
490
+ - `rrq dlq list` - List failed jobs with filtering by queue, function, and time
491
+ - `rrq dlq stats` - DLQ statistics including error patterns and queue distribution
492
+ - `rrq dlq inspect` - Detailed inspection of failed jobs
493
+ - `rrq dlq requeue` - Requeue failed jobs with dry-run support
494
+ - `rrq dlq purge` - Clean up old failed jobs
495
+
496
+ ### Queue Commands
497
+ - `rrq queue list` - List all queues with job counts
498
+ - `rrq queue stats` - Detailed queue statistics and throughput metrics
499
+ - `rrq queue inspect` - Inspect pending jobs in queues
500
+ - `rrq queue purge` - Purge jobs from queues with safety confirmations
501
+ - `rrq queue migrate` - Move jobs between queues
502
+
503
+ ### Job Commands
504
+ - `rrq job list` - List jobs with status filtering
505
+ - `rrq job inspect` - Detailed job information including timeline
506
+ - `rrq job result` - Retrieve job results
507
+ - `rrq job cancel` - Cancel active jobs
508
+ - `rrq job retry` - Manually retry failed jobs
509
+ - `rrq job delete` - Delete job records
510
+
511
+ ### Debug Commands
512
+ - `rrq debug generate-jobs` - Generate test jobs for development
513
+ - `rrq debug stress-test` - Stress test the system
514
+ - `rrq debug cleanup` - Clean up test data
515
+ - `rrq debug redis-info` - Redis server information and diagnostics
516
+
517
+ ## Core Components
518
+
519
+ * **`RRQClient` (`client.py`)**: Used to enqueue jobs onto specific queues. Supports deferring jobs (by time delta or specific datetime), assigning custom job IDs, and enforcing job uniqueness via keys.
520
+ * **`RRQWorker` (`worker.py`)**: The process that polls queues, fetches jobs, executes the corresponding handler functions, and manages the job lifecycle based on success, failure, retries, or timeouts. Handles graceful shutdown via signals (SIGINT, SIGTERM).
521
+ * **`JobRegistry` (`registry.py`)**: A simple registry to map string function names (used when enqueuing) to the actual asynchronous handler functions the worker should execute.
522
+ * **`JobStore` (`store.py`)**: An abstraction layer handling all direct interactions with Redis. It manages job definitions (Hashes), queues (Sorted Sets), processing locks (Strings with TTL), unique job locks, and worker health checks.
523
+ * **`Job` (`job.py`)**: A Pydantic model representing a job, containing its ID, handler name, arguments, status, retry counts, timestamps, results, etc.
524
+ * **`JobStatus` (`job.py`)**: An Enum defining the possible states of a job (`PENDING`, `ACTIVE`, `COMPLETED`, `FAILED`, `DEFERRED`).