rrq-0.4.0.tar.gz → rrq-0.5.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,17 @@
+ {
+   "permissions": {
+     "allow": [
+       "Bash(find:*)",
+       "Bash(grep:*)",
+       "Bash(uv run pytest:*)",
+       "Bash(mkdir:*)",
+       "Bash(mv:*)",
+       "Bash(python -m pytest tests/test_worker.py::test_worker_handles_job_failure_max_retries_dlq -xvs)",
+       "Bash(rg:*)",
+       "Bash(uv run:*)",
+       "Bash(ruff format:*)",
+       "Bash(ruff check:*)"
+     ],
+     "deny": []
+   }
+ }
rrq-0.5.0/CLAUDE.md ADDED
@@ -0,0 +1,115 @@
+ ---
+ description: RRQ Development Guidelines
+ globs: ["**/*", "!tests/**/*"]
+ alwaysApply: true
+ ---
+ # RRQ Development Guidelines
+
+ ## Project Overview
+
+ **Important**: Also refer to:
+ - `tests/CLAUDE.md` - Testing guidelines
+ - `README.md` - Project setup and overview
+
+ ## Quick Start Commands
+ ```bash
+ # Backend
+ uv run pytest # Running tests
+ ruff check # Lint backend code
+ ruff format # Format backend code
+
+ # Package Management
+ uv add package_name # Add dependency
+ uv sync --extra dev # Sync dependencies
+ ```
+
+ ## Code References Format
+ **CRITICAL**: Always use VS Code clickable format for code references:
+ - `app/api/users.py:45` - Single line
+ - `app/models/job.py:120-135` - Line range
+ - Always use paths relative to project root
+
+ ### Python Code Style
+ - Python 3.10+ with comprehensive type hints
+ - Double quotes for strings
+ - Max line length: 88 characters
+ - PEP 8 naming: `snake_case` functions, `PascalCase` classes
+ - Pydantic V2 for data validation
+ - Import order: stdlib → third-party → local
+ - Docstrings for public interfaces
+ - Type annotations for all function signatures
+
+ ### Code Quality Practices
+ - Early returns over nested conditions
+ - Small, focused functions (single responsibility)
+ - Descriptive variable names
+ - Comprehensive error handling with specific exceptions
+ - Consistent async/await patterns
+ - Use `match/case` for complex conditionals
+
+ ## Self-Review Checklist for Large Changes
+
+ Before submitting significant backend changes:
+
+ ### Code Quality
+ - [ ] All functions have type hints
+ - [ ] Complex logic is well-commented
+ - [ ] No debug prints or commented code
+ - [ ] Follows existing patterns in codebase
+ - [ ] Proper error handling throughout
+ - [ ] Idiomatic code using Python, Asyncio, and Pydantic best practices
+
+ ### Testing
+ - [ ] Unit tests for new functionality
+ - [ ] Integration tests for API changes
+ - [ ] All tests pass with `--warnings-as-errors`
+ - [ ] Edge cases covered
+ - [ ] Mocked external dependencies
+
+ ### Performance
+ - [ ] Database queries are optimized (N+1 prevention)
+ - [ ] Async operations are properly awaited
+ - [ ] No blocking I/O in async contexts
+ - [ ] Background jobs for heavy operations
+
+ ### Security
+ - [ ] Input validation on all endpoints
+ - [ ] No sensitive data in logs
+ - [ ] SQL injection prevention
+
+ ### Documentation
+ - [ ] API endpoints documented
+ - [ ] Complex functions have docstrings
+ - [ ] Schema changes documented
+ - [ ] Migration files created if needed
+
+ ## Linting and Pre-commit
+
+ **Always run before committing:**
+ ```bash
+ # Format and lint
+ ruff format
+ ruff check --fix
+
+ ```
+
+ ## Important Development Rules
+
+ 1. **Never commit broken tests** - Fix root causes
+ 2. **Ask before large changes** - Especially cross-domain
+ 3. **Follow existing patterns** - Check similar code first
+ 4. **Quality over speed** - Do it right the first time
+ 5. **Security first** - Never expose sensitive data
+
+ ## Performance Considerations
+ - Profile before optimizing
+ - Use async correctly (no sync in async)
+ - Cache expensive operations
+ - Paginate large result sets
+ - Monitor query performance
+
+ ## Debugging Tips
+ - Use `uv run pytest --maxfail=1` for failing tests
+ - Use debugger with `import pdb; pdb.set_trace()`
+
+ Remember: If unsure about implementation, check existing code patterns first!
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rrq
- Version: 0.4.0
+ Version: 0.5.0
  Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
  Project-URL: Homepage, https://github.com/getresq/rrq
  Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
@@ -265,7 +265,8 @@ RRQ provides a command-line interface (CLI) for managing workers and performing
  - **`rrq worker run`** - Run an RRQ worker process.
    - `--settings` (optional): Specify the Python path to your settings object (e.g., `myapp.worker_config.rrq_settings`). If not provided, it will use the `RRQ_SETTINGS` environment variable or default to a basic `RRQSettings` object.
    - `--queue` (optional, multiple): Specify queue(s) to poll. Defaults to the `default_queue_name` in settings.
-   - `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit.
+   - `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit. Cannot be used with `--num-workers > 1`.
+   - `--num-workers` (optional, integer): Number of parallel worker processes to start. Defaults to the number of CPU cores available on the machine. Cannot be used with `--burst` mode.
  - **`rrq worker watch`** - Run an RRQ worker with auto-restart on file changes.
    - `--path` (optional): Directory path to watch for changes. Defaults to the current directory.
    - `--settings` (optional): Same as above.
@@ -238,7 +238,8 @@ RRQ provides a command-line interface (CLI) for managing workers and performing
  - **`rrq worker run`** - Run an RRQ worker process.
    - `--settings` (optional): Specify the Python path to your settings object (e.g., `myapp.worker_config.rrq_settings`). If not provided, it will use the `RRQ_SETTINGS` environment variable or default to a basic `RRQSettings` object.
    - `--queue` (optional, multiple): Specify queue(s) to poll. Defaults to the `default_queue_name` in settings.
-   - `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit.
+   - `--burst` (flag): Run the worker in burst mode to process one job or batch and then exit. Cannot be used with `--num-workers > 1`.
+   - `--num-workers` (optional, integer): Number of parallel worker processes to start. Defaults to the number of CPU cores available on the machine. Cannot be used with `--burst` mode.
  - **`rrq worker watch`** - Run an RRQ worker with auto-restart on file changes.
    - `--path` (optional): Directory path to watch for changes. Defaults to the current directory.
    - `--settings` (optional): Same as above.
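A brief usage sketch of the new `--num-workers` option, based only on the flag descriptions in the hunks above; the settings path reuses the `myapp.worker_config.rrq_settings` example already shown in the README, and the worker count of 4 is an arbitrary illustrative value:

```bash
# Start 4 parallel worker processes polling the default queue
rrq worker run --settings myapp.worker_config.rrq_settings --num-workers 4

# Burst mode processes one job or batch and then exits; per the docs above,
# it cannot be combined with --num-workers > 1, so no worker count is passed.
rrq worker run --settings myapp.worker_config.rrq_settings --burst
```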
@@ -1,4 +1,4 @@
- '''example_rrq_settings.py: Example RRQ Application Settings'''
+ """example_rrq_settings.py: Example RRQ Application Settings"""
 
  import asyncio
  import logging
@@ -16,20 +16,18 @@ logger.addHandler(console_handler)
  redis_dsn = "redis://localhost:6379/0"
 
 
-
-
  async def on_startup_hook():
      logger.info("Executing 'on_startup_hook' (application-specific startup)...")
      await asyncio.sleep(0.1)
      logger.info("'on_startup_hook' complete.")
 
+
  async def on_shutdown_hook():
      logger.info("Executing 'on_shutdown_hook' (application-specific shutdown)...")
      await asyncio.sleep(0.1)
      logger.info("'on_shutdown_hook' complete.")
 
 
-
  # RRQ Settings
  rrq_settings = RRQSettings(
      redis_dsn=redis_dsn,
@@ -42,20 +40,19 @@ rrq_settings = RRQSettings(
              function_name="daily_cleanup",
              schedule="0 2 * * *",
              args=["cleanup_logs"],
-             kwargs={"max_age_days": 30}
+             kwargs={"max_age_days": 30},
          ),
          # Send a status report every Monday at 9 AM
          CronJob(
              function_name="send_status_report",
              schedule="0 9 * * mon",
-             unique=True # Prevent duplicate reports if worker restarts
+             unique=True, # Prevent duplicate reports if worker restarts
          ),
          # Health check every 15 minutes
          CronJob(
              function_name="health_check",
              schedule="*/15 * * * *",
-             queue_name="monitoring" # Use a specific queue for monitoring tasks
+             queue_name="monitoring", # Use a specific queue for monitoring tasks
          ),
-     ]
+     ],
  )
-
@@ -76,7 +76,9 @@ async def daily_cleanup(ctx, task_type: str, max_age_days: int = 30):
 
  async def send_status_report(ctx):
      """Example cron job handler for sending status reports."""
-     logger.info(f"STATUS_REPORT (Job {ctx['job_id']}): Generating and sending status report")
+     logger.info(
+         f"STATUS_REPORT (Job {ctx['job_id']}): Generating and sending status report"
+     )
      await asyncio.sleep(0.3) # Simulate report generation
      logger.info(f"STATUS_REPORT (Job {ctx['job_id']}): Status report sent")
      return {"report_type": "weekly", "status": "sent"}
@@ -86,7 +88,9 @@ async def health_check(ctx):
      """Example cron job handler for health checks."""
      logger.info(f"HEALTH_CHECK (Job {ctx['job_id']}): Running system health check")
      await asyncio.sleep(0.1) # Simulate health check
-     logger.info(f"HEALTH_CHECK (Job {ctx['job_id']}): Health check completed - all systems OK")
+     logger.info(
+         f"HEALTH_CHECK (Job {ctx['job_id']}): Health check completed - all systems OK"
+     )
      return {"status": "healthy", "timestamp": ctx.get("job_start_time")}
 
 
@@ -106,15 +110,15 @@ async def main():
              CronJob(
                  function_name="health_check",
                  schedule="*/2 * * * *", # Every 2 minutes
-                 queue_name="monitoring"
+                 queue_name="monitoring",
              ),
              # Send a status report every 5 minutes (for demo purposes)
              CronJob(
                  function_name="send_status_report",
                  schedule="*/5 * * * *", # Every 5 minutes
-                 unique=True # Prevent duplicate reports
+                 unique=True, # Prevent duplicate reports
              ),
-         ]
+         ],
      )
      logger.info(f"Using Redis DB: {settings.redis_dsn}")
      logger.info(f"Configured {len(settings.cron_jobs)} cron jobs")
@@ -210,7 +214,8 @@
 
      # Wait for stop event or worker task completion (e.g., if it errors out)
      done, pending = await asyncio.wait(
-         [worker_task, asyncio.create_task(stop_event.wait())], return_when=asyncio.FIRST_COMPLETED
+         [worker_task, asyncio.create_task(stop_event.wait())],
+         return_when=asyncio.FIRST_COMPLETED,
      )
 
      logger.info("Stop event triggered or worker task finished.")
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
  [project]
  name = "rrq"
- version = "0.4.0"
+ version = "0.5.0"
  authors = [{ name = "Mazdak Rezvani", email = "mazdak@me.com" }]
  description = "RRQ is a Python library for creating reliable job queues using Redis and asyncio"
  readme = "README.md"