rrq-0.2.5-py3-none-any.whl → rrq-0.3.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rrq/settings.py CHANGED
@@ -5,7 +5,6 @@ Settings can be loaded from environment variables (with a prefix of `RRQ_`) or
5
5
  from a .env file. Sensible defaults are provided for most settings.
6
6
  """
7
7
 
8
- # Import Callable and Awaitable for type hinting hooks
9
8
  from typing import Awaitable, Callable, Optional
10
9
 
11
10
  from pydantic import Field
@@ -95,7 +94,8 @@ class RRQSettings(BaseSettings):
95
94
  description="Grace period (in seconds) for active job tasks to finish during worker shutdown.",
96
95
  )
97
96
  job_registry: Optional[JobRegistry] = Field(
98
- default=None, description="Job registry instance, typically provided by the application."
97
+ default=None,
98
+ description="Job registry instance, typically provided by the application.",
99
99
  )
100
100
  model_config = SettingsConfigDict(
101
101
  env_prefix="RRQ_",
@@ -104,4 +104,3 @@ class RRQSettings(BaseSettings):
104
104
  # env_file=".env",
105
105
  # env_file_encoding='utf-8'
106
106
  )
107
-
rrq/store.py CHANGED
@@ -38,6 +38,23 @@ class JobStore:
38
38
  self.redis = AsyncRedis.from_url(
39
39
  settings.redis_dsn, decode_responses=False
40
40
  ) # Work with bytes initially
41
+
42
+ def _format_queue_key(self, queue_name: str) -> str:
43
+ """Normalize a queue name or key into a Redis key for ZSET queues."""
44
+
45
+ # If already a full key, use it directly
46
+ if queue_name.startswith(QUEUE_KEY_PREFIX):
47
+ return queue_name
48
+ return f"{QUEUE_KEY_PREFIX}{queue_name}"
49
+
50
+ def _format_dlq_key(self, dlq_name: str) -> str:
51
+ """Normalize a DLQ name or key into a Redis key for DLQ lists."""
52
+ from .constants import DLQ_KEY_PREFIX
53
+
54
+ # If already a full key, use it directly
55
+ if dlq_name.startswith(DLQ_KEY_PREFIX):
56
+ return dlq_name
57
+ return f"{DLQ_KEY_PREFIX}{dlq_name}"
41
58
 
42
59
  async def aclose(self):
43
60
  """Closes the Redis connection pool associated with this store."""
@@ -83,9 +100,6 @@ class JobStore:
83
100
  job: The Job object to save.
84
101
  """
85
102
  job_key = f"{JOB_KEY_PREFIX}{job.id}"
86
- # print(
87
- # f"DEBUG JobStore.save_job_definition (ENTRY): job.id={job.id}, job.job_args={job.job_args}, job.job_kwargs={job.job_kwargs}, type(job.job_args)={type(job.job_args)}"
88
- # )
89
103
 
90
104
  # Dump model excluding fields handled manually
91
105
  job_data_dict = job.model_dump(
@@ -111,9 +125,6 @@ class JobStore:
111
125
  if "id" not in final_mapping_for_hset:
112
126
  final_mapping_for_hset["id"] = job.id
113
127
 
114
- # print(
115
- # f"!!! RRQ_JOB_STORE_SAVE (PRINT) (JobID:{job.id}) -> Mapping for HSET: { {k: str(v)[:50] + '...' if isinstance(v, str) and len(v) > 50 else v for k, v in final_mapping_for_hset.items()} }"
116
- # )
117
128
  if final_mapping_for_hset: # Avoid HSET with empty mapping
118
129
  await self.redis.hset(job_key, mapping=final_mapping_for_hset)
119
130
  logger.debug(f"Saved job definition for {job.id} to Redis hash {job_key}.")
@@ -153,7 +164,6 @@ class JobStore:
153
164
  if job_args_str and job_args_str.lower() != "null":
154
165
  try:
155
166
  job_args_list = json.loads(job_args_str)
156
- # print(f"DEBUG get_job_def: Parsed job_args_list = {job_args_list}")
157
167
  except json.JSONDecodeError:
158
168
  logger.error(
159
169
  f"Failed to JSON decode 'job_args' for job {job_id} from string: '{job_args_str}'",
@@ -163,7 +173,6 @@ class JobStore:
163
173
  if job_kwargs_str and job_kwargs_str.lower() != "null":
164
174
  try:
165
175
  job_kwargs_dict = json.loads(job_kwargs_str)
166
- # print(f"DEBUG get_job_def: Parsed job_kwargs_dict = {job_kwargs_dict}")
167
176
  except json.JSONDecodeError:
168
177
  logger.error(
169
178
  f"Failed to JSON decode 'job_kwargs' for job {job_id} from string: '{job_kwargs_str}'",
@@ -196,12 +205,6 @@ class JobStore:
196
205
  )
197
206
  validated_job.result = result_obj
198
207
 
199
- # print(
200
- # f"DEBUG get_job_def (POST-MANUAL-ASSIGN): job_id={job_id}, job.job_args='{validated_job.job_args}', type={type(validated_job.job_args)}"
201
- # )
202
- # print(
203
- # f"!!! RRQ_JOB_STORE_GET (POST_CREATE_VALIDATED) -> JobID:{validated_job.id}, Status:{validated_job.status.value if validated_job.status else None}, Retries:{validated_job.current_retries}"
204
- # )
205
208
  logger.debug(f"Successfully retrieved and parsed job {validated_job.id}")
206
209
  return validated_job
207
210
  except Exception as e_val:
@@ -224,7 +227,7 @@ class JobStore:
224
227
  job_id: The ID of the job to add.
225
228
  score: The score (float) determining the job's position/priority in the queue.
226
229
  """
227
- queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
230
+ queue_key = self._format_queue_key(queue_name)
228
231
  await self.redis.zadd(
229
232
  queue_key, {job_id.encode("utf-8"): score}
230
233
  ) # Store job_id as bytes
@@ -243,7 +246,7 @@ class JobStore:
243
246
  Returns:
244
247
  A list of job IDs as strings.
245
248
  """
246
- queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
249
+ queue_key = self._format_queue_key(queue_name)
247
250
  job_ids_bytes = await self.redis.zrange(queue_key, start, end)
248
251
  return [job_id.decode("utf-8") for job_id in job_ids_bytes]
249
252
 
@@ -259,7 +262,7 @@ class JobStore:
259
262
  """
260
263
  if count <= 0:
261
264
  return []
262
- queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
265
+ queue_key = self._format_queue_key(queue_name)
263
266
  now_ms = int(datetime.now(UTC).timestamp() * 1000)
264
267
  # Fetch jobs with score from -inf up to current time, limit by count
265
268
  job_ids_bytes = await self.redis.zrangebyscore(
@@ -348,7 +351,7 @@ class JobStore:
348
351
  completion_time: The timestamp when the job failed permanently.
349
352
  """
350
353
  job_key = f"{JOB_KEY_PREFIX}{job_id}"
351
- dlq_redis_key = f"{QUEUE_KEY_PREFIX}{dlq_name}"
354
+ dlq_redis_key = self._format_dlq_key(dlq_name)
352
355
 
353
356
  # Ensure complex fields are properly handled if needed (error could be complex)
354
357
  # For now, assuming simple string error message
@@ -365,6 +368,42 @@ class JobStore:
365
368
  pipe.expire(job_key, DEFAULT_DLQ_RESULT_TTL_SECONDS)
366
369
  results = await pipe.execute()
367
370
  logger.info(f"Moved job {job_id} to DLQ '{dlq_redis_key}'. Results: {results}")
371
+
372
+ async def requeue_dlq(
373
+ self,
374
+ dlq_name: str,
375
+ target_queue: str,
376
+ limit: int | None = None,
377
+ ) -> int:
378
+ """Requeue jobs from the Dead Letter Queue back into a live queue.
379
+
380
+ Pops jobs from the DLQ list and adds them to the target queue with current timestamp.
381
+
382
+ Args:
383
+ dlq_name: Name of the DLQ (without prefix).
384
+ target_queue: Name of the target queue (without prefix).
385
+ limit: Maximum number of jobs to requeue; all if None.
386
+
387
+ Returns:
388
+ Number of jobs requeued.
389
+ """
390
+ jobs_requeued = 0
391
+ dlq_key = self._format_dlq_key(dlq_name)
392
+ # Continue popping until limit is reached or DLQ is empty
393
+ while limit is None or jobs_requeued < limit:
394
+ job_id_bytes = await self.redis.rpop(dlq_key)
395
+ if not job_id_bytes:
396
+ break
397
+ job_id = job_id_bytes.decode("utf-8")
398
+ # Use current time for re-enqueue score
399
+ now_ms = int(datetime.now(UTC).timestamp() * 1000)
400
+ await self.add_job_to_queue(
401
+ self._format_queue_key(target_queue),
402
+ job_id,
403
+ now_ms,
404
+ )
405
+ jobs_requeued += 1
406
+ return jobs_requeued
368
407
 
369
408
  async def get_job_lock_owner(self, job_id: str) -> Optional[str]:
370
409
  """Gets the current owner (worker ID) of a job's processing lock, if held.
@@ -389,7 +428,7 @@ class JobStore:
389
428
  Returns:
390
429
  The number of elements removed (0 or 1).
391
430
  """
392
- queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
431
+ queue_key = self._format_queue_key(queue_name)
393
432
  removed_count = await self.redis.zrem(queue_key, job_id.encode("utf-8"))
394
433
  count = int(removed_count) # Ensure int
395
434
  if count > 0:
@@ -469,16 +508,12 @@ class JobStore:
469
508
 
470
509
  # Serialize result to JSON string
471
510
  try:
472
- # Use pydantic-core for robust serialization if available, else standard json
511
+ # Use pydantic JSON serialization if available, else standard JSON dump
473
512
  if hasattr(result, "model_dump_json"):
474
513
  result_str = result.model_dump_json()
475
- elif isinstance(result, str):
476
- result_str = result # Store plain strings directly if not JSON-like?
477
- # Let's stick to JSON encoding everything for consistency in save/load.
478
- # If it's already a string, json.dumps adds quotes.
479
- result_str = json.dumps(result)
480
514
  else:
481
- result_str = json.dumps(result, default=str) # Handle datetimes etc.
515
+ # Always JSON-encode the result, converting unknown types to strings
516
+ result_str = json.dumps(result, default=str)
482
517
  except TypeError as e:
483
518
  logger.error(
484
519
  f"Failed to serialize result for job {job_id}: {e}", exc_info=True
rrq/worker.py CHANGED
@@ -48,6 +48,7 @@ class RRQWorker:
48
48
  job_registry: JobRegistry,
49
49
  queues: Optional[list[str]] = None,
50
50
  worker_id: Optional[str] = None,
51
+ burst: bool = False,
51
52
  ):
52
53
  """Initializes the RRQWorker.
53
54
 
@@ -73,6 +74,8 @@ class RRQWorker:
73
74
  worker_id
74
75
  or f"{DEFAULT_WORKER_ID_PREFIX}{os.getpid()}_{uuid.uuid4().hex[:6]}"
75
76
  )
77
+ # Burst mode: process existing jobs then exit
78
+ self.burst = burst
76
79
 
77
80
  self._semaphore = asyncio.Semaphore(self.settings.worker_concurrency)
78
81
  self._running_tasks: set[asyncio.Task] = set()
@@ -144,7 +147,14 @@ class RRQWorker:
144
147
  f"Worker {self.worker_id} polling for up to {jobs_to_fetch} jobs..."
145
148
  )
146
149
  self.status = "polling"
147
- await self._poll_for_jobs(jobs_to_fetch)
150
+ # Poll for jobs and get count of jobs started
151
+ fetched_count = await self._poll_for_jobs(jobs_to_fetch)
152
+ # In burst mode, exit when no new jobs and no tasks running
153
+ if self.burst and fetched_count == 0 and not self._running_tasks:
154
+ logger.info(
155
+ f"Worker {self.worker_id} burst mode complete: no more jobs."
156
+ )
157
+ break
148
158
  else:
149
159
  if self.status != "idle (concurrency limit)":
150
160
  logger.debug(
@@ -224,6 +234,8 @@ class RRQWorker:
224
234
  exc_info=True,
225
235
  )
226
236
  await asyncio.sleep(1) # Avoid tight loop on polling error
237
+ # For burst mode, return number of jobs fetched in this poll
238
+ return fetched_count
227
239
 
228
240
  async def _try_process_job(self, job_id: str, queue_name: str) -> bool:
229
241
  """Attempts to lock, fetch definition, and start the execution task for a specific job.
@@ -805,7 +817,8 @@ class RRQWorker:
805
817
  delay_seconds = min(max_delay, base_delay * (2 ** (retry_attempt - 1)))
806
818
  delay_ms = int(delay_seconds * 1000)
807
819
  logger.debug(
808
- f"Calculated backoff for job {job.id} (attempt {retry_attempt}): {delay_ms}ms"
820
+ f"Calculated backoff for job {job.id} (attempt {retry_attempt}): "
821
+ f"base_delay={base_delay}s, max_delay={max_delay}s -> {delay_ms}ms"
809
822
  )
810
823
  return delay_ms
811
824
 
@@ -893,5 +906,6 @@ class RRQWorker:
893
906
  if self.client: # Check if client exists before closing
894
907
  await self.client.close()
895
908
  if self.job_store:
896
- await self.job_store.close()
909
+ # Close the Redis connection pool
910
+ await self.job_store.aclose()
897
911
  logger.info(f"[{self.worker_id}] RRQ worker closed.")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: rrq
3
- Version: 0.2.5
3
+ Version: 0.3.6
4
4
  Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
5
5
  Project-URL: Homepage, https://github.com/getresq/rrq
6
6
  Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
@@ -21,17 +21,12 @@ Requires-Dist: redis[hiredis]<6,>=4.2.0
21
21
  Requires-Dist: watchfiles>=0.19.0
22
22
  Provides-Extra: dev
23
23
  Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
24
+ Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
24
25
  Requires-Dist: pytest>=8.3.5; extra == 'dev'
25
26
  Description-Content-Type: text/markdown
26
27
 
27
28
  # RRQ: Reliable Redis Queue
28
29
 
29
- ____ ____ ___
30
- | _ \ | _ \ / _ \
31
- | |_) | | |_) | | | | |
32
- | _ < | _ < | |_| |
33
- |_| \_\ |_| \_\ \__\_\
34
-
35
30
  RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
36
31
 
37
32
  ## Core Components
@@ -58,10 +53,20 @@ RRQ is a Python library for creating reliable job queues using Redis and `asynci
58
53
  * **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
59
54
  * **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
60
55
  *Note: Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.*
56
+ *To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:*
57
+
58
+ ```python
59
+ await client.enqueue(
60
+ "process_updates",
61
+ item_id=123,
62
+ _unique_key="update:123",
63
+ _defer_by=10,
64
+ )
65
+ ```
61
66
 
62
67
  ## Basic Usage
63
68
 
64
- *(See [`rrq_example.py`](examples/rrq_example.py) in the project root for a runnable example)*
69
+ *(See the runnable [example](https://github.com/GetResQ/rrq/tree/master/example) in the project repository)*
65
70
 
66
71
  **1. Define Handlers:**
67
72
 
@@ -165,10 +170,9 @@ rrq <command> [options]
165
170
 
166
171
  - **`worker run`**: Run an RRQ worker process to process jobs from queues.
167
172
  ```bash
168
- rrq worker run [--burst] [--detach] --settings <settings_path>
173
+ rrq worker run [--burst] --settings <settings_path>
169
174
  ```
170
175
  - `--burst`: Run in burst mode (process one job/batch then exit).
171
- - `--detach`: Run the worker in the background.
172
176
  - `--settings`: Python settings path for application worker settings (e.g., `myapp.worker_config.rrq_settings`).
173
177
 
174
178
  - **`worker watch`**: Run an RRQ worker with auto-restart on file changes in a specified directory.
@@ -0,0 +1,15 @@
1
+ rrq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ rrq/cli.py,sha256=_LbaAH_w2a0VNRR0EctuE4afl-wccvMY2w2VbehFDEQ,16980
3
+ rrq/client.py,sha256=5_bmZ05LKIfY9WFSKU-nYawEupsnrnHT2HewXfC2Ahg,7831
4
+ rrq/constants.py,sha256=F_uZgBI3h00MctnEjBjiCGMrg5jUaz5Bz9I1vkyqNrs,1654
5
+ rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
6
+ rrq/job.py,sha256=eUbl33QDqDMXPKpo-0dl0Mp29LWWmtbBgRw0sclcwJ4,4011
7
+ rrq/registry.py,sha256=E9W_zx3QiKTBwMOGearaNpDKBDB87JIn0RlMQ3sAcP0,2925
8
+ rrq/settings.py,sha256=BPKP4XjG7z475gqRgHZt4-IvvOs8uZefq4fPfD2Bepk,4350
9
+ rrq/store.py,sha256=teO0Af8hzBiu7-dFn6_2lz5X90LAZXmtg0VDZuQoAwk,24972
10
+ rrq/worker.py,sha256=y0UTziZVh4QbOPv24b8cqbm_xDBM0HtJLwPNYsJPWnE,40706
11
+ rrq-0.3.6.dist-info/METADATA,sha256=MKJ-uoveQQVVI4p_RhRA1Kk-KN9_J348gGYY572HUY0,9224
12
+ rrq-0.3.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
13
+ rrq-0.3.6.dist-info/entry_points.txt,sha256=f8eFjk2ygDSyu9USwXGj5IM8xeyQqZgDa1rSrCj4Mis,36
14
+ rrq-0.3.6.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
15
+ rrq-0.3.6.dist-info/RECORD,,
@@ -1,2 +1,2 @@
1
1
  [console_scripts]
2
- rrq = rrq.rrq:rrq
2
+ rrq = rrq.cli:rrq
rrq/rrq.py DELETED
@@ -1,328 +0,0 @@
1
- """RRQ: Reliable Redis Queue Command Line Interface"""
2
-
3
- import asyncio
4
- import importlib
5
- import logging
6
- import os
7
- import signal
8
- import subprocess
9
- import sys
10
- from contextlib import suppress
11
-
12
- import click
13
- import redis.exceptions
14
- from watchfiles import awatch
15
-
16
- from .constants import HEALTH_KEY_PREFIX
17
- from .settings import RRQSettings
18
- from .store import JobStore
19
- from .worker import RRQWorker
20
-
21
- logger = logging.getLogger(__name__)
22
-
23
- # Helper to load settings for commands
24
- def _load_app_settings(settings_object_path: str | None = None) -> RRQSettings:
25
- """Load the settings object from the given path.
26
- If not provided, the RRQ_SETTINGS environment variable will be used.
27
- If the environment variable is not set, will create a default settings object.
28
- RRQ Setting objects, automatically pick up ENVIRONMENT variables starting with RRQ_.
29
-
30
- Args:
31
- settings_object_path: A string representing the path to the settings object. (e.g. "myapp.worker_config.rrq_settings").
32
-
33
- Returns:
34
- The RRQSettings object.
35
- """
36
- try:
37
- if settings_object_path is None:
38
- settings_object_path = os.getenv("RRQ_SETTINGS")
39
-
40
- if settings_object_path is None:
41
- return RRQSettings()
42
-
43
- # Split into module path and object name
44
- parts = settings_object_path.split(".")
45
- settings_object_name = parts[-1]
46
- settings_object_module_path = ".".join(parts[:-1])
47
-
48
- # Import the module
49
- settings_object_module = importlib.import_module(settings_object_module_path)
50
-
51
- # Get the object
52
- settings_object = getattr(settings_object_module, settings_object_name)
53
-
54
- return settings_object
55
- except ImportError:
56
- click.echo(click.style(f"ERROR: Could not import settings object '{settings_object_path}'. Make sure it is in PYTHONPATH.", fg="red"), err=True)
57
- sys.exit(1)
58
- except Exception as e:
59
- click.echo(click.style(f"ERROR: Unexpected error processing settings object '{settings_object_path}': {e}", fg="red"), err=True)
60
- sys.exit(1)
61
-
62
-
63
- # --- Health Check ---
64
- async def check_health_async_impl(settings_object_path: str | None = None) -> bool:
65
- """Performs health check for RRQ workers."""
66
- rrq_settings = _load_app_settings(settings_object_path)
67
-
68
- logger.info("Performing RRQ worker health check...")
69
- job_store = None
70
- try:
71
- job_store = JobStore(settings=rrq_settings)
72
- await job_store.redis.ping()
73
- logger.debug(f"Successfully connected to Redis: {rrq_settings.redis_dsn}")
74
-
75
- health_key_pattern = f"{HEALTH_KEY_PREFIX}*"
76
- worker_keys = [key_bytes.decode("utf-8") async for key_bytes in job_store.redis.scan_iter(match=health_key_pattern)]
77
-
78
- if not worker_keys:
79
- click.echo(click.style("Worker Health Check: FAIL (No active workers found)", fg="red"))
80
- return False
81
-
82
- click.echo(click.style(f"Worker Health Check: Found {len(worker_keys)} active worker(s):", fg="green"))
83
- for key in worker_keys:
84
- worker_id = key.split(HEALTH_KEY_PREFIX)[1]
85
- health_data, ttl = await job_store.get_worker_health(worker_id)
86
- if health_data:
87
- status = health_data.get("status", "N/A")
88
- active_jobs = health_data.get("active_jobs", "N/A")
89
- timestamp = health_data.get("timestamp", "N/A")
90
- click.echo(
91
- f" - Worker ID: {click.style(worker_id, bold=True)}\n"
92
- f" Status: {status}\n"
93
- f" Active Jobs: {active_jobs}\n"
94
- f" Last Heartbeat: {timestamp}\n"
95
- f" TTL: {ttl if ttl is not None else 'N/A'} seconds"
96
- )
97
- else:
98
- click.echo(f" - Worker ID: {click.style(worker_id, bold=True)} - Health data missing/invalid. TTL: {ttl if ttl is not None else 'N/A'}s")
99
- return True
100
- except redis.exceptions.ConnectionError as e:
101
- logger.error(f"Redis connection failed during health check: {e}", exc_info=True)
102
- click.echo(click.style(f"Worker Health Check: FAIL - Redis connection error: {e}", fg="red"))
103
- return False
104
- except Exception as e:
105
- logger.error(f"An unexpected error occurred during health check: {e}", exc_info=True)
106
- click.echo(click.style(f"Worker Health Check: FAIL - Unexpected error: {e}", fg="red"))
107
- return False
108
- finally:
109
- if job_store:
110
- await job_store.aclose()
111
-
112
- # --- Process Management ---
113
- def start_rrq_worker_subprocess(is_detached: bool = False, settings_object_path: str | None = None) -> subprocess.Popen | None:
114
- """Start an RRQ worker process."""
115
- command = ["rrq", "worker", "run"]
116
- if settings_object_path:
117
- command.extend(["--settings", settings_object_path])
118
- else:
119
- raise ValueError("start_rrq_worker_subprocess called without settings_object_path!")
120
-
121
- logger.info(f"Starting worker subprocess with command: {' '.join(command)}")
122
- if is_detached:
123
- process = subprocess.Popen(
124
- command,
125
- start_new_session=True,
126
- stdout=subprocess.DEVNULL,
127
- stderr=subprocess.DEVNULL,
128
- stdin=subprocess.DEVNULL,
129
- )
130
- logger.info(f"RRQ worker started in background with PID: {process.pid}")
131
- else:
132
- process = subprocess.Popen(
133
- command,
134
- start_new_session=True,
135
- stdout=sys.stdout,
136
- stderr=sys.stderr,
137
- )
138
-
139
- return process
140
-
141
-
142
- def terminate_worker_process(process: subprocess.Popen | None, logger: logging.Logger) -> None:
143
- if not process or process.pid is None:
144
- logger.debug("No active worker process to terminate.")
145
- return
146
-
147
- try:
148
- if process.poll() is not None:
149
- logger.debug(f"Worker process {process.pid} already terminated (poll returned exit code: {process.returncode}).")
150
- return
151
-
152
- pgid = os.getpgid(process.pid)
153
- logger.info(f"Terminating worker process group for PID {process.pid} (PGID {pgid})...")
154
- os.killpg(pgid, signal.SIGTERM)
155
- process.wait(timeout=5)
156
- except subprocess.TimeoutExpired:
157
- logger.warning(f"Worker process {process.pid} did not terminate gracefully (SIGTERM timeout), sending SIGKILL.")
158
- with suppress(ProcessLookupError):
159
- os.killpg(os.getpgid(process.pid), signal.SIGKILL)
160
- except Exception as e:
161
- logger.error(f"Unexpected error checking worker process {process.pid}: {e}")
162
-
163
-
164
- async def watch_rrq_worker_impl(watch_path: str, settings_object_path: str | None = None) -> None:
165
- if not settings_object_path:
166
- click.echo(click.style("ERROR: 'rrq worker watch' requires --settings to be specified.", fg="red"), err=True)
167
- sys.exit(1)
168
-
169
- abs_watch_path = os.path.abspath(watch_path)
170
- click.echo(f"Watching for file changes in {abs_watch_path} to restart RRQ worker (app settings: {settings_object_path})...")
171
- worker_process: subprocess.Popen | None = None
172
- loop = asyncio.get_event_loop()
173
- shutdown_event = asyncio.Event()
174
-
175
- def sig_handler(_signum, _frame):
176
- logger.info("Signal received, stopping watcher and worker...")
177
- if worker_process is not None:
178
- terminate_worker_process(worker_process, logger)
179
- loop.call_soon_threadsafe(shutdown_event.set)
180
-
181
- original_sigint = signal.getsignal(signal.SIGINT)
182
- original_sigterm = signal.getsignal(signal.SIGTERM)
183
- signal.signal(signal.SIGINT, sig_handler)
184
- signal.signal(signal.SIGTERM, sig_handler)
185
-
186
- try:
187
- worker_process = start_rrq_worker_subprocess(is_detached=False, settings_object_path=settings_object_path)
188
- async for changes in awatch(abs_watch_path, stop_event=shutdown_event):
189
- if shutdown_event.is_set():
190
- break
191
- if not changes:
192
- continue
193
-
194
- logger.info(f"File changes detected: {changes}. Restarting RRQ worker...")
195
- if worker_process is not None:
196
- terminate_worker_process(worker_process, logger)
197
- await asyncio.sleep(1)
198
- if shutdown_event.is_set():
199
- break
200
- worker_process = start_rrq_worker_subprocess(is_detached=False, settings_object_path=settings_object_path)
201
- except Exception as e:
202
- logger.error(f"Error in watch_rrq_worker: {e}", exc_info=True)
203
- finally:
204
- logger.info("Exiting watch mode. Ensuring worker process is terminated.")
205
- if not shutdown_event.is_set():
206
- shutdown_event.set()
207
- if worker_process is not None:
208
- terminate_worker_process(worker_process, logger)
209
- signal.signal(signal.SIGINT, original_sigint)
210
- signal.signal(signal.SIGTERM, original_sigterm)
211
- logger.info("Watch worker cleanup complete.")
212
-
213
-
214
- # --- Click CLI Definitions ---
215
-
216
- CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
217
-
218
- @click.group(context_settings=CONTEXT_SETTINGS)
219
- def rrq():
220
- """RRQ: Reliable Redis Queue Command Line Interface.
221
-
222
- Provides tools for running RRQ workers, checking system health,
223
- and managing jobs. Requires an application-specific --settings module
224
- for most operations.
225
- """
226
- pass
227
-
228
-
229
-
230
- @rrq.group("worker")
231
- def worker_cli():
232
- """Manage RRQ workers (run, watch)."""
233
- pass
234
-
235
-
236
- @worker_cli.command("run")
237
- @click.option("--burst", is_flag=True, help="Run worker in burst mode (process one job/batch then exit). Not Implemented yet.")
238
- @click.option("--detach", is_flag=True, help="Run the worker in the background (detached).")
239
- @click.option(
240
- "--settings",
241
- "settings_object_path",
242
- type=str,
243
- required=False,
244
- default=None,
245
- help="Python settings path for application worker settings (e.g., myapp.worker_config.rrq_settings)."
246
- )
247
- def worker_run_command(burst: bool, detach: bool, settings_object_path: str):
248
- """Run an RRQ worker process. Requires --settings."""
249
- rrq_settings = _load_app_settings(settings_object_path)
250
-
251
- if detach:
252
- logger.info("Attempting to start worker in detached (background) mode...")
253
- process = start_rrq_worker_subprocess(is_detached=True, settings_object_path=settings_object_path)
254
- click.echo(f"Worker initiated in background (PID: {process.pid}). Check logs for status.")
255
- return
256
-
257
- if burst:
258
- raise NotImplementedError("Burst mode is not implemented yet.")
259
-
260
- logger.info(f"Starting RRQ Worker (Burst: {burst}, App Settings: {settings_object_path})")
261
-
262
- if not rrq_settings.job_registry:
263
- click.echo(click.style("ERROR: No 'job_registry_app'. You must provide a JobRegistry instance in settings.", fg="red"), err=True)
264
- sys.exit(1)
265
-
266
- logger.debug(f"Registered handlers (from effective registry): {rrq_settings.job_registry.get_registered_functions()}")
267
- logger.debug(f"Effective RRQ settings for worker: {rrq_settings}")
268
-
269
- worker_instance = RRQWorker(
270
- settings=rrq_settings,
271
- job_registry=rrq_settings.job_registry,
272
- )
273
-
274
- loop = asyncio.get_event_loop()
275
- try:
276
- logger.info("Starting worker run loop...")
277
- loop.run_until_complete(worker_instance.run())
278
- except KeyboardInterrupt:
279
- logger.info("RRQ Worker run interrupted by user (KeyboardInterrupt).")
280
- except Exception as e:
281
- logger.error(f"Exception during RRQ Worker run: {e}", exc_info=True)
282
- finally:
283
- logger.info("RRQ Worker run finished or exited. Cleaning up event loop.")
284
- if loop.is_running():
285
- loop.run_until_complete(loop.shutdown_asyncgens())
286
- loop.close()
287
- logger.info("RRQ Worker has shut down.")
288
-
289
-
290
- @worker_cli.command("watch")
291
- @click.option(
292
- "--path",
293
- default=".",
294
- type=click.Path(exists=True, dir_okay=True, file_okay=False, readable=True),
295
- help="Directory path to watch for changes. Default is current directory.",
296
- show_default=True,
297
- )
298
- @click.option(
299
- "--settings",
300
- "settings_object_path",
301
- type=str,
302
- required=False,
303
- default=None,
304
- help="Python settings path for application worker settings (e.g., myapp.worker_config.rrq_settings)."
305
- )
306
- def worker_watch_command(path: str, settings_object_path: str):
307
- """Run the RRQ worker with auto-restart on file changes in PATH. Requires --settings."""
308
- asyncio.run(watch_rrq_worker_impl(path, settings_object_path=settings_object_path))
309
-
310
-
311
- @rrq.command("check")
312
- @click.option(
313
- "--settings",
314
- "settings_object_path",
315
- type=str,
316
- required=False,
317
- default=None,
318
- help="Python settings path for application worker settings (e.g., myapp.worker_config.rrq_settings)."
319
- )
320
- def check_command(settings_object_path: str):
321
- """Perform a health check on active RRQ worker(s). Requires --settings."""
322
- click.echo("Performing RRQ health check...")
323
- healthy = asyncio.run(check_health_async_impl(settings_object_path=settings_object_path))
324
- if healthy:
325
- click.echo(click.style("Health check PASSED.", fg="green"))
326
- else:
327
- click.echo(click.style("Health check FAILED.", fg="red"))
328
- sys.exit(1)