rrq 0.7.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rrq/worker.py CHANGED
@@ -29,6 +29,7 @@ from .registry import JobRegistry
  from .settings import RRQSettings
  from .store import JobStore
  from .cron import CronJob
+ from .telemetry import get_telemetry

  logger = logging.getLogger(__name__)

@@ -83,9 +84,9 @@ class RRQWorker:
  self._semaphore = asyncio.Semaphore(self.settings.worker_concurrency)
  self._running_tasks: set[asyncio.Task] = set()
  self._shutdown_event = asyncio.Event()
- self._loop = None # Will be set in run()
- self._health_check_task: Optional[asyncio.Task] = None
- self._cron_task: Optional[asyncio.Task] = None
+ self._loop: asyncio.AbstractEventLoop | None = None # Will be set in run()
+ self._health_check_task: asyncio.Task | None = None
+ self._cron_task: asyncio.Task | None = None
  self.status: str = "initializing" # Worker status (e.g., initializing, running, polling, idle, stopped)
  logger.info(
  f"Initializing RRQWorker {self.worker_id} for queues: {self.queues}"
@@ -145,8 +146,18 @@ class RRQWorker:
  self.status = "running"
  self._loop = asyncio.get_running_loop()
  self._setup_signal_handlers()
+ telemetry = get_telemetry()
  try:
  await self._call_startup_hook()
+ try:
+ telemetry.worker_started(
+ worker_id=self.worker_id, queues=list(self.queues)
+ )
+ except Exception as e_telemetry:
+ logger.error(
+ f"Worker {self.worker_id} error during telemetry startup: {e_telemetry}",
+ exc_info=True,
+ )
  await self._run_loop()
  except asyncio.CancelledError:
  logger.info(f"Worker {self.worker_id} run cancelled.")
@@ -154,6 +165,20 @@ class RRQWorker:
  logger.info(f"Worker {self.worker_id} shutting down cleanly.")
  await self._call_shutdown_hook()
  self.status = "stopped"
+ try:
+ telemetry.worker_stopped(worker_id=self.worker_id)
+ except Exception as e_telemetry:
+ logger.error(
+ f"Worker {self.worker_id} error during telemetry shutdown: {e_telemetry}",
+ exc_info=True,
+ )
+ try:
+ await self.close()
+ except Exception as e_close:
+ logger.error(
+ f"Worker {self.worker_id} error closing resources during shutdown: {e_close}",
+ exc_info=True,
+ )
  logger.info(f"Worker {self.worker_id} stopped.")

  async def _run_loop(self) -> None:
@@ -162,11 +187,13 @@ class RRQWorker:
  Continuously polls queues for jobs, manages concurrency, and handles shutdown.
  """
  logger.info(f"Worker {self.worker_id} starting run loop.")
- self._health_check_task = self._loop.create_task(self._heartbeat_loop())
+ loop = self._loop
+ assert loop is not None
+ self._health_check_task = loop.create_task(self._heartbeat_loop())
  if self.cron_jobs:
  for cj in self.cron_jobs:
  cj.schedule_next()
- self._cron_task = self._loop.create_task(self._cron_loop())
+ self._cron_task = loop.create_task(self._cron_loop())

  while not self._shutdown_event.is_set():
  try:
@@ -187,6 +214,17 @@ class RRQWorker:
  f"Worker {self.worker_id} burst mode complete: no more jobs."
  )
  break
+ if fetched_count == 0:
+ if self.status != "idle (no jobs)":
+ logger.debug(
+ f"Worker {self.worker_id} no jobs found. Waiting..."
+ )
+ self.status = "idle (no jobs)"
+ # Avoid tight polling loop when queues are empty
+ jittered_delay = self._calculate_jittered_delay(
+ self.settings.default_poll_delay_seconds
+ )
+ await asyncio.sleep(jittered_delay)
  else:
  if self.status != "idle (concurrency limit)":
  logger.debug(
@@ -223,7 +261,7 @@ class RRQWorker:
  with suppress(asyncio.CancelledError):
  await self._cron_task

- async def _poll_for_jobs(self, count: int) -> None:
+ async def _poll_for_jobs(self, count: int) -> int:
  """Polls configured queues round-robin and attempts to start processing jobs.

  Args:
@@ -351,7 +389,9 @@ class RRQWorker:

  # Create and track the execution task
  # The semaphore will be released when this task completes
- task = self._loop.create_task(self._execute_job(job, queue_name))
+ loop = self._loop
+ assert loop is not None
+ task = loop.create_task(self._execute_job(job, queue_name))
  self._running_tasks.add(task)
  task.add_done_callback(lambda t: self._task_cleanup(t, self._semaphore))
  logger.info(
@@ -423,93 +463,182 @@ class RRQWorker:
  if job.job_timeout_seconds is not None
  else self.settings.default_job_timeout_seconds
  )
+ attempt = job.current_retries + 1
+ telemetry = get_telemetry()
+
+ span_cm = telemetry.job_span(
+ job=job,
+ worker_id=self.worker_id,
+ queue_name=queue_name,
+ attempt=attempt,
+ timeout_seconds=float(actual_job_timeout),
+ )

  try:
- # --- Find Handler ---
- handler = self.job_registry.get_handler(job.function_name)
- if not handler:
- raise ValueError(
- f"No handler registered for function '{job.function_name}'"
- )
-
- # --- Prepare Context ---
- context = {
- "job_id": job.id,
- "job_try": job.current_retries + 1, # Attempt number (1-based)
- "enqueue_time": job.enqueue_time,
- "settings": self.settings,
- "worker_id": self.worker_id,
- "queue_name": queue_name,
- "rrq_client": self.client,
- }
-
- # --- Execute Handler ---
- result = None
- exc: Optional[BaseException] = None # Stores caught exception
-
- try: # Inner try for handler execution and its specific exceptions
- logger.debug(f"Calling handler '{job.function_name}' for job {job.id}")
- result = await asyncio.wait_for(
- handler(context, *job.job_args, **job.job_kwargs),
- timeout=float(actual_job_timeout),
- )
- logger.debug(f"Handler for job {job.id} returned successfully.")
- except TimeoutError as e_timeout: # Specifically from wait_for
- exc = e_timeout
- logger.warning(
- f"Job {job.id} execution timed out after {actual_job_timeout}s."
- )
- except RetryJob as e_retry: # Handler explicitly requests retry
- exc = e_retry
- logger.info(f"Job {job.id} requested retry: {e_retry}")
- except Exception as e_other: # Any other exception from the handler itself
- exc = e_other
- logger.error(
- f"Job {job.id} handler '{job.function_name}' raised unhandled exception:",
- exc_info=e_other,
- )
-
- # --- Process Outcome ---
- duration = time.monotonic() - start_time
- if exc is None: # Success
- await self._handle_job_success(job, result)
- logger.info(f"Job {job.id} completed successfully in {duration:.2f}s.")
- elif isinstance(exc, RetryJob):
- await self._process_retry_job(job, exc, queue_name)
- # Logging done within _process_retry_job
- elif isinstance(exc, asyncio.TimeoutError):
- error_msg = (
- str(exc)
- if str(exc)
- else f"Job timed out after {actual_job_timeout}s."
- )
- await self._handle_job_timeout(job, queue_name, error_msg)
- # Logging done within _handle_job_timeout
- else: # Other unhandled exception from handler
- await self._process_other_failure(job, exc, queue_name)
- # Logging done within _process_other_failure
-
- except ValueError as ve: # Catches "handler not found"
- logger.error(f"Job {job.id} fatal error: {ve}. Moving to DLQ.")
- await self._handle_fatal_job_error(job, queue_name, str(ve))
- except asyncio.CancelledError:
- # Catches cancellation of this _execute_job task (e.g., worker shutdown)
- logger.warning(
- f"Job {job.id} execution was cancelled (likely worker shutdown). Handling cancellation."
- )
- await self._handle_job_cancellation_on_shutdown(job, queue_name)
- # Do not re-raise; cancellation is handled.
- except (
- Exception
- ) as critical_exc: # Safety net for unexpected errors in this method
- logger.critical(
- f"Job {job.id} encountered an unexpected critical error during execution logic: {critical_exc}",
- exc_info=critical_exc,
- )
- # Fallback: Try to move to DLQ to avoid losing the job entirely
- await self._handle_fatal_job_error(
- job, queue_name, f"Critical worker error: {critical_exc}"
- )
+ with span_cm as span:
+ try:
+ # --- Find Handler ---
+ handler = self.job_registry.get_handler(job.function_name)
+ if not handler:
+ raise ValueError(
+ f"No handler registered for function '{job.function_name}'"
+ )
+
+ # --- Prepare Context ---
+ context = {
+ "job_id": job.id,
+ "job_try": attempt, # Attempt number (1-based)
+ "enqueue_time": job.enqueue_time,
+ "settings": self.settings,
+ "worker_id": self.worker_id,
+ "queue_name": queue_name,
+ "rrq_client": self.client,
+ }
+
+ # --- Execute Handler ---
+ result = None
+ exc: Optional[BaseException] = None # Stores caught exception
+
+ try: # Inner try for handler execution and its specific exceptions
+ logger.debug(
+ f"Calling handler '{job.function_name}' for job {job.id}"
+ )
+ result = await asyncio.wait_for(
+ handler(context, *job.job_args, **job.job_kwargs),
+ timeout=float(actual_job_timeout),
+ )
+ logger.debug(f"Handler for job {job.id} returned successfully.")
+ except TimeoutError as e_timeout: # Specifically from wait_for
+ exc = e_timeout
+ logger.warning(
+ f"Job {job.id} execution timed out after {actual_job_timeout}s."
+ )
+ except RetryJob as e_retry: # Handler explicitly requests retry
+ exc = e_retry
+ logger.info(f"Job {job.id} requested retry: {e_retry}")
+ except (
+ Exception
+ ) as e_other: # Any other exception from the handler itself
+ exc = e_other
+ logger.error(
+ f"Job {job.id} handler '{job.function_name}' raised unhandled exception:",
+ exc_info=e_other,
+ )
+
+ # --- Process Outcome ---
+ duration = time.monotonic() - start_time
+ if exc is None: # Success
+ await self._handle_job_success(job, result)
+ span.success(duration_seconds=duration)
+ logger.info(
+ f"Job {job.id} completed successfully in {duration:.2f}s."
+ )
+ elif isinstance(exc, RetryJob):
+ anticipated_retry_count = job.current_retries + 1
+ delay_seconds = exc.defer_seconds
+ if (
+ delay_seconds is None
+ and anticipated_retry_count < job.max_retries
+ ):
+ temp_job_for_backoff = Job(
+ id=job.id,
+ function_name=job.function_name,
+ current_retries=anticipated_retry_count,
+ max_retries=job.max_retries,
+ )
+ delay_seconds = (
+ self._calculate_backoff_ms(temp_job_for_backoff)
+ / 1000.0
+ )
+ await self._process_retry_job(job, exc, queue_name)
+ if anticipated_retry_count >= job.max_retries:
+ span.dlq(
+ duration_seconds=duration,
+ reason="max_retries",
+ error=exc,
+ )
+ else:
+ span.retry(
+ duration_seconds=duration,
+ delay_seconds=delay_seconds,
+ reason=str(exc) or None,
+ )
+ elif isinstance(exc, asyncio.TimeoutError):
+ error_msg = (
+ str(exc)
+ if str(exc)
+ else f"Job timed out after {actual_job_timeout}s."
+ )
+ await self._handle_job_timeout(job, queue_name, error_msg)
+ span.timeout(
+ duration_seconds=duration,
+ timeout_seconds=float(actual_job_timeout),
+ error_message=error_msg,
+ )
+ else: # Other unhandled exception from handler
+ anticipated_retry_count = job.current_retries + 1
+ delay_seconds = None
+ if anticipated_retry_count < job.max_retries:
+ delay_seconds = (
+ self._calculate_backoff_ms(
+ Job(
+ id=job.id,
+ function_name=job.function_name,
+ current_retries=anticipated_retry_count,
+ max_retries=job.max_retries,
+ )
+ )
+ / 1000.0
+ )
+ await self._process_other_failure(job, exc, queue_name)
+ if anticipated_retry_count >= job.max_retries:
+ span.dlq(
+ duration_seconds=duration,
+ reason="max_retries",
+ error=exc,
+ )
+ else:
+ span.retry(
+ duration_seconds=duration,
+ delay_seconds=delay_seconds,
+ reason=str(exc) or None,
+ )
+
+ except ValueError as ve: # Catches "handler not found"
+ logger.error(f"Job {job.id} fatal error: {ve}. Moving to DLQ.")
+ await self._handle_fatal_job_error(job, queue_name, str(ve))
+ span.dlq(
+ duration_seconds=time.monotonic() - start_time,
+ reason=str(ve),
+ error=ve,
+ )
+ except asyncio.CancelledError:
+ # Catches cancellation of this _execute_job task (e.g., worker shutdown)
+ logger.warning(
+ f"Job {job.id} execution was cancelled (likely worker shutdown). Handling cancellation."
+ )
+ await self._handle_job_cancellation_on_shutdown(job, queue_name)
+ span.cancelled(
+ duration_seconds=time.monotonic() - start_time,
+ reason="shutdown",
+ )
+ # Do not re-raise; cancellation is handled.
+ except (
+ Exception
+ ) as critical_exc: # Safety net for unexpected errors in this method
+ logger.critical(
+ f"Job {job.id} encountered an unexpected critical error during execution logic: {critical_exc}",
+ exc_info=critical_exc,
+ )
+ # Fallback: Try to move to DLQ to avoid losing the job entirely
+ await self._handle_fatal_job_error(
+ job, queue_name, f"Critical worker error: {critical_exc}"
+ )
+ span.dlq(
+ duration_seconds=time.monotonic() - start_time,
+ reason="critical_worker_error",
+ error=critical_exc,
+ )
  finally:
  # CRITICAL: Ensure the lock is released regardless of outcome
  await self.job_store.release_job_lock(job.id)
@@ -592,6 +721,15 @@ class RRQWorker:
  new_retry_count = await self.job_store.atomic_retry_job(
  job.id, target_queue, retry_at_score, str(exc), JobStatus.RETRYING
  )
+ try:
+ next_run_time = datetime.fromtimestamp(
+ float(retry_at_score) / 1000.0, tz=timezone.utc
+ )
+ await self.job_store.update_job_next_scheduled_run_time(
+ job.id, next_run_time
+ )
+ except Exception:
+ pass

  logger.info(
  f"{log_prefix} explicitly retrying in {defer_seconds:.2f}s "
@@ -646,6 +784,15 @@ class RRQWorker:
  new_retry_count = await self.job_store.atomic_retry_job(
  job.id, target_queue, retry_at_score, last_error_str, JobStatus.RETRYING
  )
+ try:
+ next_run_time = datetime.fromtimestamp(
+ float(retry_at_score) / 1000.0, tz=timezone.utc
+ )
+ await self.job_store.update_job_next_scheduled_run_time(
+ job.id, next_run_time
+ )
+ except Exception:
+ pass

  logger.info(
  f"{log_prefix} failed, retrying in {defer_ms / 1000.0:.2f}s "
@@ -732,9 +879,12 @@ class RRQWorker:

  def _setup_signal_handlers(self) -> None:
  """Sets up POSIX signal handlers for graceful shutdown."""
+ loop = self._loop
+ if loop is None:
+ return
  for sig in self.SIGNALS:
  try:
- self._loop.add_signal_handler(sig, self._request_shutdown)
+ loop.add_signal_handler(sig, self._request_shutdown)
  logger.debug(
  f"Worker {self.worker_id} registered signal handler for {sig.name}."
  )
@@ -805,6 +955,7 @@ class RRQWorker:
  async def _heartbeat_loop(self) -> None:
  """Periodically updates the worker's health status key in Redis with a TTL."""
  logger.debug(f"Worker {self.worker_id} starting heartbeat loop.")
+ telemetry = get_telemetry()
  while not self._shutdown_event.is_set():
  try:
  health_data = {
@@ -821,6 +972,9 @@ class RRQWorker:
  await self.job_store.set_worker_health(
  self.worker_id, health_data, int(ttl)
  )
+ telemetry.worker_heartbeat(
+ worker_id=self.worker_id, health_data=health_data
+ )
  # Logger call moved into set_worker_health
  except Exception as e:
  # Log error but continue the loop
@@ -860,6 +1014,15 @@ class RRQWorker:
  if cj.due(now):
  unique_key = f"cron:{cj.function_name}" if cj.unique else None
  try:
+ if unique_key:
+ # For unique cron jobs, skip enqueueing while the unique lock is held
+ # to avoid accumulating deferred duplicates on every cron tick.
+ ttl = await self.job_store.get_lock_ttl(unique_key)
+ if ttl > 0:
+ logger.debug(
+ f"Skipping cron job '{cj.function_name}' due to active unique lock (TTL: {ttl}s)."
+ )
+ continue
  await self.client.enqueue(
  cj.function_name,
  *cj.args,
@@ -974,7 +1137,9 @@ class RRQWorker:
  )
  try:
  job.status = JobStatus.PENDING
- job.next_scheduled_run_time = datetime.now(timezone.utc) # Re-queue immediately
+ job.next_scheduled_run_time = datetime.now(
+ timezone.utc
+ ) # Re-queue immediately
  job.last_error = "Job execution interrupted by worker shutdown. Re-queued."
  # Do not increment retries for shutdown interruption

rrq-0.7.0.dist-info/METADATA → rrq-0.8.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rrq
- Version: 0.7.0
+ Version: 0.8.0
  Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
  Project-URL: Homepage, https://github.com/getresq/rrq
  Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
@@ -8,40 +8,42 @@ Author-email: Mazdak Rezvani <mazdak@me.com>
  License-File: LICENSE
  Classifier: Intended Audience :: Developers
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Classifier: Topic :: System :: Distributed Computing
  Classifier: Topic :: System :: Monitoring
- Requires-Python: >=3.10
+ Requires-Python: >=3.11
  Requires-Dist: click>=8.1.3
  Requires-Dist: pydantic-settings>=2.9.1
  Requires-Dist: pydantic>=2.11.4
- Requires-Dist: redis[hiredis]<6,>=4.2.0
+ Requires-Dist: redis[hiredis]>=4.2.0
  Requires-Dist: rich>=14.0.0
  Requires-Dist: watchfiles>=0.19.0
  Provides-Extra: dev
  Requires-Dist: pytest-asyncio>=1.0.0; extra == 'dev'
  Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
  Requires-Dist: pytest>=8.3.5; extra == 'dev'
+ Requires-Dist: ruff==0.14.9; extra == 'dev'
+ Requires-Dist: ty==0.0.1-alpha.26; extra == 'dev'
  Description-Content-Type: text/markdown

  # RRQ: Reliable Redis Queue

  RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.

- ## 🆕 What's New in v0.7.0
+ ## 🆕 What's New in v0.7.1

  - **Comprehensive CLI Tools**: 15+ new commands for monitoring, debugging, and management
  - **Real-time Monitoring Dashboard**: Interactive dashboard with `rrq monitor`
  - **Enhanced DLQ Management**: Sophisticated filtering and requeuing capabilities
- - **Python 3.10 Support**: Expanded compatibility from Python 3.11+ to 3.10+
  - **Bug Fixes**: Critical fix for unique job enqueue failures with proper deferral

  ## Requirements

- - Python 3.10 or higher
+ - Python 3.11 or higher
  - Redis 5.0 or higher
  - asyncio-compatible environment

@@ -143,6 +145,8 @@ this purpose.

  ```python
  # worker_script.py
+ import asyncio
+
  from rrq.worker import RRQWorker
  from config import rrq_settings # Import your settings
  from main_setup import job_registry # Import your registry
@@ -152,7 +156,7 @@ worker = RRQWorker(settings=rrq_settings, job_registry=job_registry)

  # Run the worker (blocking)
  if __name__ == "__main__":
- worker.run()
+ asyncio.run(worker.run())
  ```

  You can run multiple instances of `worker_script.py` for concurrent processing.
@@ -432,6 +436,41 @@ RRQ can be configured in several ways, with the following precedence:

  **Important Note on `job_registry`**: The `job_registry` attribute in your `RRQSettings` object is **critical** for RRQ to function. It must be an instance of `JobRegistry` and is used to register job handlers. Without a properly configured `job_registry`, workers will not know how to process jobs, and most operations will fail. Ensure it is set in your settings object to map job names to their respective handler functions.

+ ## Telemetry (Datadog / OTEL / Logfire)
+
+ RRQ supports optional distributed tracing for enqueue and job execution. Enable the
+ integration in both the producer and worker processes to get end-to-end traces
+ across the Redis queue.
+
+ ### Datadog (ddtrace)
+
+ ```python
+ from rrq.integrations.ddtrace import enable as enable_rrq_ddtrace
+
+ enable_rrq_ddtrace(service="myapp-rrq")
+ ```
+
+ This only instruments RRQ spans + propagation; it does **not** call
+ `ddtrace.patch_all()`. Configure `ddtrace` in your app as you already do.
+
+ ### Logfire
+
+ ```python
+ import logfire
+ from rrq.integrations.logfire import enable as enable_rrq_logfire
+
+ logfire.configure(service_name="myapp-rrq")
+ enable_rrq_logfire(service_name="myapp-rrq")
+ ```
+
+ ### OpenTelemetry (generic)
+
+ ```python
+ from rrq.integrations.otel import enable as enable_rrq_otel
+
+ enable_rrq_otel(service_name="myapp-rrq")
+ ```
+
  ### Comprehensive CLI Command System
  - **New modular CLI architecture** with dedicated command modules for better organization
  - **Enhanced monitoring capabilities** with real-time dashboards and beautiful table output
rrq-0.8.0.dist-info/RECORD ADDED
@@ -0,0 +1,34 @@
+ rrq/__init__.py,sha256=3WYv9UkvnCbjKXrvmqiLm7yuVVQiLclbVCOXq5wb6ZM,290
+ rrq/cli.py,sha256=paUyLTN75KtC4nSVumiCE3taMhcMF27Jv0TSkaOwb5k,27092
+ rrq/client.py,sha256=HGegw6H_oF6EBHPYeC8CKJXYbnliYHlibbh7CI3JBik,9057
+ rrq/constants.py,sha256=vWSQcNX1v0_p0f8S4skZTnUgwOK4QSNVx-CK6VmxQog,2019
+ rrq/cron.py,sha256=DQv9O1OS9qsuGY_AsEPZoXeInykordA_9TIhP0dYAhw,7710
+ rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
+ rrq/hooks.py,sha256=kLrYrdCXYVh0tQl-6UKmT2IYVHgPB-_Zm0FbKba_HpE,8455
+ rrq/job.py,sha256=z3q8hChF9CvX_bJUCO5Qh_eVWiXs6gMO2VMdHP9HVN8,4320
+ rrq/registry.py,sha256=NQNC2gAVlKxmcU2P-oyqPsF_xsyaXsqLkdhLrSGKxSw,2844
+ rrq/settings.py,sha256=NHbom3cG5jW70A2ApshBJBdf4D5ErCtGkv7OSCquSwE,5033
+ rrq/store.py,sha256=UfMcYudHMtHsP9IoKLuyPb3sWVRUfieaSD8buKthl2o,37783
+ rrq/telemetry.py,sha256=Jh3gd-QvE0ineHBd_WUM86IZxsjKykD8KH4FUUOMFww,3090
+ rrq/worker.py,sha256=Isuk2EX8TmBoO7kpTsKrhgQomO3pNu14wb51l45vyfQ,52846
+ rrq/cli_commands/__init__.py,sha256=1YO4QL-R5F3eBRp2LZVQpqFQCBBdlsCdEcrPVzUo-TA,21
+ rrq/cli_commands/base.py,sha256=0UY5ilCHPIFuREHyiJpSpqt_rv5QWfWdZsYneR_B1oc,3087
+ rrq/cli_commands/utils.py,sha256=6gxefcOPOLSefPCXcpYcrIh8R5iHu-DR2WTix-QmQNA,4352
+ rrq/cli_commands/commands/__init__.py,sha256=e5ahM66Lb_nj7L4sjLrWr67vF-dwA9bxbCxsu8MrTy0,30
+ rrq/cli_commands/commands/debug.py,sha256=U5t6gXIlur-QO4c-ik12DCA-amyqQdhIuWNexlYXiwQ,18460
+ rrq/cli_commands/commands/dlq.py,sha256=Swk-2CuqoXsCuuhQsmQNPXtJLMe_kVwIZGI6iyTwy7w,30751
+ rrq/cli_commands/commands/jobs.py,sha256=aPabfegOGklXKExz8fCs0-xugQEvhsRJVFdvLpY5gDY,18128
+ rrq/cli_commands/commands/monitor.py,sha256=jAAxmd5cwh2XWu8beHiT17LxKA3PXw9CLEIZilZiZ54,29978
+ rrq/cli_commands/commands/queues.py,sha256=C-qPFNclHxptZkJe2vzqjAg2bgLp98U5vFw4YBC5Enc,19653
+ rrq/exporters/__init__.py,sha256=Tv3M3NXivbyr1GqRsQitHwGwxyj13NrERqBtgTvtsZY,48
+ rrq/exporters/prometheus.py,sha256=xfaU7yezF8CJ4kHnGNIRb7fxF54h4BwBG8M2qNciFfU,3078
+ rrq/exporters/statsd.py,sha256=zVjX3Beni1iQyX4Y9T5Oqaz8hRC9OE-dcizQUZGQsMA,1876
+ rrq/integrations/__init__.py,sha256=UMTqvpUUW5OA3j7r0-Ydn-_xq1XGwJVHGjeuWNc-0FI,62
+ rrq/integrations/ddtrace.py,sha256=_lSCJvblMCD9KlMB0-8voiQi4RfP1MmAJqlg0_gUo50,13892
+ rrq/integrations/logfire.py,sha256=ZyIAMYdOQThGOeIpPR6aUhe2yfMFSi7jutnuM0yDTqo,756
+ rrq/integrations/otel.py,sha256=biJj7NRRBmNmlCOu1cT_ZC3ZWLebhol9JSNyckVqlPQ,10048
+ rrq-0.8.0.dist-info/METADATA,sha256=rGL8CIC1IytUfugyU-g-kFLYV2UJCtpvZMkbGntMAig,20804
+ rrq-0.8.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ rrq-0.8.0.dist-info/entry_points.txt,sha256=f8eFjk2ygDSyu9USwXGj5IM8xeyQqZgDa1rSrCj4Mis,36
+ rrq-0.8.0.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
+ rrq-0.8.0.dist-info/RECORD,,
rrq-0.7.0.dist-info/WHEEL → rrq-0.8.0.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: hatchling 1.27.0
+ Generator: hatchling 1.28.0
  Root-Is-Purelib: true
  Tag: py3-none-any
rrq-0.7.0.dist-info/RECORD DELETED
@@ -1,26 +0,0 @@
- rrq/__init__.py,sha256=3WYv9UkvnCbjKXrvmqiLm7yuVVQiLclbVCOXq5wb6ZM,290
- rrq/cli.py,sha256=8ykQqM6VOzqHh2eK3ibtvtgk0gTr83vfyE8SHDpz4PI,27046
- rrq/client.py,sha256=PTuWZ23KQhNR4eHL0TuZaBsc_QLcuLVnbLx5apBdR2E,8245
- rrq/constants.py,sha256=vWSQcNX1v0_p0f8S4skZTnUgwOK4QSNVx-CK6VmxQog,2019
- rrq/cron.py,sha256=DQv9O1OS9qsuGY_AsEPZoXeInykordA_9TIhP0dYAhw,7710
- rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
- rrq/hooks.py,sha256=rJpD7ARUbtwhht-had1WTgguJgI8TYVTWqRJpRDii8E,7478
- rrq/job.py,sha256=YCdrYMrAF2DjckD4LoZfqg_Ow5HySBH9IYLK4bYnctQ,4052
- rrq/registry.py,sha256=NQNC2gAVlKxmcU2P-oyqPsF_xsyaXsqLkdhLrSGKxSw,2844
- rrq/settings.py,sha256=RzuPV5nNzP6gpRut15_xWUagCLTXDCu7e_YAprLPFLE,5030
- rrq/store.py,sha256=VdSObK6zkNtL3Ems1p_U-xd4Ir4zHqFvo9nqNw2NpQ8,36022
- rrq/worker.py,sha256=w_1fOIMPSDkvXXYEmPPiC-uNz3PX3XOVjKCJK_Dpt3Y,44990
- rrq/cli_commands/__init__.py,sha256=1YO4QL-R5F3eBRp2LZVQpqFQCBBdlsCdEcrPVzUo-TA,21
- rrq/cli_commands/base.py,sha256=A00NM2qAHemy5geqU8udaxhMurkZLyGShHeuXNJ0nWM,2990
- rrq/cli_commands/utils.py,sha256=rZDmMi_HlBXf7BxMZbmrsOY9hur3ox9sbWrmHabuZy8,4299
- rrq/cli_commands/commands/__init__.py,sha256=e5ahM66Lb_nj7L4sjLrWr67vF-dwA9bxbCxsu8MrTy0,30
- rrq/cli_commands/commands/debug.py,sha256=pt_1mP-VuXkoWpmMF60lsrR0hKr9XP5PymxkskUOSWk,18462
- rrq/cli_commands/commands/dlq.py,sha256=Swk-2CuqoXsCuuhQsmQNPXtJLMe_kVwIZGI6iyTwy7w,30751
- rrq/cli_commands/commands/jobs.py,sha256=aPabfegOGklXKExz8fCs0-xugQEvhsRJVFdvLpY5gDY,18128
- rrq/cli_commands/commands/monitor.py,sha256=hdXv6ZTNG5MWUydtrQxDbH4gTBB6VLriTHpsP1cWfqA,28809
- rrq/cli_commands/commands/queues.py,sha256=5UrsweVwwtMlG9TXG0zlgXTUhGPIb3x_zvQIV7m_-ig,19648
- rrq-0.7.0.dist-info/METADATA,sha256=T3BvtahQ28B6i33XxDSwy_RYOffrv3enRXiegbRq8XI,19847
- rrq-0.7.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- rrq-0.7.0.dist-info/entry_points.txt,sha256=f8eFjk2ygDSyu9USwXGj5IM8xeyQqZgDa1rSrCj4Mis,36
- rrq-0.7.0.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
- rrq-0.7.0.dist-info/RECORD,,