rrq 0.4.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rrq/client.py CHANGED
@@ -2,7 +2,7 @@
 
 import logging
 import uuid
-from datetime import UTC, datetime, timedelta
+from datetime import timezone, datetime, timedelta
 from typing import Any, Optional
 
 from .job import Job, JobStatus
@@ -68,7 +68,7 @@ class RRQClient:
                 Uses a Redis lock with `default_unique_job_lock_ttl_seconds`.
             _max_retries: Maximum number of retries for this specific job. Overrides `RRQSettings.default_max_retries`.
             _job_timeout_seconds: Timeout (in seconds) for this specific job. Overrides `RRQSettings.default_job_timeout_seconds`.
-            _defer_until: A specific datetime (UTC recommended) when the job should become available for processing.
+            _defer_until: A specific datetime (timezone.utc recommended) when the job should become available for processing.
             _defer_by: A timedelta relative to now, specifying when the job should become available.
             _result_ttl_seconds: Time-to-live (in seconds) for the result of this specific job. Overrides `RRQSettings.default_result_ttl_seconds`.
             **kwargs: Keyword arguments to pass to the handler function.
@@ -79,40 +79,48 @@ class RRQClient:
         """
         # Determine job ID and enqueue timestamp
         job_id_to_use = _job_id or str(uuid.uuid4())
-        enqueue_time_utc = datetime.now(UTC)
+        enqueue_time_utc = datetime.now(timezone.utc)
 
-        # Compute unique lock TTL: cover deferral window if any
+        # Compute base desired run time and unique lock TTL to cover deferral
         lock_ttl_seconds = self.settings.default_unique_job_lock_ttl_seconds
-        if _defer_by is not None:
-            # Defer relative to now
-            defer_secs = max(0, int(_defer_by.total_seconds()))
-            lock_ttl_seconds = max(lock_ttl_seconds, defer_secs + 1)
-        elif _defer_until is not None:
-            # Defer until specific datetime
+        desired_run_time = enqueue_time_utc
+        if _defer_until is not None:
             dt = _defer_until
-            # Normalize to UTC
             if dt.tzinfo is None:
-                dt = dt.replace(tzinfo=UTC)
-            elif dt.tzinfo != UTC:
-                dt = dt.astimezone(UTC)
+                dt = dt.replace(tzinfo=timezone.utc)
+            elif dt.tzinfo != timezone.utc:
+                dt = dt.astimezone(timezone.utc)
+            desired_run_time = dt
             diff = (dt - enqueue_time_utc).total_seconds()
             if diff > 0:
                 lock_ttl_seconds = max(lock_ttl_seconds, int(diff) + 1)
+        elif _defer_by is not None:
+            defer_secs = max(0, int(_defer_by.total_seconds()))
+            desired_run_time = enqueue_time_utc + timedelta(seconds=defer_secs)
+            lock_ttl_seconds = max(lock_ttl_seconds, defer_secs + 1)
 
         unique_acquired = False
-        # Acquire unique lock if requested, with TTL covering defer window
+        # Handle unique key with deferral if locked
+        unique_acquired = False
         if _unique_key:
-            lock_acquired = await self.job_store.acquire_unique_job_lock(
-                unique_key=_unique_key,
-                job_id=job_id_to_use,
-                lock_ttl_seconds=lock_ttl_seconds,
-            )
-            if not lock_acquired:
-                logger.info(
-                    f"Job with unique key '{_unique_key}' already active or recently run. Enqueue denied."
+            remaining_ttl = await self.job_store.get_lock_ttl(_unique_key)
+            if remaining_ttl > 0:
+                desired_run_time = max(
+                    desired_run_time, enqueue_time_utc + timedelta(seconds=remaining_ttl)
+                )
+            else:
+                acquired = await self.job_store.acquire_unique_job_lock(
+                    _unique_key, job_id_to_use, lock_ttl_seconds
                 )
-                return None
-            unique_acquired = True
+                if acquired:
+                    unique_acquired = True
+                else:
+                    # Race: lock acquired after our check; defer by remaining TTL
+                    remaining = await self.job_store.get_lock_ttl(_unique_key)
+                    desired_run_time = max(
+                        desired_run_time,
+                        enqueue_time_utc + timedelta(seconds=max(0, int(remaining))),
+                    )
 
         queue_name_to_use = _queue_name or self.settings.default_queue_name
 
@@ -146,18 +154,14 @@ class RRQClient:
 
         # Determine the score for the sorted set (queue)
         # Score is a millisecond timestamp for when the job should be processed.
-        score_dt = enqueue_time_utc  # Default to immediate processing
-        if _defer_until:
-            score_dt = _defer_until
-        elif _defer_by:
-            score_dt = enqueue_time_utc + _defer_by
+        score_dt = desired_run_time
 
-        # Ensure score_dt is timezone-aware (UTC) if it's naive from user input
+        # Ensure score_dt is timezone-aware (timezone.utc) if it's naive from user input
         if score_dt.tzinfo is None:
-            score_dt = score_dt.replace(tzinfo=UTC)
-        elif score_dt.tzinfo != UTC:
-            # Convert to UTC if it's aware but not UTC
-            score_dt = score_dt.astimezone(UTC)
+            score_dt = score_dt.replace(tzinfo=timezone.utc)
+        elif score_dt.tzinfo != timezone.utc:
+            # Convert to timezone.utc if it's aware but not timezone.utc
+            score_dt = score_dt.astimezone(timezone.utc)
 
         score_timestamp_ms = int(score_dt.timestamp() * 1000)
         # Record when the job is next scheduled to run (for deferred execution)
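The net effect of these client.py hunks: a single `desired_run_time` now drives the queue score, and a unique-key collision no longer rejects the enqueue — the job is pushed out past the lock's remaining TTL instead. A minimal sketch of the caller-visible difference, assuming `RRQClient` is constructed from an `RRQSettings` instance and that these hunks belong to its `enqueue` coroutine (the method name, handler name, and unique key below are illustrative, not taken from this diff):

```python
import asyncio
from datetime import timedelta

from rrq.client import RRQClient
from rrq.settings import RRQSettings


async def main() -> None:
    client = RRQClient(settings=RRQSettings())

    # 0.4.0 behavior: while the unique lock was held, a second enqueue
    # logged "Enqueue denied." and returned None.
    await client.enqueue("send_report", _unique_key="report:42")

    # 0.7.0 behavior: the second enqueue is accepted, but its run time is
    # moved past the lock's remaining TTL, so it runs later instead of
    # being dropped.
    deferred = await client.enqueue("send_report", _unique_key="report:42")
    assert deferred is not None

    # Deferral windows still stretch the unique-lock TTL to cover them.
    await client.enqueue("send_report", _defer_by=timedelta(minutes=5))


asyncio.run(main())
```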
rrq/constants.py CHANGED
@@ -26,6 +26,15 @@ RETRY_COUNTER_PREFIX: str = (
     "rrq:retry_count:"  # Potentially, if not stored directly in job hash
 )
 
+# Hybrid monitoring optimization keys
+ACTIVE_QUEUES_SET: str = (
+    "rrq:active:queues"  # ZSET: queue_name -> last_activity_timestamp
+)
+ACTIVE_WORKERS_SET: str = (
+    "rrq:active:workers"  # ZSET: worker_id -> last_heartbeat_timestamp
+)
+MONITOR_EVENTS_STREAM: str = "rrq:monitor:events"  # Stream for real-time changes
+
 # Default job settings (can be overridden by RRQSettings or per job)
 DEFAULT_MAX_RETRIES: int = 5
 DEFAULT_JOB_TIMEOUT_SECONDS: int = 300  # 5 minutes
@@ -41,3 +50,4 @@ DEFAULT_POLL_DELAY_SECONDS: float = 0.1
 
 # Default worker ID if not specified
 DEFAULT_WORKER_ID_PREFIX: str = "rrq_worker_"
+CONNECTION_POOL_MAX_CONNECTIONS: int = 20
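The new constants back a hybrid monitoring scheme: two ZSETs index active queues and workers by last-seen timestamp, and a Redis Stream carries change events. The write side is not part of this diff, so the consumer below is only a sketch under that assumption, using `redis.asyncio` and a 60-second activity window chosen for illustration:

```python
import asyncio
import time

import redis.asyncio as redis

from rrq.constants import (
    ACTIVE_QUEUES_SET,
    ACTIVE_WORKERS_SET,
    MONITOR_EVENTS_STREAM,
)


async def snapshot() -> None:
    r = redis.Redis()
    cutoff = time.time() - 60  # treat anything seen in the last minute as active
    queues = await r.zrangebyscore(ACTIVE_QUEUES_SET, cutoff, "+inf")
    workers = await r.zrangebyscore(ACTIVE_WORKERS_SET, cutoff, "+inf")
    print(f"active queues: {queues}, active workers: {workers}")

    # Tail the monitor stream for real-time changes (block up to 1 second).
    events = await r.xread({MONITOR_EVENTS_STREAM: "$"}, count=10, block=1000)
    for _stream, entries in events:
        for entry_id, fields in entries:
            print(entry_id, fields)


asyncio.run(snapshot())
```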
rrq/cron.py CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from datetime import UTC, datetime, timedelta
+from datetime import timezone, datetime, timedelta
 from typing import Any, Optional, Sequence
 
 from pydantic import BaseModel, Field, PrivateAttr
@@ -42,22 +42,24 @@ def _parse_value(value: str, names: dict[str, int], min_val: int, max_val: int)
     return num
 
 
-def _parse_field(field: str, *, names: dict[str, int] | None, min_val: int, max_val: int) -> Sequence[int]:
+def _parse_field(
+    field: str, *, names: dict[str, int] | None, min_val: int, max_val: int
+) -> Sequence[int]:
     names = names or {}
     if field == "*":
         return list(range(min_val, max_val + 1))
     values: set[int] = set()
-    for part in field.split(','):
+    for part in field.split(","):
         step = 1
-        if '/' in part:
-            base, step_str = part.split('/', 1)
+        if "/" in part:
+            base, step_str = part.split("/", 1)
             step = int(step_str)
         else:
             base = part
         if base == "*":
             start, end = min_val, max_val
-        elif '-' in base:
-            a, b = base.split('-', 1)
+        elif "-" in base:
+            a, b = base.split("-", 1)
             start = _parse_value(a, names, min_val, max_val)
             end = _parse_value(b, names, min_val, max_val)
         else:
@@ -88,13 +90,62 @@ class CronSchedule:
 
     def next_after(self, dt: datetime) -> datetime:
         dt = dt.replace(second=0, microsecond=0) + timedelta(minutes=1)
-        while True:
+
+        # Optimization: limit iterations to prevent infinite loops on edge cases
+        max_iterations = 400 * 24 * 60  # ~400 days worth of minutes
+        iterations = 0
+
+        while iterations < max_iterations:
+            iterations += 1
+
+            # Fast skip to next valid month if current month is invalid
             if dt.month not in self.months:
-                dt += timedelta(minutes=1)
+                # Jump to the first day of the next valid month
+                next_month = (
+                    min(m for m in self.months if m > dt.month)
+                    if any(m > dt.month for m in self.months)
+                    else min(self.months)
+                )
+                if next_month <= dt.month:
+                    # Need to go to next year
+                    dt = dt.replace(
+                        year=dt.year + 1, month=next_month, day=1, hour=0, minute=0
+                    )
+                else:
+                    dt = dt.replace(month=next_month, day=1, hour=0, minute=0)
+                continue
+
+            # Fast skip to next valid hour if current hour is invalid
+            if dt.hour not in self.hours:
+                # Jump to the next valid hour
+                next_hour = (
+                    min(h for h in self.hours if h > dt.hour)
+                    if any(h > dt.hour for h in self.hours)
+                    else min(self.hours)
+                )
+                if next_hour <= dt.hour:
+                    # Need to go to next day
+                    dt = (dt + timedelta(days=1)).replace(hour=next_hour, minute=0)
+                else:
+                    dt = dt.replace(hour=next_hour, minute=0)
                 continue
-            if dt.hour not in self.hours or dt.minute not in self.minutes:
-                dt += timedelta(minutes=1)
+
+            # Fast skip to next valid minute if current minute is invalid
+            if dt.minute not in self.minutes:
+                # Jump to the next valid minute
+                next_minute = (
+                    min(m for m in self.minutes if m > dt.minute)
+                    if any(m > dt.minute for m in self.minutes)
+                    else min(self.minutes)
+                )
+                if next_minute <= dt.minute:
+                    # Need to go to next hour
+                    dt = (dt + timedelta(hours=1)).replace(minute=next_minute)
+                else:
+                    dt = dt.replace(minute=next_minute)
                 continue
+
+            # Check day constraints
             dom_match = dt.day in self.dom
             # Convert Python weekday (Monday=0) to cron weekday (Sunday=0)
             # Python: Mon=0, Tue=1, Wed=2, Thu=3, Fri=4, Sat=5, Sun=6
@@ -102,7 +153,7 @@
         python_weekday = dt.weekday()
         cron_weekday = (python_weekday + 1) % 7
         dow_match = cron_weekday in self.dow
-
+
         if self.dom_all and self.dow_all:
             condition = True
         elif self.dom_all:
@@ -114,10 +165,19 @@
             else:
                 # Both constraints specified - use OR logic (standard cron behavior)
                 condition = dom_match or dow_match
+
             if condition:
                 return dt
-            dt += timedelta(minutes=1)
 
+            # If day constraints don't match, skip to next day
+            dt = (dt + timedelta(days=1)).replace(
+                hour=min(self.hours), minute=min(self.minutes)
+            )
+
+        # If we've exceeded max iterations, fall back to a reasonable default
+        raise ValueError(
+            f"Could not find next execution time for cron schedule within {max_iterations} iterations"
+        )
 
 
 class CronJob(BaseModel):
@@ -141,13 +201,13 @@ class CronJob(BaseModel):
 
     def schedule_next(self, now: Optional[datetime] = None) -> None:
         """Compute the next run time strictly after *now*."""
-        now = (now or datetime.now(UTC)).replace(second=0, microsecond=0)
+        now = (now or datetime.now(timezone.utc)).replace(second=0, microsecond=0)
         if self._cron is None:
            self._cron = CronSchedule(self.schedule)
         self.next_run_time = self._cron.next_after(now)
 
     def due(self, now: Optional[datetime] = None) -> bool:
-        now = now or datetime.now(UTC)
+        now = now or datetime.now(timezone.utc)
         if self.next_run_time is None:
             self.schedule_next(now)
         return now >= (self.next_run_time or now)
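The rewritten `next_after` jumps straight to the next candidate month, hour, minute, or day instead of advancing one minute at a time, and the iteration guard turns a never-matching schedule into an error rather than an infinite loop. A sketch exercising it, assuming the conventional five-field order (minute, hour, day-of-month, month, day-of-week) for `CronJob.schedule` strings:

```python
from datetime import datetime, timezone

from rrq.cron import CronSchedule

# "0 9 * * 1" -> 09:00 every Monday (cron weekday: Sunday=0, Monday=1).
sched = CronSchedule("0 9 * * 1")
friday = datetime(2024, 1, 5, 12, 0, tzinfo=timezone.utc)
print(sched.next_after(friday))  # 2024-01-08 09:00, reached via day-level skips

# A schedule that can never fire (February 30th) no longer spins forever;
# it eventually raises ValueError (the iteration guard, or datetime's own
# year cap if the candidate date overflows first).
try:
    CronSchedule("0 0 30 2 *").next_after(friday)
except ValueError as exc:
    print(exc)
```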
rrq/hooks.py ADDED
@@ -0,0 +1,217 @@
+"""Lightweight hooks system for RRQ monitoring and integrations"""
+
+import asyncio
+import importlib
+import logging
+from abc import ABC, abstractmethod
+from typing import Any, Callable, Dict, List
+
+from .job import Job
+from .settings import RRQSettings
+
+
+logger = logging.getLogger(__name__)
+
+
+class RRQHook(ABC):
+    """Base class for RRQ hooks"""
+
+    def __init__(self, settings: RRQSettings):
+        self.settings = settings
+
+    async def on_job_enqueued(self, job: Job) -> None:
+        """Called when a job is enqueued"""
+        pass
+
+    async def on_job_started(self, job: Job, worker_id: str) -> None:
+        """Called when a job starts processing"""
+        pass
+
+    async def on_job_completed(self, job: Job, result: Any) -> None:
+        """Called when a job completes successfully"""
+        pass
+
+    async def on_job_failed(self, job: Job, error: Exception) -> None:
+        """Called when a job fails"""
+        pass
+
+    async def on_job_retrying(self, job: Job, attempt: int) -> None:
+        """Called when a job is being retried"""
+        pass
+
+    async def on_worker_started(self, worker_id: str, queues: List[str]) -> None:
+        """Called when a worker starts"""
+        pass
+
+    async def on_worker_stopped(self, worker_id: str) -> None:
+        """Called when a worker stops"""
+        pass
+
+    async def on_worker_heartbeat(self, worker_id: str, health_data: Dict) -> None:
+        """Called on worker heartbeat"""
+        pass
+
+
+class MetricsExporter(ABC):
+    """Base class for metrics exporters"""
+
+    @abstractmethod
+    async def export_counter(
+        self, name: str, value: float, labels: Dict[str, str] = None
+    ) -> None:
+        """Export a counter metric"""
+        pass
+
+    @abstractmethod
+    async def export_gauge(
+        self, name: str, value: float, labels: Dict[str, str] = None
+    ) -> None:
+        """Export a gauge metric"""
+        pass
+
+    @abstractmethod
+    async def export_histogram(
+        self, name: str, value: float, labels: Dict[str, str] = None
+    ) -> None:
+        """Export a histogram metric"""
+        pass
+
+
+class HookManager:
+    """Manages hooks and exporters for RRQ"""
+
+    def __init__(self, settings: RRQSettings):
+        self.settings = settings
+        self.hooks: List[RRQHook] = []
+        self.exporters: Dict[str, MetricsExporter] = {}
+        self._initialized = False
+
+    async def initialize(self):
+        """Initialize hooks and exporters from settings"""
+        if self._initialized:
+            return
+
+        # Load event handlers
+        if hasattr(self.settings, "event_handlers"):
+            for handler_path in self.settings.event_handlers:
+                try:
+                    hook = self._load_hook(handler_path)
+                    self.hooks.append(hook)
+                    logger.info(f"Loaded hook: {handler_path}")
+                except Exception as e:
+                    logger.error(f"Failed to load hook {handler_path}: {e}")
+
+        # Load metrics exporter
+        if hasattr(self.settings, "metrics_exporter"):
+            exporter_type = self.settings.metrics_exporter
+            if exporter_type:
+                try:
+                    exporter = self._load_exporter(exporter_type)
+                    self.exporters[exporter_type] = exporter
+                    logger.info(f"Loaded metrics exporter: {exporter_type}")
+                except Exception as e:
+                    logger.error(f"Failed to load exporter {exporter_type}: {e}")
+
+        self._initialized = True
+
+    def _load_hook(self, handler_path: str) -> RRQHook:
+        """Load a hook from a module path"""
+        module_path, class_name = handler_path.rsplit(".", 1)
+        module = importlib.import_module(module_path)
+        hook_class = getattr(module, class_name)
+
+        if not issubclass(hook_class, RRQHook):
+            raise ValueError(f"{handler_path} is not a subclass of RRQHook")
+
+        return hook_class(self.settings)
+
+    def _load_exporter(self, exporter_type: str) -> MetricsExporter:
+        """Load a metrics exporter"""
+        # Built-in exporters
+        if exporter_type == "prometheus":
+            from .exporters.prometheus import PrometheusExporter
+
+            return PrometheusExporter(self.settings)
+        elif exporter_type == "statsd":
+            from .exporters.statsd import StatsdExporter
+
+            return StatsdExporter(self.settings)
+        else:
+            # Try to load as custom exporter
+            return self._load_hook(exporter_type)
+
+    async def trigger_event(self, event_name: str, *args, **kwargs):
+        """Trigger an event on all hooks"""
+        if not self._initialized:
+            await self.initialize()
+
+        # Run hooks concurrently but catch exceptions
+        tasks = []
+        for hook in self.hooks:
+            if hasattr(hook, event_name):
+                method = getattr(hook, event_name)
+                task = asyncio.create_task(self._safe_call(method, *args, **kwargs))
+                tasks.append(task)
+
+        if tasks:
+            await asyncio.gather(*tasks, return_exceptions=True)
+
+    async def _safe_call(self, method: Callable, *args, **kwargs):
+        """Safely call a hook method"""
+        try:
+            await method(*args, **kwargs)
+        except Exception as e:
+            logger.error(f"Error in hook {method.__qualname__}: {e}")
+
+    async def export_metric(
+        self, metric_type: str, name: str, value: float, labels: Dict[str, str] = None
+    ):
+        """Export a metric to all configured exporters"""
+        if not self._initialized:
+            await self.initialize()
+
+        for exporter in self.exporters.values():
+            try:
+                if metric_type == "counter":
+                    await exporter.export_counter(name, value, labels)
+                elif metric_type == "gauge":
+                    await exporter.export_gauge(name, value, labels)
+                elif metric_type == "histogram":
+                    await exporter.export_histogram(name, value, labels)
+            except Exception as e:
+                logger.error(f"Error exporting metric {name}: {e}")
+
+    async def close(self):
+        """Close all exporters"""
+        for exporter in self.exporters.values():
+            if hasattr(exporter, "close"):
+                try:
+                    await exporter.close()
+                except Exception as e:
+                    logger.error(f"Error closing exporter: {e}")
+
+
+# Example hook implementation
+class LoggingHook(RRQHook):
+    """Example hook that logs all events"""
+
+    async def on_job_enqueued(self, job: Job) -> None:
+        logger.info(f"Job enqueued: {job.id} - {job.function_name}")
+
+    async def on_job_started(self, job: Job, worker_id: str) -> None:
+        logger.info(f"Job started: {job.id} on worker {worker_id}")
+
+    async def on_job_completed(self, job: Job, result: Any) -> None:
+        logger.info(f"Job completed: {job.id}")
+
+    async def on_job_failed(self, job: Job, error: Exception) -> None:
+        logger.error(f"Job failed: {job.id} - {error}")
+
+    async def on_job_retrying(self, job: Job, attempt: int) -> None:
+        logger.warning(f"Job retrying: {job.id} - attempt {attempt}")
+
+    async def on_worker_started(self, worker_id: str, queues: List[str]) -> None:
+        logger.info(f"Worker started: {worker_id} on queues {queues}")
+
+    async def on_worker_stopped(self, worker_id: str) -> None:
+        logger.info(f"Worker stopped: {worker_id}")
rrq/job.py CHANGED
@@ -3,7 +3,7 @@ including the Job model and JobStatus enumeration.
 """
 
 import uuid
-from datetime import UTC, datetime
+from datetime import timezone, datetime
 from enum import Enum
 from typing import Any, Optional
 
@@ -50,8 +50,8 @@ class Job(BaseModel):
     )
 
     enqueue_time: datetime = Field(
-        default_factory=lambda: datetime.now(UTC),
-        description="Timestamp (UTC) when the job was initially enqueued.",
+        default_factory=lambda: datetime.now(timezone.utc),
+        description="Timestamp (timezone.utc) when the job was initially enqueued.",
     )
 
     status: JobStatus = Field(
@@ -62,7 +62,7 @@ class Job(BaseModel):
     )
     next_scheduled_run_time: Optional[datetime] = Field(
         default=None,
-        description="Timestamp (UTC) when the job is next scheduled to run (for retries/deferrals).",
+        description="Timestamp (timezone.utc) when the job is next scheduled to run (for retries/deferrals).",
     )
 
     # Execution control parameters, can be overridden from worker defaults.
@@ -86,7 +86,7 @@ class Job(BaseModel):
     # Fields populated upon job completion or failure.
     completion_time: Optional[datetime] = Field(
         default=None,
-        description="Timestamp (UTC) when the job finished (completed or failed permanently).",
+        description="Timestamp (timezone.utc) when the job finished (completed or failed permanently).",
     )
     result: Optional[Any] = Field(
         default=None,
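The recurring `UTC` → `timezone.utc` substitution in this file (and in client.py and cron.py above) is consistent with broadening Python support: the `datetime.UTC` alias only exists on Python 3.11+, while `datetime.timezone.utc` works on all supported versions. The two name the same tzinfo object:

```python
from datetime import datetime, timezone

# Portable spelling, valid on every supported Python version:
now = datetime.now(timezone.utc)

# On Python 3.11+ only, datetime.UTC is an alias for timezone.utc:
# from datetime import UTC
# assert UTC is timezone.utc
```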
rrq/registry.py CHANGED
@@ -69,6 +69,3 @@ class JobRegistry:
     def clear(self) -> None:
         """Clears all registered handlers from the registry."""
         self._handlers.clear()
-
-
-# Global instance for convenience, though applications might manage their own.
rrq/settings.py CHANGED
@@ -5,7 +5,7 @@ Settings can be loaded from environment variables (with a prefix of `RRQ_`) or
 from a .env file. Sensible defaults are provided for most settings.
 """
 
-from typing import Awaitable, Callable, Optional
+from typing import Awaitable, Callable, List, Optional
 
 from pydantic import Field
 from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -102,6 +102,18 @@ class RRQSettings(BaseSettings):
         default_factory=list,
         description="Optional list of cron job specifications to run periodically.",
     )
+    event_handlers: List[str] = Field(
+        default_factory=list,
+        description="List of module paths to event handler classes that implement RRQHook.",
+    )
+    expected_job_ttl: int = Field(
+        default=30,
+        description="Expected job processing time buffer for locks (in seconds)."
+    )
+    metrics_exporter: Optional[str] = Field(
+        default=None,
+        description="Metrics exporter type ('prometheus', 'statsd') or module path to custom exporter.",
+    )
     model_config = SettingsConfigDict(
         env_prefix="RRQ_",
         extra="ignore",