rrq 0.7.0-py3-none-any.whl → 0.8.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- rrq/cli.py +5 -3
- rrq/cli_commands/base.py +4 -1
- rrq/cli_commands/commands/debug.py +2 -2
- rrq/cli_commands/commands/monitor.py +92 -60
- rrq/cli_commands/commands/queues.py +2 -2
- rrq/cli_commands/utils.py +5 -4
- rrq/client.py +110 -100
- rrq/exporters/__init__.py +1 -0
- rrq/exporters/prometheus.py +90 -0
- rrq/exporters/statsd.py +60 -0
- rrq/hooks.py +80 -47
- rrq/integrations/__init__.py +1 -0
- rrq/integrations/ddtrace.py +456 -0
- rrq/integrations/logfire.py +23 -0
- rrq/integrations/otel.py +325 -0
- rrq/job.py +6 -0
- rrq/settings.py +2 -2
- rrq/store.py +49 -6
- rrq/telemetry.py +129 -0
- rrq/worker.py +259 -94
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/METADATA +47 -8
- rrq-0.8.0.dist-info/RECORD +34 -0
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/WHEEL +1 -1
- rrq-0.7.0.dist-info/RECORD +0 -26
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/entry_points.txt +0 -0
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/licenses/LICENSE +0 -0
rrq/client.py
CHANGED
@@ -8,6 +8,7 @@ from typing import Any, Optional
 from .job import Job, JobStatus
 from .settings import RRQSettings
 from .store import JobStore
+from .telemetry import get_telemetry
 
 logger = logging.getLogger(__name__)
 
@@ -77,110 +78,119 @@ class RRQClient:
             The created Job object if successfully enqueued, or None if enqueueing was denied
             (e.g., due to a unique key conflict).
         """
-        # Determine job ID and …
+        # Determine job ID and queue name early for telemetry.
         job_id_to_use = _job_id or str(uuid.uuid4())
-        … (old lines 82-107 not rendered in the source extraction)
-        desired_run_time = …
-        … (old lines 109-113 not rendered in the source extraction)
-        )
-        if …
-            unique_acquired = True
-        else:
-            # Race: lock acquired after our check; defer by remaining TTL
-            remaining = await self.job_store.get_lock_ttl(_unique_key)
+        queue_name_to_use = _queue_name or self.settings.default_queue_name
+
+        telemetry = get_telemetry()
+        with telemetry.enqueue_span(
+            job_id=job_id_to_use,
+            function_name=function_name,
+            queue_name=queue_name_to_use,
+        ) as trace_context:
+            # Determine enqueue timestamp (after telemetry span starts).
+            enqueue_time_utc = datetime.now(timezone.utc)
+
+            # Compute base desired run time and unique lock TTL to cover deferral
+            lock_ttl_seconds = self.settings.default_unique_job_lock_ttl_seconds
+            desired_run_time = enqueue_time_utc
+            if _defer_until is not None:
+                dt = _defer_until
+                if dt.tzinfo is None:
+                    dt = dt.replace(tzinfo=timezone.utc)
+                elif dt.tzinfo != timezone.utc:
+                    dt = dt.astimezone(timezone.utc)
+                desired_run_time = dt
+                diff = (dt - enqueue_time_utc).total_seconds()
+                if diff > 0:
+                    lock_ttl_seconds = max(lock_ttl_seconds, int(diff) + 1)
+            elif _defer_by is not None:
+                defer_secs = max(0, int(_defer_by.total_seconds()))
+                desired_run_time = enqueue_time_utc + timedelta(seconds=defer_secs)
+                lock_ttl_seconds = max(lock_ttl_seconds, defer_secs + 1)
+
+            # Handle unique key with deferral if locked
+            unique_acquired = False
+            if _unique_key:
+                remaining_ttl = await self.job_store.get_lock_ttl(_unique_key)
+                if remaining_ttl > 0:
                     desired_run_time = max(
                         desired_run_time,
-                        enqueue_time_utc + timedelta(seconds=…
+                        enqueue_time_utc + timedelta(seconds=remaining_ttl),
                     )
+                else:
+                    acquired = await self.job_store.acquire_unique_job_lock(
+                        _unique_key, job_id_to_use, lock_ttl_seconds
+                    )
+                    if acquired:
+                        unique_acquired = True
+                    else:
+                        # Race: lock acquired after our check; defer by remaining TTL
+                        remaining = await self.job_store.get_lock_ttl(_unique_key)
+                        desired_run_time = max(
+                            desired_run_time,
+                            enqueue_time_utc
+                            + timedelta(seconds=max(0, int(remaining))),
+                        )
+
+            # Create the Job instance with all provided details and defaults
+            job = Job(
+                id=job_id_to_use,
+                function_name=function_name,
+                job_args=list(args),
+                job_kwargs=kwargs,
+                enqueue_time=enqueue_time_utc,
+                status=JobStatus.PENDING,
+                current_retries=0,
+                max_retries=(
+                    _max_retries
+                    if _max_retries is not None
+                    else self.settings.default_max_retries
+                ),
+                job_timeout_seconds=(
+                    _job_timeout_seconds
+                    if _job_timeout_seconds is not None
+                    else self.settings.default_job_timeout_seconds
+                ),
+                result_ttl_seconds=(
+                    _result_ttl_seconds
+                    if _result_ttl_seconds is not None
+                    else self.settings.default_result_ttl_seconds
+                ),
+                job_unique_key=_unique_key,
+                queue_name=queue_name_to_use,  # Store the target queue name
+                trace_context=trace_context,
+            )
 
-
+            # Determine the score for the sorted set (queue)
+            # Score is a millisecond timestamp for when the job should be processed.
+            score_dt = desired_run_time
+
+            # Ensure score_dt is timezone-aware (timezone.utc) if it's naive from user input
+            if score_dt.tzinfo is None:
+                score_dt = score_dt.replace(tzinfo=timezone.utc)
+            elif score_dt.tzinfo != timezone.utc:
+                # Convert to timezone.utc if it's aware but not timezone.utc
+                score_dt = score_dt.astimezone(timezone.utc)
+
+            score_timestamp_ms = int(score_dt.timestamp() * 1000)
+            # Record when the job is next scheduled to run (for deferred execution)
+            job.next_scheduled_run_time = score_dt
+
+            # Save the full job definition and add to queue (ensure unique lock is released on error)
+            try:
+                await self.job_store.save_job_definition(job)
+                await self.job_store.add_job_to_queue(
+                    queue_name_to_use,
+                    job.id,
+                    float(score_timestamp_ms),
+                )
+            except Exception:
+                if unique_acquired and _unique_key is not None:
+                    await self.job_store.release_unique_job_lock(_unique_key)
+                raise
 
-        # Create the Job instance with all provided details and defaults
-        job = Job(
-            id=job_id_to_use,
-            function_name=function_name,
-            job_args=list(args),
-            job_kwargs=kwargs,
-            enqueue_time=enqueue_time_utc,
-            status=JobStatus.PENDING,
-            current_retries=0,
-            max_retries=(
-                _max_retries
-                if _max_retries is not None
-                else self.settings.default_max_retries
-            ),
-            job_timeout_seconds=(
-                _job_timeout_seconds
-                if _job_timeout_seconds is not None
-                else self.settings.default_job_timeout_seconds
-            ),
-            result_ttl_seconds=(
-                _result_ttl_seconds
-                if _result_ttl_seconds is not None
-                else self.settings.default_result_ttl_seconds
-            ),
-            job_unique_key=_unique_key,
-            queue_name=queue_name_to_use,  # Store the target queue name
-        )
-
-        # Determine the score for the sorted set (queue)
-        # Score is a millisecond timestamp for when the job should be processed.
-        score_dt = desired_run_time
-
-        # Ensure score_dt is timezone-aware (timezone.utc) if it's naive from user input
-        if score_dt.tzinfo is None:
-            score_dt = score_dt.replace(tzinfo=timezone.utc)
-        elif score_dt.tzinfo != timezone.utc:
-            # Convert to timezone.utc if it's aware but not timezone.utc
-            score_dt = score_dt.astimezone(timezone.utc)
-
-        score_timestamp_ms = int(score_dt.timestamp() * 1000)
-        # Record when the job is next scheduled to run (for deferred execution)
-        job.next_scheduled_run_time = score_dt
-
-        # Save the full job definition and add to queue (ensure unique lock is released on error)
-        try:
-            await self.job_store.save_job_definition(job)
-            await self.job_store.add_job_to_queue(
-                queue_name_to_use,
-                job.id,
-                float(score_timestamp_ms),
+            logger.debug(
+                f"Enqueued job {job.id} ('{job.function_name}') to queue '{queue_name_to_use}' with score {score_timestamp_ms}"
             )
-        except Exception:
-            if unique_acquired:
-                await self.job_store.release_unique_job_lock(_unique_key)
-            raise
-
-        logger.debug(
-            f"Enqueued job {job.id} ('{job.function_name}') to queue '{queue_name_to_use}' with score {score_timestamp_ms}"
-        )
-        return job
+            return job
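Aside on the scheduling arithmetic above: a job's position in the queue is its UTC run time in integer milliseconds (the sorted-set score), and the unique-job lock TTL is stretched so it outlives any deferral window. A minimal standalone sketch of that arithmetic (plain Python, not the RRQ API; the 300-second default is a stand-in for settings.default_unique_job_lock_ttl_seconds):

from datetime import datetime, timedelta, timezone

enqueue_time_utc = datetime.now(timezone.utc)
defer_by = timedelta(seconds=30)

# Sorted-set score: the UTC run time in integer milliseconds.
desired_run_time = enqueue_time_utc + defer_by
score_timestamp_ms = int(desired_run_time.timestamp() * 1000)

# The unique lock must survive until the job can actually run, hence "+ 1".
default_lock_ttl_seconds = 300  # stand-in for the configured default
lock_ttl_seconds = max(default_lock_ttl_seconds, int(defer_by.total_seconds()) + 1)

print(score_timestamp_ms, lock_ttl_seconds)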
rrq/exporters/__init__.py
ADDED
@@ -0,0 +1 @@
+"""Built-in metrics exporters for RRQ hooks."""
rrq/exporters/prometheus.py
ADDED
@@ -0,0 +1,90 @@
+"""Prometheus metrics exporter for RRQ hooks.
+
+This exporter is optional and requires `prometheus_client` to be installed.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from ..hooks import MetricsExporter
+from ..settings import RRQSettings
+
+
+class PrometheusExporter(MetricsExporter):
+    """Exports RRQ metrics via `prometheus_client`."""
+
+    def __init__(self, settings: RRQSettings):
+        super().__init__(settings)
+        try:
+            from prometheus_client import Counter, Gauge, Histogram  # type: ignore[import-not-found]
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                "Prometheus exporter requires `prometheus_client` to be installed."
+            ) from e
+
+        self._counter_cls = Counter
+        self._gauge_cls = Gauge
+        self._histogram_cls = Histogram
+
+        self._counters: dict[tuple[str, tuple[str, ...]], Any] = {}
+        self._gauges: dict[tuple[str, tuple[str, ...]], Any] = {}
+        self._histograms: dict[tuple[str, tuple[str, ...]], Any] = {}
+
+    def _get_metric(
+        self,
+        store: dict[tuple[str, tuple[str, ...]], Any],
+        metric_cls: Any,
+        name: str,
+        labelnames: tuple[str, ...],
+    ) -> Any:
+        key = (name, labelnames)
+        metric = store.get(key)
+        if metric is not None:
+            return metric
+
+        description = name
+        if labelnames:
+            metric = metric_cls(name, description, labelnames=labelnames)
+        else:
+            metric = metric_cls(name, description)
+        store[key] = metric
+        return metric
+
+    @staticmethod
+    def _sorted_labelnames(labels: dict[str, str] | None) -> tuple[str, ...]:
+        if not labels:
+            return ()
+        return tuple(sorted(labels.keys()))
+
+    async def export_counter(
+        self, name: str, value: float, labels: dict[str, str] | None = None
+    ) -> None:
+        labelnames = self._sorted_labelnames(labels)
+        counter = self._get_metric(self._counters, self._counter_cls, name, labelnames)
+        if labelnames and labels:
+            counter.labels(**{k: labels[k] for k in labelnames}).inc(value)
+        else:
+            counter.inc(value)
+
+    async def export_gauge(
+        self, name: str, value: float, labels: dict[str, str] | None = None
+    ) -> None:
+        labelnames = self._sorted_labelnames(labels)
+        gauge = self._get_metric(self._gauges, self._gauge_cls, name, labelnames)
+        if labelnames and labels:
+            gauge.labels(**{k: labels[k] for k in labelnames}).set(value)
+        else:
+            gauge.set(value)
+
+    async def export_histogram(
+        self, name: str, value: float, labels: dict[str, str] | None = None
+    ) -> None:
+        labelnames = self._sorted_labelnames(labels)
+        histogram = self._get_metric(
+            self._histograms, self._histogram_cls, name, labelnames
+        )
+        if labelnames and labels:
+            histogram.labels(**{k: labels[k] for k in labelnames}).observe(value)
+        else:
+            histogram.observe(value)
rrq/exporters/statsd.py
ADDED
@@ -0,0 +1,60 @@
+"""StatsD metrics exporter for RRQ hooks.
+
+This exporter is optional and requires `statsd` to be installed.
+
+Labels are currently ignored because vanilla StatsD does not support tags.
+"""
+
+from __future__ import annotations
+
+import os
+
+from ..hooks import MetricsExporter
+from ..settings import RRQSettings
+
+
+class StatsdExporter(MetricsExporter):
+    """Exports RRQ metrics via `statsd.StatsClient`."""
+
+    def __init__(
+        self,
+        settings: RRQSettings,
+        *,
+        host: str | None = None,
+        port: int | None = None,
+        prefix: str | None = None,
+    ):
+        super().__init__(settings)
+        try:
+            from statsd import StatsClient  # type: ignore[import-not-found]
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                "StatsD exporter requires `statsd` to be installed."
+            ) from e
+
+        resolved_host = host or os.getenv("RRQ_STATSD_HOST", "localhost")
+        resolved_port = port or int(os.getenv("RRQ_STATSD_PORT", "8125"))
+        resolved_prefix = prefix or os.getenv("RRQ_STATSD_PREFIX", "rrq")
+
+        self._client = StatsClient(
+            host=resolved_host, port=resolved_port, prefix=resolved_prefix
+        )
+
+    async def export_counter(
+        self, name: str, value: float, labels: dict[str, str] | None = None
+    ) -> None:
+        _ = labels
+        self._client.incr(name, int(value))
+
+    async def export_gauge(
+        self, name: str, value: float, labels: dict[str, str] | None = None
+    ) -> None:
+        _ = labels
+        self._client.gauge(name, value)
+
+    async def export_histogram(
+        self, name: str, value: float, labels: dict[str, str] | None = None
+    ) -> None:
+        _ = labels
+        # StatsD doesn't have a standard histogram type; use timing in milliseconds.
+        self._client.timing(name, int(value * 1000))
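Host, port, and prefix fall back to the RRQ_STATSD_HOST, RRQ_STATSD_PORT, and RRQ_STATSD_PREFIX environment variables, then to localhost:8125 with prefix "rrq". A usage sketch (assumes the statsd package is installed; the hostname and metric names are illustrative):

import asyncio

from rrq.exporters.statsd import StatsdExporter
from rrq.settings import RRQSettings


async def main() -> None:
    exporter = StatsdExporter(RRQSettings(), host="statsd.internal", port=8125)
    await exporter.export_gauge("queue_depth", 42)
    # Histograms are emitted as StatsD timings: the value is treated as
    # seconds and converted to integer milliseconds (0.25 s -> 250 ms).
    await exporter.export_histogram("job_duration", 0.25)


asyncio.run(main())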
rrq/hooks.py
CHANGED
@@ -4,7 +4,8 @@ import asyncio
 import importlib
 import logging
 from abc import ABC, abstractmethod
-from …
+from collections.abc import Awaitable, Callable
+from typing import Any, cast
 
 from .job import Job
 from .settings import RRQSettings
@@ -39,7 +40,7 @@ class RRQHook(ABC):
         """Called when a job is being retried"""
         pass
 
-    async def on_worker_started(self, worker_id: str, queues: …
+    async def on_worker_started(self, worker_id: str, queues: list[str]) -> None:
         """Called when a worker starts"""
         pass
 
@@ -47,7 +48,9 @@ class RRQHook(ABC):
         """Called when a worker stops"""
         pass
 
-    async def on_worker_heartbeat( …
+    async def on_worker_heartbeat(
+        self, worker_id: str, health_data: dict[str, Any]
+    ) -> None:
         """Called on worker heartbeat"""
         pass
 
@@ -55,62 +58,67 @@ class RRQHook(ABC):
 class MetricsExporter(ABC):
     """Base class for metrics exporters"""
 
+    def __init__(self, settings: RRQSettings):
+        self.settings = settings
+
     @abstractmethod
     async def export_counter(
-        self, name: str, value: float, labels: …
+        self, name: str, value: float, labels: dict[str, str] | None = None
     ) -> None:
         """Export a counter metric"""
         pass
 
     @abstractmethod
     async def export_gauge(
-        self, name: str, value: float, labels: …
+        self, name: str, value: float, labels: dict[str, str] | None = None
    ) -> None:
         """Export a gauge metric"""
         pass
 
     @abstractmethod
     async def export_histogram(
-        self, name: str, value: float, labels: …
+        self, name: str, value: float, labels: dict[str, str] | None = None
    ) -> None:
         """Export a histogram metric"""
         pass
 
+    async def close(self) -> None:
+        """Close any exporter resources."""
+        return None
+
 
 class HookManager:
     """Manages hooks and exporters for RRQ"""
 
     def __init__(self, settings: RRQSettings):
         self.settings = settings
-        self.hooks: …
-        self.exporters: …
+        self.hooks: list[RRQHook] = []
+        self.exporters: dict[str, MetricsExporter] = {}
         self._initialized = False
 
-    async def initialize(self):
+    async def initialize(self) -> None:
         """Initialize hooks and exporters from settings"""
         if self._initialized:
             return
 
         # Load event handlers
-        … (old lines 95-101 not rendered in the source extraction)
-            logger.error(f"Failed to load hook {handler_path}: {e}")
+        for handler_path in self.settings.event_handlers:
+            try:
+                hook = self._load_hook(handler_path)
+                self.hooks.append(hook)
+                logger.info(f"Loaded hook: {handler_path}")
+            except Exception as e:
+                logger.error(f"Failed to load hook {handler_path}: {e}")
 
         # Load metrics exporter
-        … (old lines 105-112 not rendered in the source extraction)
-            logger.error(f"Failed to load exporter {exporter_type}: {e}")
+        exporter_type = self.settings.metrics_exporter
+        if exporter_type is not None:
+            try:
+                exporter = self._load_exporter(exporter_type)
+                self.exporters[exporter_type] = exporter
+                logger.info(f"Loaded metrics exporter: {exporter_type}")
+            except Exception as e:
+                logger.error(f"Failed to load exporter {exporter_type}: {e}")
 
         self._initialized = True
 
@@ -120,7 +128,7 @@ class HookManager:
         module = importlib.import_module(module_path)
         hook_class = getattr(module, class_name)
 
-        if not issubclass(hook_class, RRQHook):
+        if not isinstance(hook_class, type) or not issubclass(hook_class, RRQHook):
             raise ValueError(f"{handler_path} is not a subclass of RRQHook")
 
         return hook_class(self.settings)
@@ -136,36 +144,62 @@ class HookManager:
             from .exporters.statsd import StatsdExporter
 
             return StatsdExporter(self.settings)
-
-        # Try to load as custom exporter
-        return self._load_hook(exporter_type)
+        return self._load_custom_exporter(exporter_type)
 
-    … (old line 143, the previous trigger_event signature, not rendered)
+    def _load_custom_exporter(self, exporter_path: str) -> MetricsExporter:
+        """Load a metrics exporter from a module path."""
+        module_path, class_name = exporter_path.rsplit(".", 1)
+        module = importlib.import_module(module_path)
+        exporter_class = getattr(module, class_name)
+
+        if not isinstance(exporter_class, type) or not issubclass(
+            exporter_class, MetricsExporter
+        ):
+            raise ValueError(f"{exporter_path} is not a subclass of MetricsExporter")
+
+        return exporter_class(self.settings)
+
+    async def trigger_event(self, event_name: str, *args: Any, **kwargs: Any) -> None:
         """Trigger an event on all hooks"""
         if not self._initialized:
             await self.initialize()
 
         # Run hooks concurrently but catch exceptions
-        tasks = []
+        tasks: list[asyncio.Task[object]] = []
         for hook in self.hooks:
-            … (old lines 151-154 not rendered in the source extraction)
+            method = getattr(hook, event_name, None)
+            if method is None:
+                continue
+
+            task = asyncio.create_task(
+                self._safe_call(
+                    cast(Callable[..., Awaitable[Any]], method), *args, **kwargs
+                )
+            )
+            tasks.append(task)
 
         if tasks:
             await asyncio.gather(*tasks, return_exceptions=True)
 
-    async def _safe_call( …
+    async def _safe_call(
+        self, method: Callable[..., Awaitable[Any]], *args: Any, **kwargs: Any
+    ) -> None:
         """Safely call a hook method"""
         try:
             await method(*args, **kwargs)
         except Exception as e:
-            … (old line 164 not rendered in the source extraction)
+            method_name = getattr(
+                method, "__qualname__", getattr(method, "__name__", "")
+            )
+            logger.error(f"Error in hook {method_name}: {e}")
 
     async def export_metric(
-        self,
-        … (old line 168 not rendered in the source extraction)
+        self,
+        metric_type: str,
+        name: str,
+        value: float,
+        labels: dict[str, str] | None = None,
+    ) -> None:
         """Export a metric to all configured exporters"""
         if not self._initialized:
             await self.initialize()
@@ -181,14 +215,13 @@ class HookManager:
         except Exception as e:
             logger.error(f"Error exporting metric {name}: {e}")
 
-    async def close(self):
+    async def close(self) -> None:
         """Close all exporters"""
         for exporter in self.exporters.values():
-            … (old lines 187-190 not rendered in the source extraction)
-            logger.error(f"Error closing exporter: {e}")
+            try:
+                await exporter.close()
+            except Exception as e:
+                logger.error(f"Error closing exporter: {e}")
 
 
 # Example hook implementation
@@ -210,7 +243,7 @@ class LoggingHook(RRQHook):
     async def on_job_retrying(self, job: Job, attempt: int) -> None:
         logger.warning(f"Job retrying: {job.id} - attempt {attempt}")
 
-    async def on_worker_started(self, worker_id: str, queues: …
+    async def on_worker_started(self, worker_id: str, queues: list[str]) -> None:
         logger.info(f"Worker started: {worker_id} on queues {queues}")
 
     async def on_worker_stopped(self, worker_id: str) -> None:
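Custom hooks and exporters are both loaded from dotted paths: _load_hook and _load_custom_exporter rsplit on the final ".", import the module, check that the attribute is a class subclassing RRQHook or MetricsExporter respectively, and instantiate it with the active RRQSettings. A sketch of a custom hook (hypothetical module myapp/observability.py; which RRQHook callbacks are abstract is not visible in this diff, so only the ones shown above are overridden):

import logging
from typing import Any

from rrq.hooks import RRQHook

logger = logging.getLogger(__name__)


class AuditHook(RRQHook):
    async def on_worker_started(self, worker_id: str, queues: list[str]) -> None:
        logger.info("worker %s consuming queues %s", worker_id, queues)

    async def on_worker_heartbeat(
        self, worker_id: str, health_data: dict[str, Any]
    ) -> None:
        logger.debug("heartbeat from %s: %s", worker_id, health_data)


# Wired in via settings, e.g.:
#   event_handlers = ["myapp.observability.AuditHook"]
#   metrics_exporter = "statsd"  # or a dotted path to a MetricsExporter subclass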
rrq/integrations/__init__.py
ADDED
@@ -0,0 +1 @@
+"""Telemetry integrations for RRQ (optional dependencies)."""
|