rrq-0.7.0-py3-none-any.whl → rrq-0.8.0-py3-none-any.whl
- rrq/cli.py +5 -3
- rrq/cli_commands/base.py +4 -1
- rrq/cli_commands/commands/debug.py +2 -2
- rrq/cli_commands/commands/monitor.py +92 -60
- rrq/cli_commands/commands/queues.py +2 -2
- rrq/cli_commands/utils.py +5 -4
- rrq/client.py +110 -100
- rrq/exporters/__init__.py +1 -0
- rrq/exporters/prometheus.py +90 -0
- rrq/exporters/statsd.py +60 -0
- rrq/hooks.py +80 -47
- rrq/integrations/__init__.py +1 -0
- rrq/integrations/ddtrace.py +456 -0
- rrq/integrations/logfire.py +23 -0
- rrq/integrations/otel.py +325 -0
- rrq/job.py +6 -0
- rrq/settings.py +2 -2
- rrq/store.py +49 -6
- rrq/telemetry.py +129 -0
- rrq/worker.py +259 -94
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/METADATA +47 -8
- rrq-0.8.0.dist-info/RECORD +34 -0
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/WHEEL +1 -1
- rrq-0.7.0.dist-info/RECORD +0 -26
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/entry_points.txt +0 -0
- {rrq-0.7.0.dist-info → rrq-0.8.0.dist-info}/licenses/LICENSE +0 -0
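Most of the new code in 0.8.0 is observability plumbing: a telemetry layer (rrq/telemetry.py), pluggable exporters (rrq/exporters/prometheus.py, rrq/exporters/statsd.py), and tracing integrations for OpenTelemetry, ddtrace, and Logfire. The exporter bodies are not shown in this diff, so for orientation only, here is what a queue exporter built on the prometheus_client library typically looks like; every metric name below is hypothetical and none of it is taken from the rrq source.

from prometheus_client import Gauge, start_http_server

# Hypothetical metric name; the real names live in rrq/exporters/prometheus.py.
QUEUE_SIZE = Gauge("rrq_queue_size", "Pending jobs per queue", ["queue"])

def serve_metrics(port: int = 9100) -> None:
    # prometheus_client serves /metrics from a background thread
    start_http_server(port)

# A polling loop would push real queue sizes like this:
QUEUE_SIZE.labels(queue="default").set(42)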
rrq/cli.py
CHANGED
@@ -502,7 +502,8 @@ def _run_single_worker(
     """Helper function to run a single RRQ worker instance."""
     rrq_settings = _load_app_settings(settings_object_path)
 
-    …
+    job_registry = rrq_settings.job_registry
+    if job_registry is None:
         click.echo(
             click.style(
                 "ERROR: No 'job_registry'. You must provide a JobRegistry instance in settings.",
@@ -511,15 +512,16 @@ def _run_single_worker(
             err=True,
         )
         sys.exit(1)
+    assert job_registry is not None
 
     logger.debug(
-        f"Registered handlers (from effective registry): {…
+        f"Registered handlers (from effective registry): {job_registry.get_registered_functions()}"
    )
     logger.debug(f"Effective RRQ settings for worker: {rrq_settings}")
 
     worker_instance = RRQWorker(
         settings=rrq_settings,
-        job_registry=…
+        job_registry=job_registry,
         queues=queues_arg,
         burst=burst,
     )
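The change above is the standard way to narrow an Optional attribute for static checkers: copy it into a local, bail out on None, then use only the local; the extra assert after the sys.exit(1) branch is belt and braces for checkers that do not treat that branch as terminating. A self-contained sketch of the same shape, with all names hypothetical:

class JobRegistry:
    def get_registered_functions(self) -> list[str]:
        return ["ping"]

class Settings:
    job_registry: JobRegistry | None = None

def run_worker(settings: Settings) -> None:
    job_registry = settings.job_registry  # local copy is the narrowing target
    if job_registry is None:
        raise SystemExit("a JobRegistry instance is required in settings")
    # From here on, checkers treat job_registry as JobRegistry, not Optional.
    print(job_registry.get_registered_functions())

settings = Settings()
settings.job_registry = JobRegistry()
run_worker(settings)  # prints ['ping']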
rrq/cli_commands/base.py
CHANGED
@@ -63,7 +63,10 @@ def auto_discover_commands(package_path: str) -> list[type[BaseCommand]]:
     # Get the package module
     try:
         package = importlib.import_module(package_path)
-        …
+        package_file = package.__file__
+        if package_file is None:
+            return commands
+        package_dir = os.path.dirname(package_file)
     except ImportError:
         # Return empty list for non-existent packages
         return commands
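The one-line 0.7.0 body (truncated above) evidently called os.path.dirname on package.__file__ directly; __file__ is None for namespace packages and some frozen modules, so that was a latent TypeError. The same guarded lookup as a standalone sketch:

import importlib
import os

def package_dir(package_path: str) -> str | None:
    """Directory of an importable package, or None when it cannot be resolved."""
    try:
        package = importlib.import_module(package_path)
    except ImportError:
        return None  # non-existent package
    if package.__file__ is None:
        return None  # e.g. a namespace package with no single file
    return os.path.dirname(package.__file__)

print(package_dir("json"))         # .../lib/python3.x/json
print(package_dir("no_such_pkg"))  # None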
rrq/cli_commands/commands/debug.py
CHANGED
@@ -452,7 +452,7 @@ class DebugCommands(AsyncCommand):
             console.print(f"Delay: {delay}s")
 
         finally:
-            await client.…
+            await client.close()
 
     async def _clear_data(
         self, settings_object_path: str, confirm: bool, pattern: str
@@ -548,4 +548,4 @@ class DebugCommands(AsyncCommand):
         console.print(f"\nStress test complete: {total_jobs} jobs submitted")
 
     finally:
-        await client.…
+        await client.close()
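Both hunks replace a line truncated at "await client." with await client.close(), so the debug client is released even when the command body raises. If the pairing is easy to forget, the same guarantee fits in an async context manager; a minimal sketch, assuming RRQClient accepts a settings argument and exposes an awaitable close():

import contextlib

from rrq.client import RRQClient

@contextlib.asynccontextmanager
async def open_client(settings):
    client = RRQClient(settings=settings)  # constructor shape assumed
    try:
        yield client
    finally:
        await client.close()

# usage: async with open_client(settings) as client: ...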
rrq/cli_commands/commands/monitor.py
CHANGED
@@ -3,7 +3,7 @@
 import asyncio
 from collections import defaultdict, deque
 from datetime import datetime
-from typing import …
+from typing import Any, TypedDict
 
 import click
 from rich.align import Align
@@ -20,6 +20,8 @@ from rrq.constants import (
     DLQ_KEY_PREFIX,
 )
 from rrq.cli_commands.base import AsyncCommand, load_app_settings, get_job_store
+from rrq.settings import RRQSettings
+from rrq.store import JobStore
 from ..utils import (
     console,
     format_duration,
@@ -32,6 +34,12 @@ from ..utils import (
 ERROR_DISPLAY_LENGTH = 50  # For consistent display across DLQ and monitor
 
 
+class DLQStats(TypedDict):
+    total_jobs: int
+    newest_error: str | None
+    top_errors: dict[str, int]
+
+
 class MonitorCommands(AsyncCommand):
     """Real-time monitoring commands"""
 
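DLQStats replaces an untyped dict for the dashboard's DLQ summary: the keys are fixed and each has its own value type, which is exactly what TypedDict encodes. A compact illustration of what a checker now catches:

from typing import TypedDict

class DLQStats(TypedDict):
    total_jobs: int
    newest_error: str | None
    top_errors: dict[str, int]

stats: DLQStats = {"total_jobs": 0, "newest_error": None, "top_errors": {}}
stats["total_jobs"] += 3                  # ok: value is known to be an int
stats["top_errors"]["TimeoutError"] = 1   # ok: dict[str, int]
# stats["newest_err"] = "boom"            # mypy/pyright: unknown key for DLQStats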
@@ -56,12 +64,12 @@ class MonitorCommands(AsyncCommand):
             multiple=True,
             help="Specific queues to monitor (default: all)",
         )
-        def monitor(settings_object_path: str, refresh: float, queues: tuple):
+        def monitor(settings_object_path: str, refresh: float, queues: tuple[str, ...]):
             """Launch real-time monitoring dashboard"""
             self.make_async(self._monitor)(settings_object_path, refresh, queues)
 
     async def _monitor(
-        self, settings_object_path: str, refresh: float, queues: tuple
+        self, settings_object_path: str, refresh: float, queues: tuple[str, ...]
     ) -> None:
         """Run the monitoring dashboard"""
         settings = load_app_settings(settings_object_path)
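Both signatures swap bare tuple for tuple[str, ...]. A bare tuple says nothing about its elements, while tuple[str, ...] means any number of strings, which is what Click delivers for a multiple=True option. For contrast:

def monitor(queues: tuple[str, ...]) -> None:
    for queue in queues:
        queue.upper()  # checker knows each element is a str

monitor(())                   # ok: zero queues
monitor(("default", "high"))  # ok: any length
# monitor((1, 2))             # rejected: elements must be str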
@@ -76,26 +84,44 @@
 class Dashboard:
     """Real-time monitoring dashboard"""
 
-    def __init__(
-        self…
+    def __init__(
+        self,
+        settings: RRQSettings,
+        refresh_interval: float,
+        queue_filter: tuple[str, ...] | None,
+    ):
+        self.settings: RRQSettings = settings
         self.refresh_interval = refresh_interval
-        self.queue_filter…
-        …
+        self.queue_filter: list[str] | None = (
+            list(queue_filter) if queue_filter else None
+        )
+        self.job_store: JobStore | None = None
 
         # Metrics storage
-        self.queue_sizes…
-        …
-        …
-        self.…
+        self.queue_sizes: defaultdict[str, deque[int]] = defaultdict(
+            lambda: deque(maxlen=60)
+        )  # 60 data points
+        self.processing_rates: defaultdict[str, deque[float]] = defaultdict(
+            lambda: deque(maxlen=60)
+        )
+        self.error_counts: defaultdict[str, int] = defaultdict(int)
+        self.dlq_stats: DLQStats = {
+            "total_jobs": 0,
+            "newest_error": None,
+            "top_errors": {},
+        }
         self.last_update = datetime.now()
 
         # Event streaming for real-time updates
         self._last_event_id = "0"
         self._event_buffer = deque(maxlen=100)
+        self.workers: list[dict[str, Any]] = []
+        self.recent_jobs: list[dict[str, Any]] = []
 
     async def run(self):
         """Run the dashboard"""
-        …
+        job_store = await get_job_store(self.settings)
+        self.job_store = job_store
 
         try:
             layout = self.create_layout()
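The __init__ rewrite is mostly annotations, but the rolling-window metrics are worth a note: defaultdict(lambda: deque(maxlen=60)) gives every queue its own fixed-size history, with old samples evicted automatically. A runnable illustration:

from collections import defaultdict, deque

# one 60-sample rolling window per queue name
queue_sizes: defaultdict[str, deque[int]] = defaultdict(lambda: deque(maxlen=60))

for tick in range(100):
    queue_sizes["default"].append(tick)

print(len(queue_sizes["default"]))  # 60: the 40 oldest samples were dropped
print(queue_sizes["default"][0])    # 40: the window now starts at sample 41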
@@ -108,7 +134,7 @@ class Dashboard:
                 self.update_layout(layout)
                 await asyncio.sleep(self.refresh_interval)
         finally:
-            await …
+            await job_store.aclose()
 
     def create_layout(self) -> Layout:
         """Create the dashboard layout"""
@@ -143,6 +169,12 @@ class Dashboard:
 
         return layout
 
+    def _require_job_store(self) -> JobStore:
+        job_store = self.job_store
+        if job_store is None:
+            raise RuntimeError("Dashboard job_store is not initialized")
+        return job_store
+
     async def update_metrics(self):
         """Update all metrics using hybrid monitoring approach"""
         try:
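_require_job_store exists because run() assigns self.job_store after __init__, so its declared type is JobStore | None. Funneling every access through one checked helper returns a plain JobStore and lets the dozen methods below drop their own None handling. The shape in isolation, with a toy Resource standing in for JobStore:

class Resource:
    def ping(self) -> str:
        return "pong"

class Holder:
    def __init__(self) -> None:
        self.resource: Resource | None = None  # filled in by an async setup step

    def _require_resource(self) -> Resource:
        if self.resource is None:
            raise RuntimeError("resource is not initialized")
        return self.resource  # narrowed: callers receive Resource, not Optional

holder = Holder()
holder.resource = Resource()
print(holder._require_resource().ping())  # "pong"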
@@ -170,8 +202,9 @@ class Dashboard:
 
     async def _process_monitoring_events(self):
         """Process real-time monitoring events from Redis streams"""
+        job_store = self._require_job_store()
         try:
-            events = await …
+            events = await job_store.consume_monitor_events(
                 last_id=self._last_event_id,
                 count=50,
                 block=10,  # Short non-blocking read
@@ -203,17 +236,19 @@ class Dashboard:
 
     async def _refresh_queue_size(self, queue_name: str):
         """Immediately refresh size for a specific queue"""
+        job_store = self._require_job_store()
         try:
             queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
-            size = await …
+            size = await job_store.redis.zcard(queue_key)
             self.queue_sizes[queue_name].append(size)
         except Exception:
             pass
 
     async def _refresh_worker_status(self, worker_id: str):
         """Immediately refresh status for a specific worker"""
+        job_store = self._require_job_store()
         try:
-            health_data, ttl = await …
+            health_data, ttl = await job_store.get_worker_health(worker_id)
             if health_data:
                 # Update worker in current list
                 for i, worker in enumerate(self.workers):
@@ -231,19 +266,20 @@ class Dashboard:
         except Exception:
             pass
 
-    async def _get_recent_jobs(self, limit: int = 10) -> …
+    async def _get_recent_jobs(self, limit: int = 10) -> list[dict[str, Any]]:
         """Get recently processed jobs"""
+        job_store = self._require_job_store()
         jobs = []
         job_pattern = f"{JOB_KEY_PREFIX}*"
 
         # Sample recent jobs
         count = 0
-        async for key in …
+        async for key in job_store.redis.scan_iter(match=job_pattern):
             if count >= limit * 2:  # Sample more to find recent ones
                 break
 
             job_id = key.decode().replace(JOB_KEY_PREFIX, "")
-            job_dict = await …
+            job_dict = await job_store.get_job_data_dict(job_id)
             if job_dict:
                 # Only include recently updated jobs
                 if "completed_at" in job_dict or "started_at" in job_dict:
@@ -267,14 +303,13 @@ class Dashboard:
 
         return jobs[:limit]
 
-    async def _update_queue_metrics_optimized(self) -> …
+    async def _update_queue_metrics_optimized(self) -> dict[str, int]:
         """Hybrid queue metrics collection using active registries and efficient batch operations"""
+        job_store = self._require_job_store()
         # Use the hybrid monitoring approach: get active queues from registry
         try:
             # Get recently active queues from the registry (O(log N) operation)
-            active_queue_names = await …
-                max_age_seconds=300
-            )
+            active_queue_names = await job_store.get_active_queues(max_age_seconds=300)
 
             # Apply filtering if specified
             if self.queue_filter:
@@ -284,9 +319,7 @@ class Dashboard:
 
             # Use batch operation to get queue sizes efficiently
             if active_queue_names:
-                queue_data = await …
-                    active_queue_names
-                )
+                queue_data = await job_store.batch_get_queue_sizes(active_queue_names)
             else:
                 queue_data = {}
 
@@ -300,24 +333,23 @@ class Dashboard:
 
         return queue_data
 
-    async def _legacy_scan_queue_metrics(self) -> …
+    async def _legacy_scan_queue_metrics(self) -> dict[str, int]:
         """Legacy scan-based queue metrics as fallback"""
+        job_store = self._require_job_store()
         queue_keys = []
         queue_pattern = f"{QUEUE_KEY_PREFIX}*"
 
         # Perform limited scan (max 100 keys at a time)
         scan_count = 0
         try:
-            async for key in …
-                match=queue_pattern, count=50
-            ):
+            async for key in job_store.redis.scan_iter(match=queue_pattern, count=50):
                 queue_keys.append(key)
                 scan_count += 1
                 if scan_count >= 100:  # Limit scan operations
                     break
         except TypeError:
             # Handle mocks that don't support count parameter
-            async for key in …
+            async for key in job_store.redis.scan_iter(match=queue_pattern):
                 queue_keys.append(key)
                 scan_count += 1
                 if scan_count >= 100:  # Limit scan operations
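All the fallback paths now spell out job_store.redis.scan_iter(...). In redis-py's asyncio client, scan_iter is an async generator over SCAN pages; count= is only a hint for how many keys Redis examines per round trip, not a result limit, which is why the dashboard keeps its own explicit scan_count cap. A minimal sketch against a local Redis, with connection details assumed:

import asyncio
import redis.asyncio as redis

async def sample_keys(pattern: str, cap: int = 100) -> list[bytes]:
    r = redis.Redis()  # assumes localhost:6379
    keys: list[bytes] = []
    try:
        # count=50 sizes each SCAN round trip; the cap bounds total work
        async for key in r.scan_iter(match=pattern, count=50):
            keys.append(key)
            if len(keys) >= cap:
                break
    finally:
        await r.aclose()  # redis-py 5.x spelling, matching job_store.aclose() above
    return keys

print(asyncio.run(sample_keys("rrq:queue:*")))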
@@ -328,23 +360,22 @@ class Dashboard:
         for key in queue_keys:
             queue_name = key.decode().replace(QUEUE_KEY_PREFIX, "")
             if not self.queue_filter or queue_name in self.queue_filter:
-                size = await …
+                size = await job_store.redis.zcard(key)
                 queue_data[queue_name] = size
 
         return queue_data
 
-    async def _update_worker_metrics_optimized(self):
+    async def _update_worker_metrics_optimized(self) -> None:
         """Hybrid worker metrics collection using active registries"""
+        job_store = self._require_job_store()
         try:
             # Use the hybrid monitoring approach: get active workers from registry
-            active_worker_ids = await …
-                max_age_seconds=60
-            )
+            active_worker_ids = await job_store.get_active_workers(max_age_seconds=60)
 
             # Get worker health data efficiently
             workers = []
             for worker_id in active_worker_ids:
-                health_data, ttl = await …
+                health_data, ttl = await job_store.get_worker_health(worker_id)
 
                 if health_data:
                     workers.append(
@@ -368,23 +399,22 @@ class Dashboard:
 
         self.workers = workers
 
-    async def _legacy_scan_worker_metrics(self) -> list:
+    async def _legacy_scan_worker_metrics(self) -> list[dict[str, Any]]:
         """Legacy scan-based worker metrics as fallback"""
+        job_store = self._require_job_store()
         worker_keys = []
         health_pattern = f"{HEALTH_KEY_PREFIX}*"
 
         scan_count = 0
         try:
-            async for key in …
-                match=health_pattern, count=50
-            ):
+            async for key in job_store.redis.scan_iter(match=health_pattern, count=50):
                 worker_keys.append(key)
                 scan_count += 1
                 if scan_count >= 50:  # Limit worker scans
                     break
         except TypeError:
             # Handle mocks that don't support count parameter
-            async for key in …
+            async for key in job_store.redis.scan_iter(match=health_pattern):
                 worker_keys.append(key)
                 scan_count += 1
                 if scan_count >= 50:  # Limit worker scans
@@ -394,7 +424,7 @@ class Dashboard:
         workers = []
         for key in worker_keys:
             worker_id = key.decode().replace(HEALTH_KEY_PREFIX, "")
-            health_data, ttl = await …
+            health_data, ttl = await job_store.get_worker_health(worker_id)
 
             if health_data:
                 workers.append(
@@ -410,8 +440,9 @@ class Dashboard:
 
         return workers
 
-    async def _get_recent_jobs_optimized(self, limit: int = 10) -> …
+    async def _get_recent_jobs_optimized(self, limit: int = 10) -> list[dict[str, Any]]:
         """Optimized recent jobs collection with limited scanning"""
+        job_store = self._require_job_store()
         jobs = []
         job_pattern = f"{JOB_KEY_PREFIX}*"
 
@@ -419,16 +450,14 @@ class Dashboard:
         job_keys = []
         scan_count = 0
         try:
-            async for key in …
-                match=job_pattern, count=20
-            ):
+            async for key in job_store.redis.scan_iter(match=job_pattern, count=20):
                 job_keys.append(key)
                 scan_count += 1
                 if scan_count >= limit * 3:  # Scan 3x the needed amount max
                     break
         except TypeError:
             # Handle mocks that don't support count parameter
-            async for key in …
+            async for key in job_store.redis.scan_iter(match=job_pattern):
                 job_keys.append(key)
                 scan_count += 1
                 if scan_count >= limit * 3:  # Scan 3x the needed amount max
@@ -439,7 +468,7 @@ class Dashboard:
         recent_jobs = []
         for key in job_keys:
             job_id = key.decode().replace(JOB_KEY_PREFIX, "")
-            job_dict = await …
+            job_dict = await job_store.get_job_data_dict(job_id)
             if job_dict:
                 try:
                     # Only include recently updated jobs
@@ -686,30 +715,33 @@ class Dashboard:
 
     async def _update_dlq_stats(self):
         """Update DLQ statistics"""
+        job_store = self._require_job_store()
         dlq_name = self.settings.default_dlq_name
         dlq_key = f"{DLQ_KEY_PREFIX}{dlq_name}"
 
         # Get total DLQ job count
-        …
+        total_jobs = int(await job_store.redis.llen(dlq_key))
+        self.dlq_stats["total_jobs"] = total_jobs
 
-        if …
+        if total_jobs > 0:
             # Get some recent DLQ jobs for error analysis
-            job_ids = await …
-            job_ids = […
+            job_ids = await job_store.redis.lrange(dlq_key, 0, 9)  # Get first 10
+            job_ids = [
+                job_id.decode("utf-8") if isinstance(job_id, bytes) else str(job_id)
+                for job_id in job_ids
+            ]
 
-            errors = []
-            newest_time = 0
+            errors: list[str] = []
+            newest_time: float = 0.0
 
             for job_id in job_ids:
-                job_data = await …
+                job_data = await job_store.get_job(job_id)
                 if job_data:
                     error = job_data.get("last_error", "Unknown error")
                     completion_time = job_data.get("completion_time", 0)
 
                     if isinstance(completion_time, str):
                         try:
-                            from datetime import datetime
-
                             completion_time = datetime.fromisoformat(
                                 completion_time.replace("Z", "+00:00")
                             ).timestamp()
@@ -717,7 +749,7 @@ class Dashboard:
                             completion_time = 0
 
                         if completion_time > newest_time:
-                            newest_time = completion_time
+                            newest_time = float(completion_time)
                             self.dlq_stats["newest_error"] = (
                                 error[:ERROR_DISPLAY_LENGTH] + "..."
                                 if len(error) > ERROR_DISPLAY_LENGTH
@@ -731,7 +763,7 @@ class Dashboard:
             )
 
             # Count error types
-            error_counts = {}
+            error_counts: dict[str, int] = {}
             for error in errors:
                 error_counts[error] = error_counts.get(error, 0) + 1
 
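One detail of _update_dlq_stats worth noting: redis-py returns bytes from lrange unless the client was built with decode_responses=True, while a stubbed store (in tests, presumably) may hand back str, hence the normalization before the IDs are used. In isolation:

raw_ids = [b"job-1", "job-2"]  # bytes from a real client, str from a stub
job_ids = [
    job_id.decode("utf-8") if isinstance(job_id, bytes) else str(job_id)
    for job_id in raw_ids
]
print(job_ids)  # ['job-1', 'job-2']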
rrq/cli_commands/commands/queues.py
CHANGED
@@ -1,7 +1,7 @@
 """Queue management and statistics commands"""
 
 from datetime import datetime
-from typing import Dict, List, Tuple
+from typing import Any, Dict, List, Tuple
 
 import click
 
@@ -390,7 +390,7 @@ class QueueCommands(AsyncCommand):
 
     async def _get_queue_statistics(
         self, job_store: JobStore, queue_name: str, max_scan: int = 1000
-    ) -> Dict[str, …
+    ) -> Dict[str, Any]:
         """Get detailed statistics for a queue"""
         stats = {
             "total": 0,
rrq/cli_commands/utils.py
CHANGED
@@ -94,11 +94,12 @@ def format_duration(seconds: float | None) -> str:
 
 def format_bytes(size: int) -> str:
     """Format byte size for display"""
+    size_float = float(size)
     for unit in ["B", "KB", "MB", "GB", "TB"]:
-        if …
-            return f"{…
-        …
-        return f"{…
+        if size_float < 1024.0:
+            return f"{size_float:.1f}{unit}"
+        size_float /= 1024.0
+    return f"{size_float:.1f}PB"
 
 
 def print_error(message: str) -> None:
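The 0.7.0 body of format_bytes is truncated in this diff, but the shape of the fix is visible: 0.8.0 divides a float copy of size instead of mutating the int parameter, and the PB fallback return sits outside the loop. Expected behaviour of the new version:

assert format_bytes(0) == "0.0B"
assert format_bytes(2048) == "2.0KB"
assert format_bytes(5 * 1024**3) == "5.0GB"
assert format_bytes(3 * 1024**5) == "3.0PB"  # falls through the whole loop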