rrq 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rrq/store.py ADDED
@@ -0,0 +1,568 @@
+"""This module defines the JobStore class, responsible for all interactions
+with the Redis backend for storing and managing RRQ job data and queues.
+"""
+
+import json
+import logging
+from datetime import UTC, datetime
+from typing import Any, Optional
+
+from redis.asyncio import Redis as AsyncRedis
+
+from .constants import (
+    DEFAULT_DLQ_RESULT_TTL_SECONDS,
+    JOB_KEY_PREFIX,
+    LOCK_KEY_PREFIX,
+    QUEUE_KEY_PREFIX,
+    UNIQUE_JOB_LOCK_PREFIX,
+)
+from .job import Job, JobStatus
+from .settings import RRQSettings
+
+logger = logging.getLogger(__name__)
+
+class JobStore:
+    """Provides an abstraction layer for interacting with Redis for RRQ operations.
+
+    Handles serialization/deserialization, key management, and atomic operations
+    related to jobs, queues, locks, and worker health.
+    """
+
+    def __init__(self, settings: RRQSettings):
+        """Initializes the JobStore with a Redis connection.
+
+        Args:
+            settings: The RRQSettings instance whose ``redis_dsn`` is used to
+                create the Redis connection.
+        """
+        self.settings = settings
+        self.redis = AsyncRedis.from_url(
+            settings.redis_dsn, decode_responses=False
+        )  # Work with bytes initially
+
+    async def aclose(self):
+        """Closes the Redis connection pool associated with this store."""
+        await self.redis.aclose()
+
+    async def _serialize_job_field(self, value: Any) -> bytes:
+        """Serializes a single field value for storing in a Redis hash."""
+        # Pydantic models are dumped to dict, then JSON string, then bytes.
+        # Basic types are JSON dumped directly.
+        if hasattr(value, "model_dump_json"):  # For Pydantic sub-models if any
+            return value.model_dump_json().encode("utf-8")
+        if isinstance(value, dict | list) or (
+            hasattr(value, "__dict__") and not callable(value)
+        ):
+            # Fallback for other dict-like or list-like objects, and simple custom objects
+            try:
+                # Use Pydantic-aware JSON dumping if possible
+                if hasattr(value, "model_dump"):
+                    value = value.model_dump(mode="json")
+                return json.dumps(value, default=str).encode(
+                    "utf-8"
+                )  # default=str for datetimes etc.
+            except TypeError:
+                return str(value).encode("utf-8")  # Last resort
+        return str(value).encode("utf-8")  # For simple types like int, str, bool
+
+    async def _deserialize_job_field(self, value_bytes: bytes) -> Any:
+        """Deserializes a single field value from Redis bytes."""
+        try:
+            # Attempt to parse as JSON first, as most complex types will be stored this way.
+            return json.loads(value_bytes.decode("utf-8"))
+        except (json.JSONDecodeError, UnicodeDecodeError):
+            # If it fails, it might be a simple string that wasn't JSON encoded (e.g. status enums)
+            # or a raw byte representation that needs specific handling (not covered here yet)
+            return value_bytes.decode("utf-8")  # Fallback to string
+
+    async def save_job_definition(self, job: Job) -> None:
+        """Saves the complete job definition as a Redis hash.
+
+        Handles manual serialization of complex fields (args, kwargs, result).
+
+        Args:
+            job: The Job object to save.
+        """
+        job_key = f"{JOB_KEY_PREFIX}{job.id}"
+        # print(
+        #     f"DEBUG JobStore.save_job_definition (ENTRY): job.id={job.id}, job.job_args={job.job_args}, job.job_kwargs={job.job_kwargs}, type(job.job_args)={type(job.job_args)}"
+        # )
+
+        # Dump model excluding fields handled manually
+        job_data_dict = job.model_dump(
+            mode="json", exclude={"job_args", "job_kwargs", "result"}
+        )
+
+        # Manually serialize potentially complex fields to JSON strings
+        job_args_json = json.dumps(job.job_args if job.job_args is not None else None)
+        job_kwargs_json = json.dumps(
+            job.job_kwargs if job.job_kwargs is not None else None
+        )
+        result_json = json.dumps(job.result if job.result is not None else None)
+
+        # Combine base fields (converted to string) with manually serialized ones
+        final_mapping_for_hset = {
+            str(k): str(v) for k, v in job_data_dict.items() if v is not None
+        }
+        final_mapping_for_hset["job_args"] = job_args_json
+        final_mapping_for_hset["job_kwargs"] = job_kwargs_json
+        final_mapping_for_hset["result"] = result_json
+
+        # Ensure ID is present
+        if "id" not in final_mapping_for_hset:
+            final_mapping_for_hset["id"] = job.id
+
+        # print(
+        #     f"!!! RRQ_JOB_STORE_SAVE (PRINT) (JobID:{job.id}) -> Mapping for HSET: { {k: str(v)[:50] + '...' if isinstance(v, str) and len(v) > 50 else v for k, v in final_mapping_for_hset.items()} }"
+        # )
+        if final_mapping_for_hset:  # Avoid HSET with empty mapping
+            await self.redis.hset(job_key, mapping=final_mapping_for_hset)
+        logger.debug(f"Saved job definition for {job.id} to Redis hash {job_key}.")
+
+    async def get_job_definition(self, job_id: str) -> Optional[Job]:
+        """Retrieves a job definition from Redis and reconstructs the Job object.
+
+        Handles manual deserialization of complex fields (args, kwargs, result).
+
+        Args:
+            job_id: The unique ID of the job to retrieve.
+
+        Returns:
+            The reconstructed Job object, or None if the job ID is not found or parsing fails.
+        """
+        job_key = f"{JOB_KEY_PREFIX}{job_id}"
+        job_data_raw_bytes = await self.redis.hgetall(job_key)
+
+        if not job_data_raw_bytes:
+            logger.debug(f"Job definition not found for ID: {job_id}")
+            return None
+
+        # Decode all keys and values from bytes to str first
+        job_data_dict_str = {
+            k.decode("utf-8"): v.decode("utf-8") for k, v in job_data_raw_bytes.items()
+        }
+
+        # Manually extract and parse complex fields
+        job_args_list = None
+        job_kwargs_dict = None
+        result_obj = None
+
+        job_args_str = job_data_dict_str.pop("job_args", None)
+        job_kwargs_str = job_data_dict_str.pop("job_kwargs", None)
+        result_str = job_data_dict_str.pop("result", None)
+
+        if job_args_str and job_args_str.lower() != "null":
+            try:
+                job_args_list = json.loads(job_args_str)
+                # print(f"DEBUG get_job_def: Parsed job_args_list = {job_args_list}")
+            except json.JSONDecodeError:
+                logger.error(
+                    f"Failed to JSON decode 'job_args' for job {job_id} from string: '{job_args_str}'",
+                    exc_info=True,
+                )
+
+        if job_kwargs_str and job_kwargs_str.lower() != "null":
+            try:
+                job_kwargs_dict = json.loads(job_kwargs_str)
+                # print(f"DEBUG get_job_def: Parsed job_kwargs_dict = {job_kwargs_dict}")
+            except json.JSONDecodeError:
+                logger.error(
+                    f"Failed to JSON decode 'job_kwargs' for job {job_id} from string: '{job_kwargs_str}'",
+                    exc_info=True,
+                )
+
+        if result_str and result_str.lower() != "null":
+            try:
+                # Always try to load result as JSON, as it's stored via json.dumps
+                result_obj = json.loads(result_str)
+            except json.JSONDecodeError:
+                logger.error(
+                    f"Failed to JSON decode 'result' for job {job_id} from string: '{result_str}'",
+                    exc_info=True,
+                )
+                # Decide on fallback: None or the raw string?
+                # If stored via json.dumps, failure here indicates corruption or non-JSON string stored previously.
+                result_obj = None  # Safest fallback is likely None
+
+        # Validate the remaining dictionary using Pydantic Job model
+        try:
+            # Pass only the remaining fields to the constructor
+            base_job_data = dict(job_data_dict_str)
+            validated_job = Job(**base_job_data)
+
+            # Manually assign the parsed complex fields, ensuring correct types
+            validated_job.job_args = job_args_list if job_args_list is not None else []
+            validated_job.job_kwargs = (
+                job_kwargs_dict if job_kwargs_dict is not None else {}
+            )
+            validated_job.result = result_obj
+
+            # print(
+            #     f"DEBUG get_job_def (POST-MANUAL-ASSIGN): job_id={job_id}, job.job_args='{validated_job.job_args}', type={type(validated_job.job_args)}"
+            # )
+            # print(
+            #     f"!!! RRQ_JOB_STORE_GET (POST_CREATE_VALIDATED) -> JobID:{validated_job.id}, Status:{validated_job.status.value if validated_job.status else None}, Retries:{validated_job.current_retries}"
+            # )
+            logger.debug(f"Successfully retrieved and parsed job {validated_job.id}")
+            return validated_job
+        except Exception as e_val:
+            logger.error(
+                f"Pydantic validation error in get_job_definition for job {job_id}: {e_val} on data {base_job_data}",
+                exc_info=True,
+            )
+            return None
+
+    async def add_job_to_queue(
+        self, queue_name: str, job_id: str, score: float
+    ) -> None:
+        """Adds a job ID to a specific queue (Redis Sorted Set) with a score.
+
+        The score typically represents the time (e.g., milliseconds since epoch)
+        when the job should become available for processing.
+
+        Args:
+            queue_name: The name of the queue (without the prefix).
+            job_id: The ID of the job to add.
+            score: The score (float) determining the job's position/priority in the queue.
+        """
+        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        await self.redis.zadd(
+            queue_key, {job_id.encode("utf-8"): score}
+        )  # Store job_id as bytes
+        logger.debug(f"Added job {job_id} to queue '{queue_key}' with score {score}")
+
+    async def get_queued_job_ids(
+        self, queue_name: str, start: int = 0, end: int = -1
+    ) -> list[str]:
+        """Retrieves a range of job IDs from a queue (Sorted Set) by index.
+
+        Args:
+            queue_name: The name of the queue (without the prefix).
+            start: The starting index (0-based).
+            end: The ending index (inclusive, -1 means to the end).
+
+        Returns:
+            A list of job IDs as strings.
+        """
+        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        job_ids_bytes = await self.redis.zrange(queue_key, start, end)
+        return [job_id.decode("utf-8") for job_id in job_ids_bytes]
+
+    async def get_ready_job_ids(self, queue_name: str, count: int) -> list[str]:
+        """Retrieves ready job IDs from the queue (score <= now) up to a specified count.
+
+        Args:
+            queue_name: The name of the queue (without the prefix).
+            count: The maximum number of job IDs to retrieve.
+
+        Returns:
+            A list of ready job IDs as strings.
+        """
+        if count <= 0:
+            return []
+        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        now_ms = int(datetime.now(UTC).timestamp() * 1000)
+        # Fetch jobs with score from -inf up to current time, limit by count
+        job_ids_bytes = await self.redis.zrangebyscore(
+            queue_key, min=float("-inf"), max=float(now_ms), start=0, num=count
+        )
+        ids = [job_id.decode("utf-8") for job_id in job_ids_bytes]
+        if ids:
+            logger.debug(f"Found {len(ids)} ready jobs in queue '{queue_key}'.")
+        return ids
+
+    async def acquire_job_lock(
+        self, job_id: str, worker_id: str, lock_timeout_ms: int
+    ) -> bool:
+        """Attempts to acquire an exclusive processing lock for a job using SET NX PX.
+
+        Args:
+            job_id: The ID of the job to lock.
+            worker_id: The ID of the worker attempting to acquire the lock.
+            lock_timeout_ms: The lock timeout/TTL in milliseconds.
+
+        Returns:
+            True if the lock was acquired successfully, False otherwise.
+        """
+        lock_key = f"{LOCK_KEY_PREFIX}{job_id}"
+        result = await self.redis.set(
+            lock_key, worker_id.encode("utf-8"), nx=True, px=lock_timeout_ms
+        )
+        if result:
+            logger.debug(
+                f"Worker {worker_id} acquired lock for job {job_id} ({lock_key})."
+            )
+        return result is True
+
+    async def release_job_lock(self, job_id: str) -> None:
+        """Releases the processing lock for a job.
+
+        Args:
+            job_id: The ID of the job whose lock should be released.
+        """
+        lock_key = f"{LOCK_KEY_PREFIX}{job_id}"
+        deleted_count = await self.redis.delete(lock_key)
+        if deleted_count > 0:
+            logger.debug(f"Released lock for job {job_id} ({lock_key}).")
+        # No need to log if lock didn't exist
+
+    async def update_job_status(self, job_id: str, status: JobStatus) -> None:
+        """Updates only the status field of a job in its Redis hash.
+
+        Args:
+            job_id: The ID of the job to update.
+            status: The new JobStatus.
+        """
+        job_key = f"{JOB_KEY_PREFIX}{job_id}"
+        # Status enum value needs to be accessed
+        await self.redis.hset(job_key, "status", status.value.encode("utf-8"))
+        logger.debug(f"Updated status of job {job_id} to {status.value}.")
+
+    async def increment_job_retries(self, job_id: str) -> int:
+        """Atomically increments the 'current_retries' field for a job.
+
+        Args:
+            job_id: The ID of the job whose retry count should be incremented.
+
+        Returns:
+            The new retry count after incrementing.
+        """
+        job_key = f"{JOB_KEY_PREFIX}{job_id}"
+        new_retry_count = await self.redis.hincrby(job_key, "current_retries", 1)
+        new_count = int(new_retry_count)  # hincrby might return bytes/str
+        logger.debug(f"Incremented retries for job {job_id} to {new_count}.")
+        return new_count
+
+    async def move_job_to_dlq(
+        self, job_id: str, dlq_name: str, error_message: str, completion_time: datetime
+    ) -> None:
+        """Moves a job to the Dead Letter Queue (DLQ).
+
+        This involves updating the job's status to FAILED, storing the final error
+        and completion time in its hash, adding the job ID to the DLQ list,
+        and setting a TTL on the job hash itself.
+
+        Args:
+            job_id: The ID of the job to move.
+            dlq_name: The name of the DLQ list (without prefix).
+            error_message: The final error message to store.
+            completion_time: The timestamp when the job failed permanently.
+        """
+        job_key = f"{JOB_KEY_PREFIX}{job_id}"
+        dlq_redis_key = f"{QUEUE_KEY_PREFIX}{dlq_name}"
+
+        # Ensure complex fields are properly handled if needed (error could be complex)
+        # For now, assuming simple string error message
+        update_data = {
+            "status": JobStatus.FAILED.value.encode("utf-8"),
+            "last_error": error_message.encode("utf-8"),
+            "completion_time": completion_time.isoformat().encode("utf-8"),
+        }
+
+        # Use pipeline for atomicity
+        async with self.redis.pipeline(transaction=True) as pipe:
+            pipe.hset(job_key, mapping=update_data)
+            pipe.lpush(dlq_redis_key, job_id.encode("utf-8"))
+            pipe.expire(job_key, DEFAULT_DLQ_RESULT_TTL_SECONDS)
+            results = await pipe.execute()
+        logger.info(f"Moved job {job_id} to DLQ '{dlq_redis_key}'. Results: {results}")
+
+    async def get_job_lock_owner(self, job_id: str) -> Optional[str]:
+        """Gets the current owner (worker ID) of a job's processing lock, if held.
+
+        Args:
+            job_id: The ID of the job.
+
+        Returns:
+            The worker ID holding the lock, or None if the lock is not held.
+        """
+        lock_key = f"{LOCK_KEY_PREFIX}{job_id}"
+        owner_bytes = await self.redis.get(lock_key)
+        return owner_bytes.decode("utf-8") if owner_bytes else None
+
+    async def remove_job_from_queue(self, queue_name: str, job_id: str) -> int:
+        """Removes a specific job ID from a queue (Sorted Set).
+
+        Args:
+            queue_name: The name of the queue (without prefix).
+            job_id: The ID of the job to remove.
+
+        Returns:
+            The number of elements removed (0 or 1).
+        """
+        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        removed_count = await self.redis.zrem(queue_key, job_id.encode("utf-8"))
+        count = int(removed_count)  # Ensure int
+        if count > 0:
+            logger.debug(f"Removed job {job_id} from queue '{queue_key}'.")
+        return count
+
+    async def acquire_unique_job_lock(
+        self, unique_key: str, job_id: str, lock_ttl_seconds: int
+    ) -> bool:
+        """Acquires a lock for a unique job key in Redis if it doesn't already exist.
+
+        This is used to prevent duplicate job submissions based on a user-defined unique key.
+        The lock stores the job ID that acquired it and has a TTL.
+
+        Args:
+            unique_key: The user-defined key for ensuring job uniqueness.
+            job_id: The ID of the job attempting to acquire the lock. This is stored
+                as the value of the lock for traceability.
+            lock_ttl_seconds: The Time-To-Live (in seconds) for the lock.
+
+        Returns:
+            True if the lock was successfully acquired, False otherwise (e.g., lock already held).
+        """
+        lock_key = f"{UNIQUE_JOB_LOCK_PREFIX}{unique_key}"
+        # NX = only set if not exists. EX = set TTL in seconds.
+        # The value stored is the job_id for traceability of who holds the lock.
+        was_set = await self.redis.set(
+            lock_key, job_id.encode("utf-8"), ex=lock_ttl_seconds, nx=True
+        )
+        if was_set:
+            logger.info(
+                f"Acquired unique job lock for key '{unique_key}' (job_id: {job_id}, TTL: {lock_ttl_seconds}s)"
+            )
+            return True
+        else:
+            locked_by_job_id_bytes = await self.redis.get(lock_key)
+            locked_by_job_id = (
+                locked_by_job_id_bytes.decode("utf-8")
+                if locked_by_job_id_bytes
+                else "unknown"
+            )
+            logger.debug(
+                f"Failed to acquire unique job lock for key '{unique_key}'. Lock held by job_id: {locked_by_job_id}."
+            )
+            return False
+
+    async def release_unique_job_lock(self, unique_key: str) -> None:
+        """Deletes the lock associated with a unique job key from Redis.
+
+        Args:
+            unique_key: The user-defined key for which the lock should be released.
+        """
+        lock_key = f"{UNIQUE_JOB_LOCK_PREFIX}{unique_key}"
+        deleted_count = await self.redis.delete(lock_key)
+        if deleted_count > 0:
+            logger.info(
+                f"Released unique job lock for key '{unique_key}' (lock: {lock_key})"
+            )
+        else:
+            # This might happen if the lock expired naturally via TTL before explicit release.
+            # Or if release is called multiple times, or on a key that never had a lock.
+            logger.debug(
+                f"No unique job lock found to release for key '{unique_key}' (lock: {lock_key}), or it already expired."
+            )
+
+    async def save_job_result(self, job_id: str, result: Any, ttl_seconds: int) -> None:
+        """Saves the successful result and completion time for a job, sets TTL, and updates status.
+
+        Args:
+            job_id: The ID of the job.
+            result: The result data to save (will be JSON serialized).
+            ttl_seconds: The Time-To-Live in seconds for the job definition hash.
+                0 means persist indefinitely. < 0 means leave existing TTL.
+        """
+        job_key = f"{JOB_KEY_PREFIX}{job_id}"
+        completion_time = datetime.now(UTC)
+
+        # Serialize result to JSON string
+        try:
+            # Use Pydantic's JSON serialization if available, else standard json
+            if hasattr(result, "model_dump_json"):
+                result_str = result.model_dump_json()
+            elif isinstance(result, str):
+                # JSON-encode plain strings too, for consistency in save/load
+                # (json.dumps simply adds quotes around the string).
+                result_str = json.dumps(result)
+            else:
+                result_str = json.dumps(result, default=str)  # Handle datetimes etc.
+        except TypeError as e:
+            logger.error(
+                f"Failed to serialize result for job {job_id}: {e}", exc_info=True
+            )
+            result_str = json.dumps(f"<Unserializable Result: {type(result).__name__}>")
+
+        update_data = {
+            "result": result_str.encode("utf-8"),
+            "completion_time": completion_time.isoformat().encode("utf-8"),
+            "status": JobStatus.COMPLETED.value.encode("utf-8"),
+        }
+
+        # Use pipeline for atomicity of update + expire
+        async with self.redis.pipeline(transaction=True) as pipe:
+            pipe.hset(job_key, mapping=update_data)
+            if ttl_seconds > 0:
+                pipe.expire(job_key, ttl_seconds)
+            elif ttl_seconds == 0:
+                pipe.persist(job_key)
+            results = await pipe.execute()
+        logger.debug(
+            f"Saved result for job {job_id}. Status set to COMPLETED. TTL={ttl_seconds}. Results: {results}"
+        )
+
+    async def set_worker_health(
+        self, worker_id: str, data: dict[str, Any], ttl_seconds: int
+    ) -> None:
+        """Sets the health check data (as a JSON string) for a worker with a TTL.
+
+        Args:
+            worker_id: The unique ID of the worker.
+            data: The health data dictionary to store.
+            ttl_seconds: The Time-To-Live for the health key in seconds.
+        """
+        health_key = f"rrq:health:worker:{worker_id}"
+        try:
+            payload = json.dumps(data, default=str).encode("utf-8")
+            await self.redis.set(health_key, payload, ex=ttl_seconds)
+            logger.debug(
+                f"Set health data for worker {worker_id} with TTL {ttl_seconds}s."
+            )
+        except Exception as e:
+            logger.error(
+                f"Failed to set health data for worker {worker_id}: {e}", exc_info=True
+            )
+
+    async def get_worker_health(
+        self, worker_id: str
+    ) -> tuple[Optional[dict[str, Any]], Optional[int]]:
+        """Retrieves the health check data and TTL for a worker.
+
+        Returns:
+            A tuple containing:
+            - The parsed health data dictionary (or None if key doesn't exist or JSON is invalid).
+            - The current TTL of the key in seconds (or None if key doesn't exist or has no TTL).
+        """
+        health_key = f"rrq:health:worker:{worker_id}"
+
+        async with self.redis.pipeline(transaction=False) as pipe:
+            pipe.get(health_key)
+            pipe.ttl(health_key)
+            results = await pipe.execute()
+
+        payload_bytes: Optional[bytes] = results[0]
+        ttl_seconds: int = results[1]  # TTL returns -2 if key not found, -1 if no expiry
+
+        if payload_bytes is None:
+            logger.debug(f"Health key not found for worker {worker_id}.")
+            return None, None  # Key doesn't exist
+
+        health_data = None
+        try:
+            health_data = json.loads(payload_bytes.decode("utf-8"))
+        except (json.JSONDecodeError, UnicodeDecodeError):
+            logger.error(
+                f"Failed to parse health check JSON for worker {worker_id}",
+                exc_info=True,
+            )
+
+        # Return TTL as None if it's -1 (no expiry) or -2 (key not found - though handled above)
+        final_ttl = ttl_seconds if ttl_seconds >= 0 else None
+        logger.debug(
+            f"Retrieved health data for worker {worker_id}: TTL={final_ttl}, Data keys={list(health_data.keys()) if health_data else None}"
+        )
+        return health_data, final_ttl
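
The file above is only the persistence layer, but its method signatures suggest the intended enqueue flow. Below is a minimal enqueue-side sketch based solely on this module: the RRQSettings construction with a redis_dsn keyword, the queue name "default", and the unique key are illustrative assumptions, and saving a full Job definition via save_job_definition is omitted because the Job constructor is not shown in this diff.

import asyncio
from datetime import UTC, datetime

from rrq.settings import RRQSettings
from rrq.store import JobStore


async def enqueue_job_id(job_id: str) -> bool:
    # Assumed construction; RRQSettings fields other than redis_dsn are not shown in this diff.
    store = JobStore(RRQSettings(redis_dsn="redis://localhost:6379/0"))
    try:
        # Optional dedup guard: only the first submission for this unique key wins.
        if not await store.acquire_unique_job_lock("reports:daily", job_id, lock_ttl_seconds=3600):
            return False
        # The queue score is the "ready at" time in milliseconds since epoch.
        now_ms = datetime.now(UTC).timestamp() * 1000
        await store.add_job_to_queue("default", job_id, score=now_ms)
        return True
    finally:
        await store.aclose()


asyncio.run(enqueue_job_id("job-123"))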
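
On the worker side, the same methods compose into a poll, lock, process loop. Continuing the sketch above (same imports and store); the queue name, worker id, timeouts, and placeholder result are illustrative, and the actual handler invocation is elided.

async def process_once(store: JobStore, worker_id: str = "worker-1") -> None:
    # Pull up to 10 job IDs whose score (ready-at time) is <= now.
    for job_id in await store.get_ready_job_ids("default", count=10):
        # Per-job SET NX PX lock so only one worker processes the job.
        if not await store.acquire_job_lock(job_id, worker_id, lock_timeout_ms=30_000):
            continue
        try:
            await store.remove_job_from_queue("default", job_id)
            job = await store.get_job_definition(job_id)
            if job is None:
                continue
            # ... invoke the registered handler with job.job_args / job.job_kwargs here ...
            await store.save_job_result(job_id, {"ok": True}, ttl_seconds=3600)
        finally:
            await store.release_job_lock(job_id)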
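
The health helpers store a JSON payload under rrq:health:worker:<worker_id> with a TTL, so a worker that stops refreshing simply ages out. A brief heartbeat sketch, again continuing the setup above; the payload fields and the 60-second TTL are illustrative.

async def heartbeat(store: JobStore, worker_id: str) -> None:
    # Overwrites the previous payload and refreshes the TTL on each call.
    await store.set_worker_health(
        worker_id,
        {"status": "ok", "active_jobs": 0},  # illustrative payload
        ttl_seconds=60,
    )


async def is_alive(store: JobStore, worker_id: str) -> bool:
    data, ttl = await store.get_worker_health(worker_id)
    # data is None when the key is missing or unparseable; ttl is None when absent or non-expiring.
    return data is not None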