rrq 0.2.5__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rrq/cli.py ADDED
@@ -0,0 +1,532 @@
1
+ """RRQ: Reliable Redis Queue Command Line Interface"""
2
+
3
+ import asyncio
4
+ import importlib
5
+ import logging
6
+ import os
7
+ import signal
8
+ import subprocess
9
+ import sys
10
+ from contextlib import suppress
11
+
12
+ import click
13
+ import redis.exceptions
14
+ from watchfiles import awatch
15
+
16
+ from .constants import HEALTH_KEY_PREFIX
17
+ from .settings import RRQSettings
18
+ from .store import JobStore
19
+ from .worker import RRQWorker
20
+
21
+ # Attempt to import dotenv components for .env file loading
22
+ try:
23
+ from dotenv import find_dotenv, load_dotenv
24
+
25
+ DOTENV_AVAILABLE = True
26
+ except ImportError:
27
+ DOTENV_AVAILABLE = False
28
+
29
+ logger = logging.getLogger(__name__)
30
+
31
+
32
+ # Helper to load settings for commands
33
def _load_app_settings(settings_object_path: str | None = None) -> RRQSettings:
    """Resolve and return the RRQSettings object used by CLI commands.

    Resolution order: the explicit ``settings_object_path`` argument, then the
    ``RRQ_SETTINGS`` environment variable, then a default ``RRQSettings``
    instance (which automatically picks up environment variables prefixed
    with ``RRQ_``).

    If python-dotenv is installed and a ``.env`` file is found, it is loaded
    first; system environment variables always take precedence over it.

    Args:
        settings_object_path: Dotted path to the settings object
            (e.g. "myapp.worker_config.rrq_settings").

    Returns:
        The RRQSettings object.
    """
    if DOTENV_AVAILABLE:
        dotenv_path = find_dotenv(usecwd=True)
        if dotenv_path:
            logger.debug(f"Loading .env file at: {dotenv_path}...")
            # override=False keeps existing system env vars authoritative.
            load_dotenv(dotenv_path=dotenv_path, override=False)

    try:
        if settings_object_path is None:
            settings_object_path = os.getenv("RRQ_SETTINGS")
        if settings_object_path is None:
            return RRQSettings()

        # "pkg.module.attr" -> module "pkg.module", attribute "attr"
        module_path, _, attr_name = settings_object_path.rpartition(".")
        module = importlib.import_module(module_path)
        return getattr(module, attr_name)
    except ImportError:
        click.echo(
            click.style(
                f"ERROR: Could not import settings object '{settings_object_path}'. Make sure it is in PYTHONPATH.",
                fg="red",
            ),
            err=True,
        )
        sys.exit(1)
    except Exception as e:
        click.echo(
            click.style(
                f"ERROR: Unexpected error processing settings object '{settings_object_path}': {e}",
                fg="red",
            ),
            err=True,
        )
        sys.exit(1)
91
+
92
+
93
+ # --- Health Check ---
94
async def check_health_async_impl(settings_object_path: str | None = None) -> bool:
    """Check the health of active RRQ workers via their Redis heartbeat keys.

    Returns True when at least one worker heartbeat key is present; False when
    no workers are found, Redis is unreachable, or an unexpected error occurs.
    """
    rrq_settings = _load_app_settings(settings_object_path)

    logger.info("Performing RRQ worker health check...")
    store = None
    try:
        store = JobStore(settings=rrq_settings)
        await store.redis.ping()
        logger.debug(f"Successfully connected to Redis: {rrq_settings.redis_dsn}")

        # Collect every worker heartbeat key (keys come back as bytes).
        pattern = f"{HEALTH_KEY_PREFIX}*"
        worker_keys = []
        async for raw_key in store.redis.scan_iter(match=pattern):
            worker_keys.append(raw_key.decode("utf-8"))

        if not worker_keys:
            click.echo(
                click.style(
                    "Worker Health Check: FAIL (No active workers found)", fg="red"
                )
            )
            return False

        click.echo(
            click.style(
                f"Worker Health Check: Found {len(worker_keys)} active worker(s):",
                fg="green",
            )
        )
        for full_key in worker_keys:
            worker_id = full_key.split(HEALTH_KEY_PREFIX)[1]
            health_data, ttl = await store.get_worker_health(worker_id)
            ttl_display = ttl if ttl is not None else "N/A"
            if health_data:
                click.echo(
                    f" - Worker ID: {click.style(worker_id, bold=True)}\n"
                    f" Status: {health_data.get('status', 'N/A')}\n"
                    f" Active Jobs: {health_data.get('active_jobs', 'N/A')}\n"
                    f" Last Heartbeat: {health_data.get('timestamp', 'N/A')}\n"
                    f" TTL: {ttl_display} seconds"
                )
            else:
                click.echo(
                    f" - Worker ID: {click.style(worker_id, bold=True)} - Health data missing/invalid. TTL: {ttl_display}s"
                )
        return True
    except redis.exceptions.ConnectionError as e:
        logger.error(f"Redis connection failed during health check: {e}", exc_info=True)
        click.echo(
            click.style(
                f"Worker Health Check: FAIL - Redis connection error: {e}", fg="red"
            )
        )
        return False
    except Exception as e:
        logger.error(
            f"An unexpected error occurred during health check: {e}", exc_info=True
        )
        click.echo(
            click.style(f"Worker Health Check: FAIL - Unexpected error: {e}", fg="red")
        )
        return False
    finally:
        # Always release the Redis connection, even on failure paths.
        if store:
            await store.aclose()
163
+
164
+
165
+ # --- Process Management ---
166
def start_rrq_worker_subprocess(
    is_detached: bool = False,
    settings_object_path: str | None = None,
    queues: list[str] | None = None,
) -> subprocess.Popen | None:
    """Spawn an ``rrq worker run`` subprocess.

    Args:
        is_detached: When True, fully detach the child (new session, all
            stdio discarded); otherwise run in the foreground sharing stdio.
        settings_object_path: Optional settings path forwarded via --settings.
        queues: Optional queue names, each forwarded via --queue.

    Returns:
        The Popen handle for the started worker process.
    """
    command = ["rrq", "worker", "run"]
    if settings_object_path:
        command += ["--settings", settings_object_path]
    if queues:
        for queue_name in queues:
            command += ["--queue", queue_name]

    logger.info(f"Starting worker subprocess with command: {' '.join(command)}")
    if is_detached:
        process = subprocess.Popen(
            command,
            start_new_session=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            stdin=subprocess.DEVNULL,
        )
        logger.info(f"RRQ worker started in background with PID: {process.pid}")
    else:
        # start_new_session=True even in foreground mode so the whole
        # process group can be signalled on shutdown/restart.
        process = subprocess.Popen(
            command,
            start_new_session=True,
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
    return process
201
+
202
+
203
def terminate_worker_process(
    process: subprocess.Popen | None, logger: logging.Logger
) -> None:
    """Terminate a worker subprocess and its whole process group.

    Sends SIGTERM to the process group (workers are spawned with
    start_new_session=True, so the PGID covers any children), waits up to
    5 seconds, then escalates to SIGKILL. Safe to call when the process
    already exited or was never started.
    """
    if not process or process.pid is None:
        logger.debug("No active worker process to terminate.")
        return

    try:
        # poll() returns the exit code (non-None) if the process already exited.
        if process.poll() is not None:
            logger.debug(
                f"Worker process {process.pid} already terminated (poll returned exit code: {process.returncode})."
            )
            return

        pgid = os.getpgid(process.pid)
        logger.info(
            f"Terminating worker process group for PID {process.pid} (PGID {pgid})..."
        )
        # Signal the whole group so child processes are terminated too.
        os.killpg(pgid, signal.SIGTERM)
        process.wait(timeout=5)
    except subprocess.TimeoutExpired:
        # Graceful shutdown timed out; force-kill the group. The group may
        # have vanished in the meantime, hence suppress(ProcessLookupError).
        logger.warning(
            f"Worker process {process.pid} did not terminate gracefully (SIGTERM timeout), sending SIGKILL."
        )
        with suppress(ProcessLookupError):
            os.killpg(os.getpgid(process.pid), signal.SIGKILL)
    except Exception as e:
        logger.error(f"Unexpected error checking worker process {process.pid}: {e}")
231
+
232
+
233
async def watch_rrq_worker_impl(
    watch_path: str,
    settings_object_path: str | None = None,
    queues: list[str] | None = None,
) -> None:
    """Run a worker subprocess and restart it on file changes under ``watch_path``.

    Installs temporary SIGINT/SIGTERM handlers so an interrupt stops both the
    watcher and the worker; the original handlers are restored on exit.

    Args:
        watch_path: Directory to watch (made absolute before watching).
        settings_object_path: Optional settings path forwarded to the worker.
        queues: Optional queue names forwarded to the worker.
    """
    abs_watch_path = os.path.abspath(watch_path)
    click.echo(
        f"Watching for file changes in {abs_watch_path} to restart RRQ worker (app settings: {settings_object_path})..."
    )
    worker_process: subprocess.Popen | None = None
    # Fixed: asyncio.get_event_loop() is deprecated inside a coroutine;
    # get_running_loop() correctly returns the loop this coroutine runs on.
    loop = asyncio.get_running_loop()
    shutdown_event = asyncio.Event()

    def sig_handler(_signum, _frame):
        logger.info("Signal received, stopping watcher and worker...")
        if worker_process is not None:
            terminate_worker_process(worker_process, logger)
        # Signal handlers may run outside the loop's control flow; use
        # call_soon_threadsafe so setting the Event wakes awatch safely.
        loop.call_soon_threadsafe(shutdown_event.set)

    original_sigint = signal.getsignal(signal.SIGINT)
    original_sigterm = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGINT, sig_handler)
    signal.signal(signal.SIGTERM, sig_handler)

    try:
        worker_process = start_rrq_worker_subprocess(
            is_detached=False,
            settings_object_path=settings_object_path,
            queues=queues,
        )
        async for changes in awatch(abs_watch_path, stop_event=shutdown_event):
            if shutdown_event.is_set():
                break
            if not changes:
                continue

            logger.info(f"File changes detected: {changes}. Restarting RRQ worker...")
            if worker_process is not None:
                terminate_worker_process(worker_process, logger)
            # Brief pause to debounce rapid successive change events.
            await asyncio.sleep(1)
            if shutdown_event.is_set():
                break
            worker_process = start_rrq_worker_subprocess(
                is_detached=False,
                settings_object_path=settings_object_path,
                queues=queues,
            )
    except Exception as e:
        logger.error(f"Error in watch_rrq_worker: {e}", exc_info=True)
    finally:
        logger.info("Exiting watch mode. Ensuring worker process is terminated.")
        if not shutdown_event.is_set():
            shutdown_event.set()
        if worker_process is not None:
            terminate_worker_process(worker_process, logger)
        # Restore whatever handlers were installed before watch mode started.
        signal.signal(signal.SIGINT, original_sigint)
        signal.signal(signal.SIGTERM, original_sigterm)
        logger.info("Watch worker cleanup complete.")
291
+
292
+
293
+ # --- Click CLI Definitions ---
294
+
295
# Shared click context: accept both -h and --help for every command.
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}


@click.group(context_settings=CONTEXT_SETTINGS)
def rrq():
    """RRQ: Reliable Redis Queue Command Line Interface.

    Provides tools for running RRQ workers, checking system health,
    and managing jobs. Requires an application-specific settings object
    for most operations.
    """
307
+
308
+
309
@rrq.group("worker")
def worker_cli():
    """Manage RRQ workers (run, watch)."""
    # Group container only; subcommands attach via @worker_cli.command(...).
    pass
313
+
314
+
315
@worker_cli.command("run")
@click.option(
    "--burst",
    is_flag=True,
    help="Run worker in burst mode (process one job/batch then exit).",
)
@click.option(
    "--queue",
    "queues",
    type=str,
    multiple=True,
    help="Queue(s) to poll. Defaults to settings.default_queue_name.",
)
@click.option(
    "--settings",
    "settings_object_path",
    type=str,
    required=False,
    default=None,
    help=(
        "Python settings path for application worker settings "
        "(e.g., myapp.worker_config.rrq_settings). "
        "Alternatively, this can be specified as RRQ_SETTINGS env variable. "
        "The specified settings object must include a `job_registry: JobRegistry`."
    ),
)
def worker_run_command(
    burst: bool,
    queues: tuple[str, ...],
    settings_object_path: str,
):
    """Run an RRQ worker process.
    Requires an application-specific settings object.
    """
    rrq_settings = _load_app_settings(settings_object_path)

    # Determine queues to poll (None -> worker falls back to settings default).
    queues_arg = list(queues) if queues else None

    logger.info(
        f"Starting RRQ Worker (Burst: {burst}, App Settings: {settings_object_path})"
    )

    if not rrq_settings.job_registry:
        # Fixed message: the settings attribute checked here is `job_registry`;
        # the old text referred to a nonexistent 'job_registry_app'.
        click.echo(
            click.style(
                "ERROR: No 'job_registry'. You must provide a JobRegistry instance in settings.",
                fg="red",
            ),
            err=True,
        )
        sys.exit(1)

    logger.debug(
        f"Registered handlers (from effective registry): {rrq_settings.job_registry.get_registered_functions()}"
    )
    logger.debug(f"Effective RRQ settings for worker: {rrq_settings}")

    worker_instance = RRQWorker(
        settings=rrq_settings,
        job_registry=rrq_settings.job_registry,
        queues=queues_arg,
        burst=burst,
    )

    # Create a dedicated event loop: asyncio.get_event_loop() is deprecated
    # here and could hand back a loop owned by other code.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        logger.info("Starting worker run loop...")
        loop.run_until_complete(worker_instance.run())
    except KeyboardInterrupt:
        logger.info("RRQ Worker run interrupted by user (KeyboardInterrupt).")
    except Exception as e:
        logger.error(f"Exception during RRQ Worker run: {e}", exc_info=True)
    finally:
        logger.info("RRQ Worker run finished or exited. Cleaning up event loop.")
        # By this point the loop is no longer running (run_until_complete has
        # returned), so the old `if loop.is_running()` guard meant
        # shutdown_asyncgens() never executed. Guard on closed-ness instead.
        if not loop.is_closed():
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()
    logger.info("RRQ Worker has shut down.")
395
+
396
+
397
@worker_cli.command("watch")
@click.option(
    "--path",
    default=".",
    type=click.Path(exists=True, dir_okay=True, file_okay=False, readable=True),
    help="Directory path to watch for changes. Default is current directory.",
    show_default=True,
)
@click.option(
    "--settings",
    "settings_object_path",
    type=str,
    required=False,
    default=None,
    help=(
        "Python settings path for application worker settings "
        "(e.g., myapp.worker_config.rrq_settings). "
        "The specified settings object must define a `job_registry: JobRegistry`."
    ),
)
@click.option(
    "--queue",
    "queues",
    type=str,
    multiple=True,
    help="Queue(s) to poll when restarting worker. Defaults to settings.default_queue_name.",
)
def worker_watch_command(
    path: str,
    settings_object_path: str,
    queues: tuple[str, ...],
):
    """Run the RRQ worker with auto-restart on file changes in PATH.
    Requires an application-specific settings object.
    """
    # Click hands us a tuple; the async impl expects a list or None.
    queue_filters = list(queues) if queues else None
    asyncio.run(
        watch_rrq_worker_impl(
            path,
            settings_object_path=settings_object_path,
            queues=queue_filters,
        )
    )
440
+
441
+
442
+ # --- Health Check and DLQ CLI Commands (DLQ requeue delegates to JobStore) ---
443
+
444
+
445
@rrq.command("check")
@click.option(
    "--settings",
    "settings_object_path",
    type=str,
    required=False,
    default=None,
    help=(
        "Python settings path for application worker settings "
        "(e.g., myapp.worker_config.rrq_settings). "
        "Must include `job_registry: JobRegistry` to identify workers."
    ),
)
def check_command(settings_object_path: str):
    """Perform a health check on active RRQ worker(s).
    Requires an application-specific settings object.
    """
    click.echo("Performing RRQ health check...")
    is_healthy = asyncio.run(
        check_health_async_impl(settings_object_path=settings_object_path)
    )
    # Exit non-zero on failure so scripts/monitors can rely on the status code.
    if not is_healthy:
        click.echo(click.style("Health check FAILED.", fg="red"))
        sys.exit(1)
    click.echo(click.style("Health check PASSED.", fg="green"))
471
+
472
+
473
@rrq.group("dlq")
def dlq_cli():
    """Manage the Dead Letter Queue (DLQ)."""
    # Group container only; subcommands attach via @dlq_cli.command(...).
    pass
477
+
478
+
479
@dlq_cli.command("requeue")
@click.option(
    "--settings",
    "settings_object_path",
    type=str,
    required=False,
    default=None,
    help=(
        "Python settings path for application worker settings "
        "(e.g., myapp.worker_config.rrq_settings). "
        "Must include `job_registry: JobRegistry` if requeueing requires handler resolution."
    ),
)
@click.option(
    "--dlq-name",
    "dlq_name",
    type=str,
    required=False,
    default=None,
    help="Name of the DLQ (without prefix). Defaults to settings.default_dlq_name.",
)
@click.option(
    "--queue",
    "target_queue",
    type=str,
    required=False,
    default=None,
    help="Name of the target queue (without prefix). Defaults to settings.default_queue_name.",
)
@click.option(
    "--limit",
    type=int,
    required=False,
    default=None,
    help="Maximum number of DLQ jobs to requeue; all if not set.",
)
def dlq_requeue_command(
    settings_object_path: str,
    dlq_name: str,
    target_queue: str,
    limit: int,
):
    """Requeue jobs from the dead letter queue back into a live queue."""
    rrq_settings = _load_app_settings(settings_object_path)
    # Fall back to configured defaults when names are not given explicitly.
    effective_dlq = dlq_name or rrq_settings.default_dlq_name
    effective_queue = target_queue or rrq_settings.default_queue_name
    store = JobStore(settings=rrq_settings)
    click.echo(
        f"Requeuing jobs from DLQ '{effective_dlq}' to queue '{effective_queue}' (limit: {limit or 'all'})..."
    )
    requeued = asyncio.run(store.requeue_dlq(effective_dlq, effective_queue, limit))
    click.echo(
        f"Requeued {requeued} job(s) from DLQ '{effective_dlq}' to queue '{effective_queue}'."
    )
rrq/client.py CHANGED
@@ -77,26 +77,44 @@ class RRQClient:
77
77
  The created Job object if successfully enqueued, or None if enqueueing was denied
78
78
  (e.g., due to a unique key conflict).
79
79
  """
80
- # print(
81
- # f"DEBUG RRQClient.enqueue: function_name='{function_name}', args={args}, kwargs={kwargs}"
82
- # ) # DEBUG
83
-
80
+ # Determine job ID and enqueue timestamp
84
81
  job_id_to_use = _job_id or str(uuid.uuid4())
82
+ enqueue_time_utc = datetime.now(UTC)
85
83
 
84
+ # Compute unique lock TTL: cover deferral window if any
85
+ lock_ttl_seconds = self.settings.default_unique_job_lock_ttl_seconds
86
+ if _defer_by is not None:
87
+ # Defer relative to now
88
+ defer_secs = max(0, int(_defer_by.total_seconds()))
89
+ lock_ttl_seconds = max(lock_ttl_seconds, defer_secs + 1)
90
+ elif _defer_until is not None:
91
+ # Defer until specific datetime
92
+ dt = _defer_until
93
+ # Normalize to UTC
94
+ if dt.tzinfo is None:
95
+ dt = dt.replace(tzinfo=UTC)
96
+ elif dt.tzinfo != UTC:
97
+ dt = dt.astimezone(UTC)
98
+ diff = (dt - enqueue_time_utc).total_seconds()
99
+ if diff > 0:
100
+ lock_ttl_seconds = max(lock_ttl_seconds, int(diff) + 1)
101
+
102
+ unique_acquired = False
103
+ # Acquire unique lock if requested, with TTL covering defer window
86
104
  if _unique_key:
87
105
  lock_acquired = await self.job_store.acquire_unique_job_lock(
88
106
  unique_key=_unique_key,
89
- job_id=job_id_to_use, # Store current job_id in lock for traceability
90
- lock_ttl_seconds=self.settings.default_unique_job_lock_ttl_seconds,
107
+ job_id=job_id_to_use,
108
+ lock_ttl_seconds=lock_ttl_seconds,
91
109
  )
92
110
  if not lock_acquired:
93
111
  logger.info(
94
112
  f"Job with unique key '{_unique_key}' already active or recently run. Enqueue denied."
95
113
  )
96
114
  return None
115
+ unique_acquired = True
97
116
 
98
117
  queue_name_to_use = _queue_name or self.settings.default_queue_name
99
- enqueue_time_utc = datetime.now(UTC)
100
118
 
101
119
  # Create the Job instance with all provided details and defaults
102
120
  job = Job(
@@ -126,9 +144,6 @@ class RRQClient:
126
144
  queue_name=queue_name_to_use, # Store the target queue name
127
145
  )
128
146
 
129
- # Save the full job definition
130
- await self.job_store.save_job_definition(job)
131
-
132
147
  # Determine the score for the sorted set (queue)
133
148
  # Score is a millisecond timestamp for when the job should be processed.
134
149
  score_dt = enqueue_time_utc # Default to immediate processing
@@ -145,13 +160,21 @@ class RRQClient:
145
160
  score_dt = score_dt.astimezone(UTC)
146
161
 
147
162
  score_timestamp_ms = int(score_dt.timestamp() * 1000)
148
-
149
- # Add the job ID to the processing queue
150
- await self.job_store.add_job_to_queue(
151
- queue_name_to_use,
152
- job.id,
153
- float(score_timestamp_ms), # Redis ZADD score must be float
154
- )
163
+ # Record when the job is next scheduled to run (for deferred execution)
164
+ job.next_scheduled_run_time = score_dt
165
+
166
+ # Save the full job definition and add to queue (ensure unique lock is released on error)
167
+ try:
168
+ await self.job_store.save_job_definition(job)
169
+ await self.job_store.add_job_to_queue(
170
+ queue_name_to_use,
171
+ job.id,
172
+ float(score_timestamp_ms),
173
+ )
174
+ except Exception:
175
+ if unique_acquired:
176
+ await self.job_store.release_unique_job_lock(_unique_key)
177
+ raise
155
178
 
156
179
  logger.debug(
157
180
  f"Enqueued job {job.id} ('{job.function_name}') to queue '{queue_name_to_use}' with score {score_timestamp_ms}"
rrq/constants.py CHANGED
@@ -15,6 +15,7 @@ DEFAULT_DLQ_NAME: str = "rrq:dlq:default"
15
15
  # Redis key prefixes
16
16
  JOB_KEY_PREFIX: str = "rrq:job:"
17
17
  QUEUE_KEY_PREFIX: str = "rrq:queue:" # For ZSETs holding job IDs
18
+ DLQ_KEY_PREFIX: str = "rrq:dlq:" # For lists holding Dead Letter Queue job IDs
18
19
  ACTIVE_JOBS_PREFIX: str = (
19
20
  "rrq:active:" # For lists of active jobs per worker (optional, for recovery)
20
21
  )
rrq/job.py CHANGED
@@ -53,8 +53,6 @@ class Job(BaseModel):
53
53
  default_factory=lambda: datetime.now(UTC),
54
54
  description="Timestamp (UTC) when the job was initially enqueued.",
55
55
  )
56
- # score: Optional[float] = None # The score in the ZSET, derived from defer_until/defer_by
57
- # Not stored in the job hash directly, but used for queueing.
58
56
 
59
57
  status: JobStatus = Field(
60
58
  default=JobStatus.PENDING, description="Current status of the job."
@@ -107,27 +105,3 @@ class Job(BaseModel):
107
105
  default=None,
108
106
  description="The name of the Dead Letter Queue this job will be moved to if it fails permanently.",
109
107
  )
110
-
111
- # For model_config to allow arbitrary types if result is complex and not Pydantic model
112
- # class Config:
113
- # arbitrary_types_allowed = True
114
-
115
- # def to_redis_hash(self) -> dict[str, Any]:
116
- # """Prepares the job model for storage as a Redis hash.
117
- # Pydantic's model_dump is good, but we might want to ensure all values are easily
118
- # storable as strings or simple types for Redis, or handle serialization here.
119
- # For now, model_dump with json_encoders should suffice with a good serializer.
120
- # """
121
- # # Using model_dump ensures that Pydantic models are properly serialized (e.g., datetimes to ISO strings)
122
- # # We will use a JSON serializer in JobStore that handles Pydantic models correctly.
123
- # return self.model_dump(exclude_none=True)
124
-
125
- # @classmethod
126
- # def from_redis_hash(cls, data: dict[str, Any]) -> "Job":
127
- # """Reconstructs a Job instance from data retrieved from a Redis hash."""""""""
128
- # # Pydantic will handle parsing basic types. Datetimes are expected to be ISO strings.
129
- # # Handle potential None values for args/kwargs if they were excluded from dump
130
- # # data.setdefault("args", None) # Removed
131
- # # data.setdefault("kwargs", None) # Removed
132
- # return cls(**data)
133
- pass # Add pass if class body becomes empty after removing methods, or remove if not needed
rrq/registry.py CHANGED
@@ -2,8 +2,6 @@
2
2
 
3
3
  from typing import Any, Callable, Optional
4
4
 
5
- # Potentially: from collections.abc import Callable if more specific async callable needed
6
-
7
5
 
8
6
  class JobRegistry:
9
7
  """Manages the registration and retrieval of job handler functions.
@@ -74,4 +72,3 @@ class JobRegistry:
74
72
 
75
73
 
76
74
  # Global instance for convenience, though applications might manage their own.
77
- # job_registry = JobRegistry()