FlowerPower 0.9.13.1__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. flowerpower/__init__.py +17 -2
  2. flowerpower/cfg/__init__.py +201 -149
  3. flowerpower/cfg/base.py +122 -24
  4. flowerpower/cfg/pipeline/__init__.py +254 -0
  5. flowerpower/cfg/pipeline/adapter.py +66 -0
  6. flowerpower/cfg/pipeline/run.py +40 -11
  7. flowerpower/cfg/pipeline/schedule.py +69 -79
  8. flowerpower/cfg/project/__init__.py +149 -0
  9. flowerpower/cfg/project/adapter.py +57 -0
  10. flowerpower/cfg/project/job_queue.py +165 -0
  11. flowerpower/cli/__init__.py +92 -37
  12. flowerpower/cli/job_queue.py +878 -0
  13. flowerpower/cli/mqtt.py +32 -1
  14. flowerpower/cli/pipeline.py +559 -406
  15. flowerpower/cli/utils.py +29 -18
  16. flowerpower/flowerpower.py +12 -8
  17. flowerpower/fs/__init__.py +20 -2
  18. flowerpower/fs/base.py +350 -26
  19. flowerpower/fs/ext.py +797 -216
  20. flowerpower/fs/storage_options.py +1097 -55
  21. flowerpower/io/base.py +13 -18
  22. flowerpower/io/loader/__init__.py +28 -0
  23. flowerpower/io/loader/deltatable.py +7 -10
  24. flowerpower/io/metadata.py +1 -0
  25. flowerpower/io/saver/__init__.py +28 -0
  26. flowerpower/io/saver/deltatable.py +4 -3
  27. flowerpower/job_queue/__init__.py +252 -0
  28. flowerpower/job_queue/apscheduler/__init__.py +11 -0
  29. flowerpower/job_queue/apscheduler/_setup/datastore.py +110 -0
  30. flowerpower/job_queue/apscheduler/_setup/eventbroker.py +93 -0
  31. flowerpower/job_queue/apscheduler/manager.py +1063 -0
  32. flowerpower/job_queue/apscheduler/setup.py +524 -0
  33. flowerpower/job_queue/apscheduler/trigger.py +169 -0
  34. flowerpower/job_queue/apscheduler/utils.py +309 -0
  35. flowerpower/job_queue/base.py +382 -0
  36. flowerpower/job_queue/rq/__init__.py +10 -0
  37. flowerpower/job_queue/rq/_trigger.py +37 -0
  38. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +226 -0
  39. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +231 -0
  40. flowerpower/job_queue/rq/manager.py +1449 -0
  41. flowerpower/job_queue/rq/setup.py +150 -0
  42. flowerpower/job_queue/rq/utils.py +69 -0
  43. flowerpower/pipeline/__init__.py +5 -0
  44. flowerpower/pipeline/base.py +118 -0
  45. flowerpower/pipeline/io.py +407 -0
  46. flowerpower/pipeline/job_queue.py +505 -0
  47. flowerpower/pipeline/manager.py +1586 -0
  48. flowerpower/pipeline/registry.py +560 -0
  49. flowerpower/pipeline/runner.py +560 -0
  50. flowerpower/pipeline/visualizer.py +142 -0
  51. flowerpower/plugins/mqtt/__init__.py +12 -0
  52. flowerpower/plugins/mqtt/cfg.py +16 -0
  53. flowerpower/plugins/mqtt/manager.py +789 -0
  54. flowerpower/settings.py +110 -0
  55. flowerpower/utils/logging.py +21 -0
  56. flowerpower/utils/misc.py +57 -9
  57. flowerpower/utils/sql.py +122 -24
  58. flowerpower/utils/templates.py +2 -142
  59. flowerpower-1.0.0b1.dist-info/METADATA +324 -0
  60. flowerpower-1.0.0b1.dist-info/RECORD +94 -0
  61. flowerpower/_web/__init__.py +0 -61
  62. flowerpower/_web/routes/config.py +0 -103
  63. flowerpower/_web/routes/pipelines.py +0 -173
  64. flowerpower/_web/routes/scheduler.py +0 -136
  65. flowerpower/cfg/pipeline/tracker.py +0 -14
  66. flowerpower/cfg/project/open_telemetry.py +0 -8
  67. flowerpower/cfg/project/tracker.py +0 -11
  68. flowerpower/cfg/project/worker.py +0 -19
  69. flowerpower/cli/scheduler.py +0 -309
  70. flowerpower/cli/web.py +0 -44
  71. flowerpower/event_handler.py +0 -23
  72. flowerpower/mqtt.py +0 -609
  73. flowerpower/pipeline.py +0 -2499
  74. flowerpower/scheduler.py +0 -680
  75. flowerpower/tui.py +0 -79
  76. flowerpower/utils/datastore.py +0 -186
  77. flowerpower/utils/eventbroker.py +0 -127
  78. flowerpower/utils/executor.py +0 -58
  79. flowerpower/utils/trigger.py +0 -140
  80. flowerpower-0.9.13.1.dist-info/METADATA +0 -586
  81. flowerpower-0.9.13.1.dist-info/RECORD +0 -76
  82. /flowerpower/{cfg/pipeline/params.py → cli/worker.py} +0 -0
  83. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/WHEEL +0 -0
  84. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/entry_points.txt +0 -0
  85. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/top_level.txt +0 -0
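The largest addition is the new RQ job queue manager (entry 40 above); its full diff follows. Judging from the file list, the 0.9 scheduler stack (flowerpower/scheduler.py, flowerpower/cli/scheduler.py, removed above) is replaced by the flowerpower.job_queue package. A hedged sketch of the new entry point, with import paths assumed from the RECORD layout and names taken from the diff below:

```python
from flowerpower.job_queue.rq.manager import RQManager
from flowerpower.job_queue.rq.setup import RQBackend

backend = RQBackend(
    uri="redis://localhost:6379/0",
    # The manager reserves the last configured queue for the scheduler
    queues=["high", "default", "low", "scheduler"],
)
worker = RQManager(name="my_worker", backend=backend)
```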
--- /dev/null
+++ flowerpower/job_queue/rq/manager.py
@@ -0,0 +1,1449 @@
+ """
+ RQManager implementation for FlowerPower using RQ and rq-scheduler.
+
+ This module implements the scheduler backend using RQ (Redis Queue) and rq-scheduler.
+ """
+
+ import datetime as dt
+ import multiprocessing
+ import platform
+ import sys
+ import time
+ import uuid
+ from typing import Any, Callable
+
+ import duration_parser
+ from cron_descriptor import get_description
+ from humanize import precisedelta
+ from loguru import logger
+ from rq import Queue, Repeat, Retry
+ from rq.job import Job
+ from rq.results import Result
+ from rq.worker import Worker
+ from rq.worker_pool import WorkerPool
+ from rq_scheduler import Scheduler
+
+ from ...fs import AbstractFileSystem
+ from ...utils.logging import setup_logging
+ from ..base import BaseJobQueueManager
+ from .setup import RQBackend
+
+ setup_logging()
+
+ # RQ forks worker processes; macOS on Apple Silicon defaults to "spawn",
+ # so switch to "fork" where possible.
+ if sys.platform == "darwin" and platform.machine() == "arm64":
+     try:
+         # Check if the start method has already been set to avoid errors
+         if multiprocessing.get_start_method(allow_none=True) is None:
+             multiprocessing.set_start_method("fork")
+             logger.debug("Set multiprocessing start method to 'fork' for macOS ARM.")
+         elif multiprocessing.get_start_method() != "fork":
+             logger.warning(
+                 f"Multiprocessing start method already set to '{multiprocessing.get_start_method()}'. "
+                 "Cannot set to 'fork'. This might cause issues on macOS ARM."
+             )
+     except RuntimeError as e:
+         # Handle cases where the context might already be started
+         logger.warning(f"Could not set multiprocessing start method to 'fork': {e}")
+
+
+ class RQManager(BaseJobQueueManager):
+     """Implementation of BaseJobQueueManager using Redis Queue (RQ) and rq-scheduler.
+
+     This worker class uses RQ and rq-scheduler as the backend to manage jobs and
+     schedules. It supports multiple queues, background workers, and job scheduling
+     capabilities.
+
+     Typical usage:
+     ```python
+     worker = RQManager(name="my_rq_worker")
+     worker.start_worker(background=True)
+
+     # Add a job
+     def my_job(x: int) -> int:
+         return x * 2
+
+     job_id = worker.add_job(my_job, func_args=(10,))
+     ```
+     """
+
+     def __init__(
+         self,
+         name: str = "rq_scheduler",
+         base_dir: str | None = None,
+         backend: RQBackend | None = None,
+         storage_options: dict[str, Any] | None = None,
+         fs: AbstractFileSystem | None = None,
+         log_level: str | None = None,
+     ):
+         """Initialize the RQ scheduler backend.
+
+         Args:
+             name: Name of the scheduler instance. Used for identification in logs
+                 and queue names.
+             base_dir: Base directory for the FlowerPower project. Used for finding
+                 configuration files.
+             backend: RQBackend instance for Redis connection configuration.
+                 If None, configuration is loaded from project settings.
+             storage_options: Options for configuring file system storage access.
+                 Example: {"mode": "async", "root": "/tmp"}
+             fs: Custom filesystem implementation for storage operations.
+             log_level: Logging level to use for this worker instance.
+                 Example: "DEBUG", "INFO", "WARNING", etc.
+
+         Raises:
+             RuntimeError: If backend setup fails due to Redis connection issues
+                 or missing configuration.
+             ImportError: If required dependencies are not installed.
+
+         Example:
+         ```python
+         # Basic initialization
+         worker = RQManager(name="my_worker")
+
+         # With custom backend and logging
+         backend = RQBackend(
+             uri="redis://localhost:6379/0",
+             queues=["high", "default", "low"]
+         )
+         worker = RQManager(
+             name="custom_worker",
+             backend=backend,
+             log_level="DEBUG"
+         )
+         ```
+         """
+         if log_level:
+             setup_logging(level=log_level)
+         self._log_level = log_level or "INFO"
+
+         super().__init__(
+             type="rq",
+             name=name,
+             base_dir=base_dir,
+             backend=backend,
+             fs=fs,
+             storage_options=storage_options,
+         )
+
+         if self._backend is None:
+             self._setup_backend()
+
+         redis_conn = self._backend.client
+         self._queues = {}
+
+         self._queue_names = self._backend.queues
+         for queue_name in self._queue_names:
+             queue = Queue(name=queue_name, connection=redis_conn)
+             queue.log = logger
+             self._queues[queue_name] = queue
+             logger.debug(f"Created queue '{queue_name}'")
+
+         # The last configured queue is reserved for the scheduler.
+         self._scheduler_name = self._backend.queues[-1]
+         self._scheduler = Scheduler(
+             connection=redis_conn, queue_name=self._scheduler_name, interval=60
+         )
+         self._scheduler.log = logger
+
+     def _setup_backend(self) -> None:
+         """Set up the Redis backend for the scheduler.
+
+         This internal method initializes the Redis connection and queues based on
+         project configuration. It validates configuration, handles errors, and logs
+         the setup process.
+
+         Raises:
+             RuntimeError: If Redis connection fails or configuration is invalid.
+         """
+         backend_cfg = getattr(self.cfg, "backend", None)
+         if not backend_cfg:
+             logger.error("Backend configuration is missing from the project's job queue settings.")
+             raise RuntimeError("Backend configuration is missing.")
+         try:
+             self._backend = RQBackend(**backend_cfg.to_dict())
+             logger.info(
+                 f"RQ backend setup successful (type: {self._backend.type}, uri: {self._backend.uri})"
+             )
+         except Exception as exc:
+             logger.exception(f"Failed to set up RQ backend: {exc}")
+             raise RuntimeError(f"Failed to set up RQ backend: {exc}") from exc
+
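Only `uri` and `queues` appear in this diff's docstrings, and `type` is read back off the constructed backend, so the config-to-backend handoff presumably looks roughly like the sketch below. The field names are assumptions; the real schema lives in flowerpower/cfg/project/job_queue.py, which is not shown in this hunk.

```python
# Hypothetical shape of backend_cfg.to_dict(); field names are assumptions
cfg = {
    "uri": "redis://localhost:6379/0",
    "queues": ["high", "default", "low", "scheduler"],
}
backend = RQBackend(**cfg)
print(backend.type, backend.uri)  # both are logged on successful setup
```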
+     def start_worker(
+         self,
+         background: bool = False,
+         queue_names: list[str] | None = None,
+         with_scheduler: bool = True,
+         **kwargs: Any,
+     ) -> None:
+         """Start a worker process for processing jobs from the queues.
+
+         Args:
+             background: If True, runs the worker in a non-blocking background mode.
+                 If False, runs in the current process and blocks until stopped.
+             queue_names: List of queue names to process. If None, processes all
+                 queues defined in the backend configuration.
+             with_scheduler: Whether to include the scheduler queue for processing
+                 scheduled jobs.
+             **kwargs: Additional arguments passed to RQ's Worker class.
+                 Example: {"burst": True, "logging_level": "INFO", "job_monitoring_interval": 30}
+
+         Raises:
+             RuntimeError: If the worker fails to start or the Redis connection fails.
+
+         Example:
+         ```python
+         # Start worker in background processing all queues
+         worker.start_worker(background=True)
+
+         # Start worker for specific queues
+         worker.start_worker(
+             background=True,
+             queue_names=["high", "default"],
+             with_scheduler=False
+         )
+
+         # Start worker with custom settings
+         worker.start_worker(
+             background=True,
+             max_jobs=100,
+             job_monitoring_interval=30
+         )
+         ```
+         """
+         logging_level = kwargs.pop("logging_level", self._log_level)
+         burst = kwargs.pop("burst", False)
+         max_jobs = kwargs.pop("max_jobs", None)
+
+         # Determine which queues to process (copy so the configured list is not mutated)
+         if queue_names is None:
+             # Use all queues by default
+             queue_names = list(self._queue_names)
+         else:
+             # Filter to only include valid queue names
+             queue_names = [name for name in queue_names if name in self._queue_names]
+
+         if not queue_names:
+             logger.error("No valid queues specified, cannot start worker")
+             return
+
+         if with_scheduler:
+             # Add the scheduler queue to the list of queues
+             queue_names.append(self._scheduler_name)
+
+         queue_names_str = ", ".join(queue_names)
+
+         if background:
+             # We need to use a separate process rather than a thread because
+             # RQ's signal handler registration only works in the main thread
+             def run_worker_process(queue_names_arg):
+                 # Import RQ inside the process to avoid connection sharing issues
+                 from redis import Redis
+                 from rq import Worker
+
+                 # Create a fresh Redis connection in this process
+                 redis_conn = Redis.from_url(self._backend.uri)
+
+                 # Create a worker instance with queue names
+                 worker_proc = Worker(queue_names_arg, connection=redis_conn)
+
+                 # Disable the default signal handlers in RQ worker by patching
+                 # the _install_signal_handlers method to do nothing
+                 worker_proc._install_signal_handlers = lambda: None
+
+                 # Work until terminated
+                 worker_proc.work(
+                     with_scheduler=True,
+                     logging_level=logging_level,
+                     burst=burst,
+                     max_jobs=max_jobs,
+                 )
+
+             # Create and start the process; don't use daemon=True to avoid the
+             # "daemonic processes are not allowed to have children" error
+             process = multiprocessing.Process(
+                 target=run_worker_process,
+                 args=(queue_names,),
+                 name=f"rq-worker-{self.name}",
+             )
+             process.start()
+             self._worker_process = process
+             logger.info(
+                 f"Started RQ worker in background process (PID: {process.pid}) for queues: {queue_names_str}"
+             )
+         else:
+             # Create a worker with queue names (not queue objects) and run it in
+             # the current process (blocking)
+             worker = Worker(queue_names, connection=self._backend.client, **kwargs)
+             logger.info(
+                 f"Starting RQ worker in current process (blocking) for queues: {queue_names_str}"
+             )
+             worker.work(
+                 with_scheduler=True,
+                 logging_level=logging_level,
+                 burst=burst,
+                 max_jobs=max_jobs,
+             )
+
+     def stop_worker(self) -> None:
+         """Stop the worker process.
+
+         This method stops the worker process if running in background mode and
+         performs cleanup. It should be called before program exit.
+
+         Example:
+         ```python
+         try:
+             worker.start_worker(background=True)
+             # ... do work ...
+         finally:
+             worker.stop_worker()
+         ```
+         """
+         if getattr(self, "_worker_process", None) is not None:
+             if self._worker_process.is_alive():
+                 self._worker_process.terminate()
+                 self._worker_process.join(timeout=5)
+                 logger.info("RQ worker process terminated")
+             self._worker_process = None
+         else:
+             logger.warning("No worker process to stop")
+
+     def start_worker_pool(
+         self,
+         num_workers: int | None = None,
+         background: bool = False,
+         queue_names: list[str] | None = None,
+         with_scheduler: bool = True,
+         **kwargs: Any,
+     ) -> None:
+         """Start a pool of worker processes to handle jobs in parallel.
+
+         This implementation uses RQ's WorkerPool class, which provides robust worker
+         management with proper monitoring and graceful shutdown.
+
+         Args:
+             num_workers: Number of worker processes to start. If None, uses the
+                 configured value or the CPU count.
+             background: If True, runs the worker pool in background mode.
+                 If False, runs in the current process and blocks.
+             queue_names: List of queue names to process. If None, processes all
+                 queues defined in the backend configuration.
+             with_scheduler: Whether to include the scheduler queue for processing
+                 scheduled jobs.
+             **kwargs: Additional arguments passed to RQ's WorkerPool class.
+                 Example: {"max_jobs": 100, "job_monitoring_interval": 30}
+
+         Raises:
+             RuntimeError: If the worker pool fails to start or the Redis connection fails.
+
+         Example:
+         ```python
+         # Start pool with default settings
+         worker.start_worker_pool(num_workers=4, background=True)
+
+         # Start pool for specific queues
+         worker.start_worker_pool(
+             num_workers=4,
+             background=True,
+             queue_names=["high", "default"],
+             with_scheduler=False
+         )
+
+         # Start pool with custom settings
+         worker.start_worker_pool(
+             num_workers=4,
+             background=True,
+             max_jobs=100,
+             job_monitoring_interval=30
+         )
+         ```
+         """
+         logging_level = kwargs.pop("logging_level", self._log_level)
+         burst = kwargs.pop("burst", False)
+         max_jobs = kwargs.pop("max_jobs", None)
+
+         if num_workers is None:
+             backend = getattr(self.cfg, "rq_backend", None)
+             if backend is not None:
+                 num_workers = getattr(backend, "num_workers", None)
+             if num_workers is None:
+                 num_workers = multiprocessing.cpu_count()
+
+         # Determine which queues to process (copy so the configured list is not mutated)
+         if queue_names is None:
+             # Use all queues by default
+             queue_list = list(self._queue_names)
+         else:
+             # Filter to only include valid queue names
+             queue_list = [name for name in queue_names if name in self._queue_names]
+
+         if not queue_list:
+             logger.error("No valid queues specified, cannot start worker pool")
+             return
+
+         if with_scheduler:
+             # Add the scheduler queue to the list of queues
+             queue_list.append(self._scheduler_name)
+
+         queue_names_str = ", ".join(queue_list)
+
+         # Initialize RQ's WorkerPool
+         worker_pool = WorkerPool(
+             queues=queue_list,
+             connection=self._backend.client,
+             num_workers=num_workers,
+             **kwargs,
+         )
+         self._worker_pool = worker_pool
+
+         if background:
+             # Start the worker pool in a separate process to avoid signal handler issues
+             def run_pool_process():
+                 worker_pool.start(
+                     burst=burst, logging_level=logging_level, max_jobs=max_jobs
+                 )
+
+             self._pool_process = multiprocessing.Process(
+                 target=run_pool_process,
+                 name=f"rq-worker-pool-{self.name}",
+             )
+             self._pool_process.start()
+             logger.info(
+                 f"Worker pool started with {num_workers} workers across queues: {queue_names_str} in background process (PID: {self._pool_process.pid})"
+             )
+         else:
+             # Start the worker pool in the current process (blocking)
+             logger.info(
+                 f"Starting worker pool with {num_workers} workers across queues: {queue_names_str} in foreground (blocking)"
+             )
+             worker_pool.start(
+                 burst=burst, logging_level=logging_level, max_jobs=max_jobs
+             )
+
+     def stop_worker_pool(self) -> None:
+         """Stop all worker processes in the pool.
+
+         This method stops all worker processes in the pool and performs cleanup.
+         It ensures a graceful shutdown of all workers.
+
+         Example:
+         ```python
+         try:
+             worker.start_worker_pool(num_workers=4, background=True)
+             # ... do work ...
+         finally:
+             worker.stop_worker_pool()
+         ```
+         """
+         if getattr(self, "_worker_pool", None) is not None:
+             logger.info("Stopping RQ worker pool")
+             self._worker_pool.stop_workers()
+
+             pool_process = getattr(self, "_pool_process", None)
+             if pool_process is not None and pool_process.is_alive():
+                 # Terminate the worker pool process
+                 pool_process.terminate()
+                 pool_process.join(timeout=10)
+                 if pool_process.is_alive():
+                     logger.warning("Worker pool process did not terminate within timeout")
+
+             self._worker_pool = None
+             self._pool_process = None
+         else:
+             logger.warning("No worker pool to stop")
+
+     def start_scheduler(self, background: bool = False, interval: int = 60) -> None:
+         """Start the RQ scheduler process.
+
+         The scheduler process manages scheduled and recurring jobs. It must be
+         running for scheduled jobs to execute.
+
+         Args:
+             background: If True, runs the scheduler in a non-blocking background mode.
+                 If False, runs in the current process and blocks.
+             interval: How often to check for scheduled jobs, in seconds.
+
+         Raises:
+             RuntimeError: If the scheduler fails to start or the Redis connection fails.
+
+         Example:
+         ```python
+         # Start scheduler in background checking every 30 seconds
+         worker.start_scheduler(background=True, interval=30)
+
+         # Start scheduler in foreground (blocking)
+         worker.start_scheduler(background=False)
+         ```
+         """
+         # (Re)create the scheduler if it is missing or the check interval changed
+         if not hasattr(self, "_scheduler") or self._scheduler._interval != interval:
+             self._scheduler = Scheduler(
+                 connection=self._backend.client,
+                 queue_name=self._scheduler_name,
+                 interval=interval,
+             )
+             self._scheduler.log = logger
+
+         if background:
+
+             def run_scheduler():
+                 self._scheduler.run()
+
+             self._scheduler_process = multiprocessing.Process(
+                 target=run_scheduler, name=f"rq-scheduler-{self.name}"
+             )
+             self._scheduler_process.start()
+             logger.info(
+                 f"Started RQ scheduler in background (PID: {self._scheduler_process.pid})"
+             )
+         else:
+             logger.info("Starting RQ scheduler in current process (blocking)")
+             self._scheduler.run()
+
+     def stop_scheduler(self) -> None:
+         """Stop the RQ scheduler process.
+
+         This method stops the scheduler process if running in background mode
+         and performs cleanup.
+
+         Example:
+         ```python
+         try:
+             worker.start_scheduler(background=True)
+             # ... do work ...
+         finally:
+             worker.stop_scheduler()
+         ```
+         """
+         if getattr(self, "_scheduler_process", None) is not None:
+             if self._scheduler_process.is_alive():
+                 self._scheduler_process.terminate()
+                 self._scheduler_process.join(timeout=5)
+                 logger.info("RQ scheduler process terminated")
+             self._scheduler_process = None
+         else:
+             logger.warning("No scheduler process to stop")
+
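Taken together, the worker and scheduler lifecycles compose as in the docstrings above; a minimal sketch, assuming a configured project backend and using a placeholder `my_task`:

```python
def my_task() -> None:
    print("tick")

worker = RQManager(name="demo")
try:
    worker.start_worker(background=True)       # processes queued jobs
    worker.start_scheduler(background=True)    # required for schedules to fire
    worker.add_schedule(my_task, interval=60)  # run my_task every minute
finally:
    worker.stop_scheduler()
    worker.stop_worker()
```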
+     ### Jobs ###
+
+     def add_job(
+         self,
+         func: Callable,
+         func_args: tuple | None = None,
+         func_kwargs: dict[str, Any] | None = None,
+         job_id: str | None = None,
+         result_ttl: float | dt.timedelta | None = None,
+         ttl: float | dt.timedelta | None = None,
+         queue_name: str | None = None,
+         run_at: dt.datetime | str | None = None,
+         run_in: dt.timedelta | int | str | None = None,
+         retry: int | dict | None = None,
+         repeat: int | dict | None = None,
+         meta: dict | None = None,
+         **job_kwargs,
+     ) -> Job:
+         """Add a job for immediate or scheduled execution.
+
+         Args:
+             func: Function to execute. Must be importable from the worker process.
+             func_args: Positional arguments to pass to the function.
+             func_kwargs: Keyword arguments to pass to the function.
+             job_id: Optional unique identifier for the job. If None, a UUID is generated.
+             result_ttl: Time to live for the job result, as seconds or timedelta.
+                 After this time, the result may be removed from Redis.
+             ttl: Maximum time the job can exist in Redis, as seconds or timedelta.
+                 After this time, the job will be removed even if not complete.
+             queue_name: Name of the queue to place the job in. If None, uses the
+                 first queue from configuration.
+             run_at: Schedule the job to run at a specific datetime.
+             run_in: Schedule the job to run after a delay.
+             retry: Number of retries or retry configuration dictionary.
+                 Example dict: {"max": 3, "interval": 60}
+             repeat: Number of repetitions or repeat configuration dictionary.
+                 Example dict: {"max": 5, "interval": 3600}
+             meta: Additional metadata to store with the job.
+             **job_kwargs: Additional arguments for RQ's Job class.
+
+         Returns:
+             Job: The created job instance.
+
+         Raises:
+             ValueError: If the function is not serializable or arguments are invalid.
+             RuntimeError: If the Redis connection fails.
+
+         Example:
+         ```python
+         def my_task(x: int, y: int = 0) -> int:
+             return x + y
+
+         # Add immediate job
+         job = worker.add_job(
+             my_task,
+             func_args=(1,),
+             func_kwargs={"y": 2},
+             result_ttl=3600  # Keep result for 1 hour
+         )
+
+         # Add scheduled job
+         tomorrow = dt.datetime.now() + dt.timedelta(days=1)
+         job = worker.add_job(
+             my_task,
+             func_args=(1, 2),
+             run_at=tomorrow,
+             queue_name="scheduled"
+         )
+
+         # Add job with retries
+         job = worker.add_job(
+             my_task,
+             func_args=(1, 2),
+             retry={"max": 3, "interval": 60}  # 3 retries, 1 minute apart
+         )
+
+         # Add repeating job
+         job = worker.add_job(
+             my_task,
+             func_args=(1, 2),
+             repeat={"max": 5, "interval": 3600}  # 5 times, hourly
+         )
+         ```
+         """
+         job_id = job_id or str(uuid.uuid4())
+         # Normalize numeric TTLs to timedeltas so .total_seconds() works below
+         if isinstance(result_ttl, (int, float)):
+             result_ttl = dt.timedelta(seconds=result_ttl)
+         if isinstance(ttl, (int, float)):
+             ttl = dt.timedelta(seconds=ttl)
+
+         if queue_name is None:
+             queue_name = self._queue_names[0]
+         elif queue_name not in self._queue_names:
+             logger.warning(
+                 f"Queue '{queue_name}' not found, using '{self._queue_names[0]}'"
+             )
+             queue_name = self._queue_names[0]
+
+         if repeat:
+             # Normalize repeat to a Repeat instance
+             if isinstance(repeat, int):
+                 repeat = Repeat(max=repeat)
+             elif isinstance(repeat, dict):
+                 repeat = Repeat(**repeat)
+             else:
+                 raise ValueError("Invalid repeat value. Must be int or dict.")
+         if retry:
+             # Normalize retry to a Retry instance
+             if isinstance(retry, int):
+                 retry = Retry(max=retry)
+             elif isinstance(retry, dict):
+                 retry = Retry(**retry)
+             else:
+                 raise ValueError("Invalid retry value. Must be int or dict.")
+
+         queue = self._queues[queue_name]
+         if run_at:
+             # Schedule the job to run at a specific time
+             if isinstance(run_at, str):
+                 run_at = dt.datetime.fromisoformat(run_at)
+             job = queue.enqueue_at(
+                 run_at,
+                 func,
+                 args=func_args,
+                 kwargs=func_kwargs,
+                 job_id=job_id,
+                 result_ttl=int(result_ttl.total_seconds()) if result_ttl else None,
+                 ttl=int(ttl.total_seconds()) if ttl else None,
+                 retry=retry,
+                 repeat=repeat,
+                 meta=meta,
+                 **job_kwargs,
+             )
+             logger.info(
+                 f"Enqueued job {job.id} ({func.__name__}) on queue '{queue_name}'. Scheduled to run at {run_at}."
+             )
+         elif run_in:
+             # Schedule the job to run after a delay
+             if isinstance(run_in, str):
+                 run_in = duration_parser.parse(run_in)
+             if isinstance(run_in, (int, float)):
+                 run_in = dt.timedelta(seconds=run_in)
+             job = queue.enqueue_in(
+                 run_in,
+                 func,
+                 args=func_args,
+                 kwargs=func_kwargs,
+                 job_id=job_id,
+                 result_ttl=int(result_ttl.total_seconds()) if result_ttl else None,
+                 ttl=int(ttl.total_seconds()) if ttl else None,
+                 retry=retry,
+                 repeat=repeat,
+                 meta=meta,
+                 **job_kwargs,
+             )
+             logger.info(
+                 f"Enqueued job {job.id} ({func.__name__}) on queue '{queue_name}'. Scheduled to run in {precisedelta(run_in)}."
+             )
+         else:
+             # Enqueue the job for immediate execution
+             job = queue.enqueue(
+                 func,
+                 args=func_args,
+                 kwargs=func_kwargs,
+                 job_id=job_id,
+                 result_ttl=int(result_ttl.total_seconds()) if result_ttl else None,
+                 ttl=int(ttl.total_seconds()) if ttl else None,
+                 retry=retry,
+                 repeat=repeat,
+                 meta=meta,
+                 **job_kwargs,
+             )
+             logger.info(f"Enqueued job {job.id} ({func.__name__}) on queue '{queue_name}'")
+         return job
+
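Because string `run_in` values go through `duration_parser.parse`, delays can presumably be given as human-readable durations (a sketch; the exact accepted string formats depend on the duration_parser package):

```python
import datetime as dt

# Delay as a string (parsed by duration_parser), a timedelta, or seconds
job = worker.add_job(my_task, func_args=(1,), run_in="10m")
job = worker.add_job(my_task, func_args=(1,), run_in=dt.timedelta(minutes=10))
job = worker.add_job(my_task, func_args=(1,), run_in=600)
```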
+     def run_job(
+         self,
+         func: Callable,
+         func_args: tuple | None = None,
+         func_kwargs: dict[str, Any] | None = None,
+         job_id: str | None = None,
+         result_ttl: float | dt.timedelta | None = None,
+         ttl: float | dt.timedelta | None = None,
+         queue_name: str | None = None,
+         retry: int | dict | None = None,
+         repeat: int | dict | None = None,
+         meta: dict | None = None,
+         **job_kwargs,
+     ) -> Any:
+         """Run a job immediately and return its result.
+
+         This method is a wrapper around add_job that waits for the job to complete
+         and returns its result. A worker must be running on the target queue for
+         the job to complete.
+
+         Args:
+             func: Function to execute. Must be importable from the worker process.
+             func_args: Positional arguments to pass to the function.
+             func_kwargs: Keyword arguments to pass to the function.
+             job_id: Optional unique identifier for the job.
+             result_ttl: Time to live for the job result.
+             ttl: Maximum time the job can exist.
+             queue_name: Name of the queue to use.
+             retry: Number of retries or retry configuration.
+             repeat: Number of repetitions or repeat configuration.
+             meta: Additional metadata to store with the job.
+             **job_kwargs: Additional arguments for RQ's Job class.
+
+         Returns:
+             Any: The result returned by the executed function.
+
+         Raises:
+             Exception: Any exception raised by the executed function.
+             TimeoutError: If the job times out before completion.
+
+         Example:
+         ```python
+         def add(x: int, y: int) -> int:
+             return x + y
+
+         # Run job and get result immediately
+         result = worker.run_job(
+             add,
+             func_args=(1, 2),
+             retry=3  # Retry up to 3 times on failure
+         )
+         assert result == 3
+         ```
+         """
+         job = self.add_job(
+             func=func,
+             func_args=func_args,
+             func_kwargs=func_kwargs,
+             job_id=job_id,
+             result_ttl=result_ttl,
+             ttl=ttl,
+             queue_name=queue_name,
+             retry=retry,
+             repeat=repeat,
+             meta=meta,
+             **job_kwargs,
+         )
+         # Poll until the job either finishes or fails; without the failure
+         # check a failed job would leave this loop spinning forever.
+         while not job.is_finished and not job.is_failed:
+             job.refresh()
+             time.sleep(0.1)
+         return job.result
+
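Since run_job polls until a worker has processed the job, it only makes sense with a worker already running; a minimal end-to-end sketch under that assumption:

```python
def add(x: int, y: int) -> int:
    return x + y

worker.start_worker(background=True)
try:
    result = worker.run_job(add, func_args=(1, 2))  # blocks until a worker finishes it
    assert result == 3
finally:
    worker.stop_worker()
```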
+     def _get_job_queue_name(self, job: str | Job) -> str | None:
+         """Get the queue name for a job.
+
+         Args:
+             job: Job ID or Job object.
+
+         Returns:
+             str | None: Name of the queue containing the job, or None if not found.
+         """
+         job_id = job if isinstance(job, str) else job.id
+         for queue_name in self.job_ids:
+             if job_id in self.job_ids[queue_name]:
+                 return queue_name
+         return None
+
+     def get_jobs(
+         self, queue_name: str | list[str] | None = None
+     ) -> dict[str, list[Job]]:
+         """Get all jobs from specified queues.
+
+         Args:
+             queue_name: Optional queue name or list of queue names to get jobs from.
+                 If None, gets jobs from all queues.
+
+         Returns:
+             dict[str, list[Job]]: Dictionary mapping queue names to lists of jobs.
+
+         Example:
+         ```python
+         # Get jobs from all queues
+         jobs = worker.get_jobs()
+         for queue_name, queue_jobs in jobs.items():
+             print(f"Queue {queue_name}: {len(queue_jobs)} jobs")
+
+         # Get jobs from specific queues
+         jobs = worker.get_jobs(["high", "default"])
+         ```
+         """
+         if queue_name is None:
+             queue_names = self._queue_names
+         elif isinstance(queue_name, str):
+             queue_names = [queue_name]
+         else:
+             queue_names = queue_name
+         return {name: self._queues[name].get_jobs() for name in queue_names}
+
+     def get_job(self, job_id: str) -> Job | None:
+         """Get a specific job by its ID.
+
+         Args:
+             job_id: Unique identifier of the job to retrieve.
+
+         Returns:
+             Job | None: The job object if found, None otherwise.
+
+         Example:
+         ```python
+         job = worker.get_job("550e8400-e29b-41d4-a716-446655440000")
+         if job:
+             print(f"Job status: {job.get_status()}")
+         ```
+         """
+         queue_name = self._get_job_queue_name(job=job_id)
+         if queue_name is None:
+             logger.error(f"Job {job_id} not found in any queue")
+             return None
+         job = self._queues[queue_name].fetch_job(job_id)
+         if job is None:
+             logger.error(f"Job {job_id} not found in queue '{queue_name}'")
+             return None
+         return job
+
+     def get_job_result(self, job: str | Job, delete_result: bool = False) -> Any:
+         """Get the result of a completed job.
+
+         Args:
+             job: Job ID or Job object.
+             delete_result: If True, deletes the job and its result after retrieval.
+
+         Returns:
+             Any: The result of the job if available.
+
+         Example:
+         ```python
+         # Get result and keep the job
+         result = worker.get_job_result("job-123")
+
+         # Get result and clean up
+         result = worker.get_job_result("job-123", delete_result=True)
+         ```
+         """
+         if isinstance(job, str):
+             job_id = job
+             job = self.get_job(job_id=job_id)
+             if job is None:
+                 logger.error(f"Job {job_id} not found in any queue")
+                 return None
+
+         # Capture the result before any cleanup removes it from Redis
+         result = job.result
+         if delete_result:
+             self.delete_job(job)
+
+         return result
+
+     def cancel_job(self, job: str | Job) -> bool:
+         """Cancel a job in the queue.
+
+         Args:
+             job: Job ID or Job object.
+
+         Returns:
+             bool: True if the job was canceled, False otherwise.
+         """
+         if isinstance(job, str):
+             job_id = job
+             job = self.get_job(job_id=job_id)
+             if job is None:
+                 logger.error(f"Job {job_id} not found in any queue")
+                 return False
+
+         job.cancel()
+         logger.info(f"Canceled job {job.id} from queue '{job.origin}'")
+         return True
+
+     def delete_job(self, job: str | Job, ttl: int = 0, **kwargs) -> bool:
+         """Remove a job from the queue.
+
+         Args:
+             job: Job ID or Job object.
+             ttl: Optional time to live for the job (in seconds). 0 means no TTL:
+                 the job is removed immediately.
+             **kwargs: Additional parameters for the job removal.
+
+         Returns:
+             bool: True if the job was removed, False otherwise.
+         """
+         if isinstance(job, str):
+             job = self.get_job(job)
+             if job is None:
+                 return False
+         if ttl:
+             job.cleanup(ttl=ttl, **kwargs)
+             logger.info(f"Removed job {job.id} from queue '{job.origin}' with TTL {ttl}")
+         else:
+             job.delete(**kwargs)
+             logger.info(f"Removed job {job.id} from queue '{job.origin}'")
+
+         return True
+
+     def cancel_all_jobs(self, queue_name: str | list[str] | None = None) -> None:
+         """Cancel all jobs in a queue.
+
+         Args:
+             queue_name: Optional name (or list of names) of the queue to cancel
+                 jobs from. If None, cancels jobs from all queues.
+         """
+         if queue_name is None:
+             queue_names = self._queue_names
+         elif isinstance(queue_name, str):
+             queue_names = [queue_name]
+         else:
+             queue_names = queue_name
+
+         for name in queue_names:
+             if name not in self._queue_names:
+                 logger.warning(f"Queue '{name}' not found, skipping")
+                 continue
+             # get_jobs returns a dict of queue name -> jobs; cancel each job
+             for job in self.get_jobs(queue_name=name)[name]:
+                 self.cancel_job(job)
+
+     def delete_all_jobs(self, queue_name: str | list[str] | None = None, ttl: int = 0) -> None:
+         """Remove all jobs from a queue.
+
+         Args:
+             queue_name: Optional name (or list of names) of the queue to remove
+                 jobs from. If None, removes jobs from all queues.
+             ttl: Optional time to live for the job (in seconds). 0 means no TTL:
+                 the job is removed immediately.
+         """
+         if queue_name is None:
+             queue_names = self._queue_names
+         elif isinstance(queue_name, str):
+             queue_names = [queue_name]
+         else:
+             queue_names = queue_name
+
+         for name in queue_names:
+             if name not in self._queue_names:
+                 logger.warning(f"Queue '{name}' not found, skipping")
+                 continue
+             for job in self.get_jobs(queue_name=name)[name]:
+                 self.delete_job(job, ttl=ttl)
+
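For instance, flushing a single queue with the two helpers above (a sketch; queue names are whatever the backend configures):

```python
worker.cancel_all_jobs(queue_name="low")  # stop pending jobs on "low"
worker.delete_all_jobs(queue_name="low")  # then drop them from Redis
```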
+     @property
+     def job_ids(self):
+         """Get all job IDs from all queues.
+
+         Returns:
+             dict[str, list[str]]: Dictionary mapping queue names to lists of job IDs.
+
+         Example:
+         ```python
+         all_ids = worker.job_ids
+         for queue_name, ids in all_ids.items():
+             print(f"Queue {queue_name}: {len(ids)} jobs")
+         ```
+         """
+         return {name: self._queues[name].job_ids for name in self._queue_names}
+
+     @property
+     def jobs(self):
+         """Get all jobs from all queues.
+
+         Returns:
+             dict[str, list[Job]]: Dictionary mapping queue names to lists of jobs.
+
+         Example:
+         ```python
+         all_jobs = worker.jobs
+         for queue_name, queue_jobs in all_jobs.items():
+             print(f"Queue {queue_name}: {len(queue_jobs)} jobs")
+         ```
+         """
+         return {name: self._queues[name].get_jobs() for name in self._queue_names}
+
+     ### Schedules ###
+
+     def add_schedule(
+         self,
+         func: Callable,
+         func_args: tuple | None = None,
+         func_kwargs: dict[str, Any] | None = None,
+         cron: str | None = None,  # Cron expression for scheduling
+         interval: int | None = None,  # Interval in seconds
+         date: dt.datetime | None = None,  # Date to run the job
+         schedule_id: str | None = None,
+         **schedule_kwargs,
+     ) -> Job:
+         """Schedule a job for repeated or one-time execution.
+
+         Args:
+             func: Function to execute. Must be importable from the worker process.
+             func_args: Positional arguments to pass to the function.
+             func_kwargs: Keyword arguments to pass to the function.
+             cron: Cron expression for scheduling (e.g. "0 * * * *" for hourly).
+             interval: Interval in seconds for recurring execution.
+             date: Specific datetime for one-time execution.
+             schedule_id: Optional unique identifier for the schedule.
+             **schedule_kwargs: Additional scheduling parameters:
+                 - repeat: Number of repetitions (int or dict)
+                 - result_ttl: Time to live for results (timedelta)
+                 - ttl: Time to live for the schedule (timedelta)
+                 - use_local_time_zone: Whether to use local time (bool)
+                 - queue_name: Queue to use for the scheduled jobs
+
+         Returns:
+             Job: The scheduled job instance.
+
+         Raises:
+             ValueError: If no scheduling method is specified or the cron
+                 expression is invalid.
+             RuntimeError: If the Redis connection fails.
+
+         Example:
+         ```python
+         def my_task(msg: str) -> None:
+             print(f"Task: {msg}")
+
+         # Schedule with cron (every hour)
+         job = worker.add_schedule(
+             my_task,
+             func_kwargs={"msg": "Hourly check"},
+             cron="0 * * * *"
+         )
+
+         # Schedule with interval (every 5 minutes)
+         job = worker.add_schedule(
+             my_task,
+             func_kwargs={"msg": "Regular check"},
+             interval=300
+         )
+
+         # Schedule for specific time
+         tomorrow = dt.datetime.now() + dt.timedelta(days=1)
+         job = worker.add_schedule(
+             my_task,
+             func_kwargs={"msg": "One-time task"},
+             date=tomorrow
+         )
+         ```
+         """
+         schedule_id = schedule_id or str(uuid.uuid4())
+         func_args = func_args or ()
+         func_kwargs = func_kwargs or {}
+
+         scheduler = self._scheduler
+
+         # Pop the shared options so they are not passed twice via **schedule_kwargs
+         use_local_time_zone = schedule_kwargs.pop("use_local_time_zone", True)
+         repeat = schedule_kwargs.pop("repeat", None)
+         result_ttl = schedule_kwargs.pop("result_ttl", None)
+         ttl = schedule_kwargs.pop("ttl", None)
+
+         if cron:
+             schedule = scheduler.cron(
+                 cron_string=cron,
+                 func=func,
+                 args=func_args,
+                 kwargs=func_kwargs,
+                 id=schedule_id,
+                 repeat=repeat,  # Infinite by default
+                 result_ttl=int(result_ttl.total_seconds()) if result_ttl else None,
+                 ttl=int(ttl.total_seconds()) if ttl else None,
+                 use_local_time_zone=use_local_time_zone,
+                 queue_name=self._scheduler_name,
+                 meta={"cron": cron},
+                 **schedule_kwargs,
+             )
+             logger.info(
+                 f"Scheduled job {schedule.id} ({func.__name__}) with cron '{get_description(cron)}'"
+             )
+         elif interval:
+             schedule = scheduler.schedule(
+                 scheduled_time=dt.datetime.now(dt.timezone.utc),
+                 func=func,
+                 args=func_args,
+                 kwargs=func_kwargs,
+                 interval=interval,
+                 id=schedule_id,
+                 repeat=repeat,  # Infinite by default
+                 result_ttl=int(result_ttl.total_seconds()) if result_ttl else None,
+                 ttl=int(ttl.total_seconds()) if ttl else None,
+                 queue_name=self._scheduler_name,
+                 meta={"interval": interval},
+             )
+             logger.info(
+                 f"Scheduled job {schedule.id} ({func.__name__}) with interval '{precisedelta(interval)}'"
+             )
+         elif date:
+             schedule = scheduler.schedule(
+                 scheduled_time=date,
+                 func=func,
+                 args=func_args,
+                 kwargs=func_kwargs,
+                 id=schedule_id,
+                 repeat=1,  # Run once
+                 result_ttl=int(result_ttl.total_seconds()) if result_ttl else None,
+                 ttl=int(ttl.total_seconds()) if ttl else None,
+                 queue_name=self._scheduler_name,
+                 meta={"date": date},
+             )
+             logger.info(
+                 f"Scheduled job {schedule.id} ({func.__name__}) to run at '{date}'"
+             )
+         else:
+             raise ValueError("One of 'cron', 'interval', or 'date' must be provided.")
+
+         return schedule
+
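The shared options travel through `schedule_kwargs` and TTLs are expected as timedeltas; a short sketch based on the signature above, reusing the `my_task` placeholder from the docstring:

```python
import datetime as dt

# Hourly cron schedule whose results are kept for one day
schedule = worker.add_schedule(
    my_task,
    func_kwargs={"msg": "hourly"},
    cron="0 * * * *",
    result_ttl=dt.timedelta(days=1),
)
```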
+     def _get_schedule_queue_name(self, schedule: str | Job) -> str | None:
+         """Get the queue name for a schedule.
+
+         Args:
+             schedule: Schedule ID or Job object.
+
+         Returns:
+             str | None: Name of the scheduler queue.
+         """
+         return self._scheduler_name
+
+     def get_schedules(
+         self,
+         until: Any | None = None,
+         with_times: bool = False,
+         offset: Any | None = None,
+         length: Any | None = None,
+     ) -> list[Job]:
+         """Get all schedules from the scheduler.
+
+         Args:
+             until: Get schedules until this time.
+             with_times: Include next run times in the results.
+             offset: Number of schedules to skip.
+             length: Maximum number of schedules to return.
+
+         Returns:
+             list[Job]: List of scheduled jobs (empty if none are found).
+
+         Example:
+         ```python
+         # Get all schedules
+         schedules = worker.get_schedules()
+
+         # Get next 10 schedules with run times
+         schedules = worker.get_schedules(
+             with_times=True,
+             length=10
+         )
+         ```
+         """
+         schedules = list(
+             self._scheduler.get_jobs(
+                 until=until, with_times=with_times, offset=offset, length=length
+             )
+         )
+         if not schedules:
+             logger.info("No schedules found")
+         return schedules
+
+     def get_schedule(self, schedule_id: str) -> Job | None:
+         """Get a schedule by its ID.
+
+         Args:
+             schedule_id: ID of the schedule.
+
+         Returns:
+             Job | None: Schedule object if found, None otherwise.
+         """
+         return self.get_job(job_id=schedule_id)
+
+     def _get_schedule_results(self, schedule: str | Job) -> list[Result] | None:
+         """Get all results from a schedule's execution history.
+
+         Args:
+             schedule: Schedule ID or Job object.
+
+         Returns:
+             list[Result] | None: All results from the schedule's executions, or
+                 None if the schedule was not found.
+         """
+         if isinstance(schedule, str):
+             schedule = self.get_schedule(schedule_id=schedule)
+
+         if schedule is None:
+             logger.error("Schedule not found in any queue")
+             return None
+
+         # Return the raw Result objects; callers unwrap .return_value as needed
+         return list(schedule.results())
+
+     def get_schedule_latest_result(
+         self, schedule: str | Job, delete_result: bool = False
+     ) -> Any:
+         """Get the most recent result of a schedule.
+
+         Args:
+             schedule: Schedule ID or Job object.
+             delete_result: If True, deletes the schedule and results after retrieval.
+
+         Returns:
+             Any: The most recent result of the schedule if available.
+
+         Example:
+         ```python
+         # Get latest result
+         result = worker.get_schedule_latest_result("schedule-123")
+
+         # Get result and clean up
+         result = worker.get_schedule_latest_result(
+             "schedule-123",
+             delete_result=True
+         )
+         ```
+         """
+         results = self._get_schedule_results(schedule=schedule)
+         if not results:
+             return None
+         result = results[-1].return_value
+
+         if delete_result:
+             self.delete_schedule(schedule)
+
+         return result
+
+     def get_schedule_result(
+         self, schedule: str | Job, index: int | list[int] | slice | str
+     ) -> Any:
+         """Get specific results from a schedule's execution history.
+
+         Args:
+             schedule: Schedule ID or Job object.
+             index: Which results to retrieve. Can be:
+                 - int: Specific index
+                 - list[int]: List of indices
+                 - slice: Range of indices
+                 - str: "all", "latest", "earliest", or a slice string like "0:5"
+
+         Returns:
+             The requested result value(s): a list for "all", list, and slice
+             indices; a single value for int, "latest", and "earliest".
+
+         Example:
+         ```python
+         # Get all results
+         results = worker.get_schedule_result("schedule-123", "all")
+
+         # Get latest result
+         result = worker.get_schedule_result("schedule-123", "latest")
+
+         # Get specific results
+         results = worker.get_schedule_result("schedule-123", [0, 2, 4])
+
+         # Get range of results
+         results = worker.get_schedule_result("schedule-123", slice(0, 5))
+         ```
+         """
+         results = self._get_schedule_results(schedule=schedule)
+         if not results:
+             return []
+
+         if isinstance(index, str):
+             if ":" in index:
+                 start, stop = (int(i) for i in index.split(":"))
+                 return [result.return_value for result in results[start:stop]]
+             if index == "all":
+                 return [result.return_value for result in results]
+             if index == "latest":
+                 return results[-1].return_value
+             if index == "earliest":
+                 return results[0].return_value
+         elif isinstance(index, list):
+             return [results[i].return_value for i in index if i < len(results)]
+         elif isinstance(index, slice):
+             return [result.return_value for result in results[index]]
+         elif isinstance(index, int):
+             if index >= len(results):
+                 logger.error(f"Index {index} out of range for schedule results")
+                 return []
+             return results[index].return_value
+
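One form the docstring examples do not show: the string index may also be a colon-separated slice, so these two calls should be equivalent:

```python
first_five = worker.get_schedule_result("schedule-123", "0:5")
same_five = worker.get_schedule_result("schedule-123", slice(0, 5))
```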
+     def cancel_schedule(self, schedule: str | Job) -> bool:
+         """Cancel a schedule.
+
+         This method stops any future executions of the schedule without removing
+         past results.
+
+         Args:
+             schedule: Schedule ID or Job object to cancel.
+
+         Returns:
+             bool: True if successfully canceled, False if schedule not found.
+
+         Example:
+         ```python
+         # Cancel by ID
+         worker.cancel_schedule("schedule-123")
+
+         # Cancel using job object
+         schedule = worker.get_schedule("schedule-123")
+         if schedule:
+             worker.cancel_schedule(schedule)
+         ```
+         """
+         if schedule is None:
+             logger.error("No schedule given to cancel")
+             return False
+
+         self._scheduler.cancel(schedule)
+         logger.info(
+             f"Canceled schedule {schedule.id if isinstance(schedule, Job) else schedule}"
+         )
+         return True
+
+     def cancel_all_schedules(self) -> None:
+         """Cancel all schedules in the scheduler.
+
+         This method stops all future executions of all schedules without removing
+         past results.
+
+         Example:
+         ```python
+         # Stop all scheduled jobs
+         worker.cancel_all_schedules()
+         ```
+         """
+         for job in self._scheduler.get_jobs():
+             self._scheduler.cancel(job.id)
+             logger.info(f"Canceled schedule {job.id}")
+         logger.info("Canceled all schedules from all queues.")
+
+     def delete_schedule(self, schedule: str | Job) -> bool:
+         """Delete a schedule and its results.
+
+         This method removes the schedule and its execution history from Redis.
+
+         Args:
+             schedule: Schedule ID or Job object to delete.
+
+         Returns:
+             bool: True if successfully deleted, False if schedule not found.
+
+         Example:
+         ```python
+         # Delete schedule and its history
+         worker.delete_schedule("schedule-123")
+         ```
+         """
+         return self.delete_job(schedule)
+
+     def delete_all_schedules(self) -> None:
+         """Delete all schedules and their results.
+
+         This method removes all schedules and their execution histories from Redis.
+
+         Example:
+         ```python
+         # Remove all schedules and their histories
+         worker.delete_all_schedules()
+         ```
+         """
+         for schedule_id in self.schedule_ids:
+             self.delete_schedule(schedule_id)
+             logger.info(f"Deleted schedule {schedule_id}")
+         logger.info("Deleted all schedules from all queues.")
+
+     @property
+     def schedules(self):
+         """Get all schedules from the scheduler.
+
+         Returns:
+             list[Job]: List of all scheduled jobs.
+
+         Example:
+         ```python
+         all_schedules = worker.schedules
+         print(f"Total schedules: {len(all_schedules)}")
+         ```
+         """
+         return self.get_schedules()
+
+     @property
+     def schedule_ids(self):
+         """Get all schedule IDs.
+
+         Returns:
+             list[str]: List of unique identifiers for all schedules.
+
+         Example:
+         ```python
+         ids = worker.schedule_ids
+         print(f"Schedule IDs: {', '.join(ids)}")
+         ```
+         """
+         return [schedule.id for schedule in self.schedules]