FlowerPower 0.9.13.1__py3-none-any.whl → 1.0.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. flowerpower/__init__.py +17 -2
  2. flowerpower/cfg/__init__.py +201 -149
  3. flowerpower/cfg/base.py +122 -24
  4. flowerpower/cfg/pipeline/__init__.py +254 -0
  5. flowerpower/cfg/pipeline/adapter.py +66 -0
  6. flowerpower/cfg/pipeline/run.py +40 -11
  7. flowerpower/cfg/pipeline/schedule.py +69 -79
  8. flowerpower/cfg/project/__init__.py +149 -0
  9. flowerpower/cfg/project/adapter.py +57 -0
  10. flowerpower/cfg/project/job_queue.py +165 -0
  11. flowerpower/cli/__init__.py +92 -37
  12. flowerpower/cli/job_queue.py +878 -0
  13. flowerpower/cli/mqtt.py +32 -1
  14. flowerpower/cli/pipeline.py +559 -406
  15. flowerpower/cli/utils.py +29 -18
  16. flowerpower/flowerpower.py +12 -8
  17. flowerpower/fs/__init__.py +20 -2
  18. flowerpower/fs/base.py +350 -26
  19. flowerpower/fs/ext.py +797 -216
  20. flowerpower/fs/storage_options.py +1097 -55
  21. flowerpower/io/base.py +13 -18
  22. flowerpower/io/loader/__init__.py +28 -0
  23. flowerpower/io/loader/deltatable.py +7 -10
  24. flowerpower/io/metadata.py +1 -0
  25. flowerpower/io/saver/__init__.py +28 -0
  26. flowerpower/io/saver/deltatable.py +4 -3
  27. flowerpower/job_queue/__init__.py +252 -0
  28. flowerpower/job_queue/apscheduler/__init__.py +11 -0
  29. flowerpower/job_queue/apscheduler/_setup/datastore.py +110 -0
  30. flowerpower/job_queue/apscheduler/_setup/eventbroker.py +93 -0
  31. flowerpower/job_queue/apscheduler/manager.py +1063 -0
  32. flowerpower/job_queue/apscheduler/setup.py +524 -0
  33. flowerpower/job_queue/apscheduler/trigger.py +169 -0
  34. flowerpower/job_queue/apscheduler/utils.py +309 -0
  35. flowerpower/job_queue/base.py +382 -0
  36. flowerpower/job_queue/rq/__init__.py +10 -0
  37. flowerpower/job_queue/rq/_trigger.py +37 -0
  38. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +226 -0
  39. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +231 -0
  40. flowerpower/job_queue/rq/manager.py +1449 -0
  41. flowerpower/job_queue/rq/setup.py +150 -0
  42. flowerpower/job_queue/rq/utils.py +69 -0
  43. flowerpower/pipeline/__init__.py +5 -0
  44. flowerpower/pipeline/base.py +118 -0
  45. flowerpower/pipeline/io.py +407 -0
  46. flowerpower/pipeline/job_queue.py +505 -0
  47. flowerpower/pipeline/manager.py +1586 -0
  48. flowerpower/pipeline/registry.py +560 -0
  49. flowerpower/pipeline/runner.py +560 -0
  50. flowerpower/pipeline/visualizer.py +142 -0
  51. flowerpower/plugins/mqtt/__init__.py +12 -0
  52. flowerpower/plugins/mqtt/cfg.py +16 -0
  53. flowerpower/plugins/mqtt/manager.py +789 -0
  54. flowerpower/settings.py +110 -0
  55. flowerpower/utils/logging.py +21 -0
  56. flowerpower/utils/misc.py +57 -9
  57. flowerpower/utils/sql.py +122 -24
  58. flowerpower/utils/templates.py +2 -142
  59. flowerpower-1.0.0b2.dist-info/METADATA +324 -0
  60. flowerpower-1.0.0b2.dist-info/RECORD +94 -0
  61. flowerpower/_web/__init__.py +0 -61
  62. flowerpower/_web/routes/config.py +0 -103
  63. flowerpower/_web/routes/pipelines.py +0 -173
  64. flowerpower/_web/routes/scheduler.py +0 -136
  65. flowerpower/cfg/pipeline/tracker.py +0 -14
  66. flowerpower/cfg/project/open_telemetry.py +0 -8
  67. flowerpower/cfg/project/tracker.py +0 -11
  68. flowerpower/cfg/project/worker.py +0 -19
  69. flowerpower/cli/scheduler.py +0 -309
  70. flowerpower/cli/web.py +0 -44
  71. flowerpower/event_handler.py +0 -23
  72. flowerpower/mqtt.py +0 -609
  73. flowerpower/pipeline.py +0 -2499
  74. flowerpower/scheduler.py +0 -680
  75. flowerpower/tui.py +0 -79
  76. flowerpower/utils/datastore.py +0 -186
  77. flowerpower/utils/eventbroker.py +0 -127
  78. flowerpower/utils/executor.py +0 -58
  79. flowerpower/utils/trigger.py +0 -140
  80. flowerpower-0.9.13.1.dist-info/METADATA +0 -586
  81. flowerpower-0.9.13.1.dist-info/RECORD +0 -76
  82. /flowerpower/{cfg/pipeline/params.py → cli/worker.py} +0 -0
  83. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b2.dist-info}/WHEEL +0 -0
  84. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b2.dist-info}/entry_points.txt +0 -0
  85. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b2.dist-info}/top_level.txt +0 -0
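The bulk of this release is a restructuring. The old top-level modules flowerpower/pipeline.py, flowerpower/scheduler.py and flowerpower/mqtt.py are removed in favor of dedicated flowerpower/pipeline/ and flowerpower/job_queue/ packages (with RQ and APScheduler backends) and an mqtt plugin, and the CLI gains a new job-queue command group in flowerpower/cli/job_queue.py, shown in full in the diff below. For orientation only, the following sketch shows how that CLI appears to drive the job queue programmatically; it is inferred from the CLI code in this diff, and the default values and exact constructor signature are assumptions rather than documented API.

    # Hedged sketch, inferred from the CLI code below: open the JobQueue manager as a
    # context manager and start workers, mirroring
    # `flowerpower job-queue start-worker --type rq --num-workers 4`.
    from flowerpower.job_queue import JobQueue

    with JobQueue(
        type="rq",           # backend: "rq" or "apscheduler"
        name=None,           # scheduler configuration name (assumed optional)
        base_dir=None,       # project base directory (assumed optional)
        storage_options={},  # parsed storage options
        log_level="info",
    ) as worker:
        worker.start_worker_pool(num_workers=4, background=False)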
flowerpower/cli/job_queue.py (new file)
@@ -0,0 +1,878 @@
+import typer
+from .. import settings
+from ..job_queue import JobQueue  # Adjust import as needed
+from .utils import parse_dict_or_list_param
+from ..utils.logging import setup_logging
+from loguru import logger
+
+# Create a Typer app for job queue management commands
+app = typer.Typer(help="Job queue management commands")
+
+setup_logging(
+    level=settings.LOG_LEVEL)
+
+@app.command()
+def start_worker(
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    background: bool = typer.Option(False, "--background", "-b", help="Run the worker in the background"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+    num_workers: int | None = typer.Option(None, "--num-workers", "-n", help="Number of worker processes to start (pool mode)"),
+):
+    """
+    Start a worker or worker pool to process jobs.
+
+    This command starts a worker process (or a pool of worker processes) that will
+    execute jobs from the queue. The worker will continue running until stopped
+    or can be run in the background.
+
+    Args:
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        background: Run the worker in the background
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+        num_workers: Number of worker processes to start (pool mode)
+
+    Examples:
+        # Start a worker with default settings
+        $ flowerpower job-queue start-worker
+
+        # Start a worker for a specific backend type
+        $ flowerpower job-queue start-worker --type rq
+
+        # Start a worker pool with 4 processes
+        $ flowerpower job-queue start-worker --num-workers 4
+
+        # Run a worker in the background
+        $ flowerpower job-queue start-worker --background
+
+        # Set a specific logging level
+        $ flowerpower job-queue start-worker --log-level debug
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if num_workers is None:
+            # Fall back to the configured worker count when not given on the CLI
+            num_workers = worker.cfg.backend.num_workers
+
+        if num_workers and num_workers > 1:
+            worker.start_worker_pool(
+                num_workers=num_workers, background=background
+            )
+        else:
+            worker.start_worker(
+                background=background
+            )
+
+@app.command()
+def start_scheduler(
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    background: bool = typer.Option(False, "--background", "-b", help="Run the scheduler in the background"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+    interval: int = typer.Option(60, "--interval", "-i", help="Interval for checking jobs in seconds (RQ only)"),
+):
+    """
+    Start the scheduler process for queued jobs.
+
+    This command starts a scheduler that manages queued jobs and scheduled tasks.
+    Note that this is only needed for RQ workers, as APScheduler workers have
+    their own built-in scheduler.
+
+    Args:
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        background: Run the scheduler in the background
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+        interval: Interval for checking jobs in seconds (RQ only)
+
+    Examples:
+        # Start a scheduler with default settings
+        $ flowerpower job-queue start-scheduler
+
+        # Start a scheduler for a specific backend type
+        $ flowerpower job-queue start-scheduler --type rq
+
+        # Run a scheduler in the background
+        $ flowerpower job-queue start-scheduler --background
+
+        # Set a specific scheduler check interval (RQ only)
+        $ flowerpower job-queue start-scheduler --interval 30
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if worker.cfg.backend.type != "rq":
+            logger.info(f"No scheduler needed for {worker.cfg.backend.type} workers. Skipping.")
+            return
+
+        worker.start_scheduler(background=background, interval=interval)
+
+
+# @app.command()
+# def cancel_all_jobs(
+#     type: str | None = None,
+#     queue_name: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Cancel all jobs from the scheduler.
+
+#     Note: This is different from deleting jobs as it only stops them from running but keeps their history.
+
+#     Args:
+#         type: Type of the job queue (rq, apscheduler)
+#         queue_name: Name of the queue (RQ only)
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         if worker.cfg.backend.type != "rq":
+#             logger.info(f"Job cancellation is not supported for {worker.cfg.backend.type} workers. Skipping.")
+#             return
+
+#         worker.cancel_all_jobs(queue_name=queue_name)
+
+# @app.command()
+# def cancel_all_schedules(
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Cancel all schedules from the scheduler.
+
+#     Note: This is different from deleting schedules as it only stops them from running but keeps their configuration.
+
+#     Args:
+#         type: Type of the job queue (rq, apscheduler)
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         worker.cancel_all_schedules()
+
+@app.command()
+def cancel_job(
+    job_id: str = typer.Argument(..., help="ID of the job to cancel"),
+    all: bool = typer.Option(False, "--all", "-a", help="Cancel all jobs instead of a specific one"),
+    queue_name: str | None = typer.Option(None, help="Name of the queue (RQ only). If provided with --all, cancels all jobs in the queue"),
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+):
+    """
+    Cancel a job or multiple jobs in the queue.
+
+    This command stops a job from executing (if it hasn't started yet) or signals
+    it to stop (if already running). Canceling is different from deleting as it
+    maintains the job history but prevents execution.
+
+    Args:
+        job_id: ID of the job to cancel (ignored if --all is used)
+        all: Cancel all jobs instead of a specific one
+        queue_name: For RQ only, specifies the queue to cancel jobs from
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+
+    Examples:
+        # Cancel a specific job
+        $ flowerpower job-queue cancel-job job-123456
+
+        # Cancel all jobs in the default queue
+        $ flowerpower job-queue cancel-job --all dummy-id
+
+        # Cancel all jobs in a specific queue (RQ only)
+        $ flowerpower job-queue cancel-job --all dummy-id --queue-name high-priority
+
+        # Specify the backend type explicitly
+        $ flowerpower job-queue cancel-job job-123456 --type rq
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if worker.cfg.backend.type != "rq":
+            logger.info(f"Job cancellation is not supported for {worker.cfg.backend.type} workers. Skipping.")
+            return
+        if all:
+            count = worker.cancel_all_jobs(queue_name=queue_name if worker.cfg.backend.type == "rq" else None)
+            logger.info(f"Cancelled {count} jobs" + (f" in queue '{queue_name}'" if queue_name else ""))
+        else:
+            worker.cancel_job(job_id)
+            logger.info(f"Job {job_id} cancelled")
+
+@app.command()
+def cancel_schedule(
+    schedule_id: str,
+    all: bool = False,
+    type: str | None = None,
+    name: str | None = None,
+    base_dir: str | None = None,
+    storage_options: str | None = None,
+    log_level: str = "info",
+):
+    """
+    Cancel a specific schedule.
+
+    Note: This is different from deleting a schedule as it only stops it from running but keeps its configuration.
+
+    Args:
+        schedule_id: ID of the schedule to cancel
+        all: If True, cancel all schedules
+        type: Type of the job queue (rq, apscheduler)
+        name: Name of the scheduler
+        base_dir: Base directory for the scheduler
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if all:
+            worker.cancel_all_schedules()
+        else:
+            worker.cancel_schedule(schedule_id)
+
+# @app.command()
+# def delete_all_jobs(
+#     type: str | None = None,
+#     queue_name: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Delete all jobs from the scheduler. Note that this is different from cancelling jobs
+#     as it also removes job history and results.
+
+#     Args:
+#         queue_name: Name of the queue (RQ only)
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         worker.delete_all_jobs(queue_name=queue_name if worker.cfg.backend.type == "rq" else None)
+
+# @app.command()
+# def delete_all_schedules(
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Delete all schedules from the scheduler.
+
+#     Args:
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         worker.delete_all_schedules()
+
+@app.command()
+def delete_job(
+    job_id: str,
+    all: bool = False,
+    queue_name: str | None = None,
+    type: str | None = None,
+    name: str | None = None,
+    base_dir: str | None = None,
+    storage_options: str | None = None,
+    log_level: str = "info",
+):
+    """
+    Delete a specific job.
+
+    Args:
+        job_id: ID of the job to delete
+        all: If True, delete all jobs
+        queue_name: Name of the queue (RQ only). If provided and all is True, delete all jobs in the queue
+        type: Type of the job queue (rq, apscheduler)
+        name: Name of the scheduler
+        base_dir: Base directory for the scheduler
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if all:
+            worker.delete_all_jobs(queue_name=queue_name if worker.cfg.backend.type == "rq" else None)
+        else:
+            worker.delete_job(job_id)
+
+
+@app.command()
+def delete_schedule(
+    schedule_id: str,
+    all: bool = False,
+    type: str | None = None,
+    name: str | None = None,
+    base_dir: str | None = None,
+    storage_options: str | None = None,
+    log_level: str = "info",
+):
+    """
+    Delete a specific schedule.
+
+    Args:
+        schedule_id: ID of the schedule to delete
+        all: If True, delete all schedules
+        type: Type of the job queue (rq, apscheduler)
+        name: Name of the scheduler
+        base_dir: Base directory for the scheduler
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if all:
+            worker.delete_all_schedules()
+        else:
+            worker.delete_schedule(schedule_id)
+
+# @app.command()
+# def get_job(
+#     job_id: str,
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Get information about a specific job.
+
+#     Args:
+#         job_id: ID of the job
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         # show_jobs should display the job info
+#         worker.show_jobs(job_id=job_id)
+
+# @app.command()
+# def get_job_result(
+#     job_id: str,
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+#     wait: bool = True,
+# ):
+#     """
+#     Get the result of a specific job.
+
+#     Args:
+#         job_id: ID of the job
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#         wait: Wait for the result if job is still running (APScheduler only)
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         # worker's get_job_result method will handle the result display
+#         worker.get_job_result(job_id, wait=wait if worker.cfg.backend.type == "apscheduler" else False)
+
+# @app.command()
+# def get_jobs(
+#     type: str | None = None,
+#     queue_name: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     List all jobs.
+
+#     Args:
+#         queue_name: Name of the queue (RQ only)
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         worker.show_jobs()
+
+# @app.command()
+# def get_schedule(
+#     schedule_id: str,
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Get information about a specific schedule.
+
+#     Args:
+#         schedule_id: ID of the schedule
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         # show_schedule should display the schedule info
+#         worker.show_schedules(schedule_id=schedule_id)
+
+# @app.command()
+# def get_schedules(
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     List all schedules.
+
+#     Args:
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         worker.show_schedules()
+
+@app.command()
+def show_job_ids(
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+):
+    """
+    Show all job IDs in the job queue.
+
+    This command displays all job IDs currently in the system, helping you identify
+    jobs for other operations like getting results, canceling, or deleting jobs.
+
+    Args:
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+
+    Examples:
+        # Show job IDs using default settings
+        $ flowerpower job-queue show-job-ids
+
+        # Show job IDs for a specific queue type
+        $ flowerpower job-queue show-job-ids --type rq
+
+        # Show job IDs with a custom scheduler configuration
+        $ flowerpower job-queue show-job-ids --name my-scheduler
+
+        # Show job IDs with debug logging
+        $ flowerpower job-queue show-job-ids --log-level debug
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        ids = worker.job_ids
+        if not ids:
+            logger.info("No job IDs found")
+        else:
+            # Print the IDs returned by the worker's job_ids property
+            for job_id in ids:
+                print(f"- {job_id}")
+
+
+@app.command()
+def show_schedule_ids(
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+):
+    """
+    Show all schedule IDs in the job queue.
+
+    This command displays all schedule IDs currently in the system, helping you
+    identify schedules for other operations like pausing, resuming, or deleting schedules.
+
+    Args:
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+
+    Examples:
+        # Show schedule IDs using default settings
+        $ flowerpower job-queue show-schedule-ids
+
+        # Show schedule IDs for a specific queue type
+        $ flowerpower job-queue show-schedule-ids --type apscheduler
+
+        # Show schedule IDs with a custom scheduler configuration
+        $ flowerpower job-queue show-schedule-ids --name my-scheduler
+
+        # Show schedule IDs with debug logging
+        $ flowerpower job-queue show-schedule-ids --log-level debug
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        ids = worker.schedule_ids
+        if not ids:
+            logger.info("No schedule IDs found")
+        else:
+            # Print the IDs returned by the worker's schedule_ids property
+            for schedule_id in ids:
+                print(f"- {schedule_id}")
+
+# @app.command()
+# def pause_all_schedules(
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Pause all schedules.
+
+#     Note: This functionality is only available for APScheduler workers.
+
+#     Args:
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         if worker.cfg.backend.type != "apscheduler":
+#             logger.info(f"Schedule pausing is not supported for {worker.cfg.backend.type} workers.")
+#             return
+#         worker.pause_all_schedules()
+
+@app.command()
+def pause_schedule(
+    schedule_id: str = typer.Argument(..., help="ID of the schedule to pause"),
+    all: bool = typer.Option(False, "--all", "-a", help="Pause all schedules instead of a specific one"),
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+):
+    """
+    Pause a schedule or multiple schedules.
+
+    This command temporarily stops a scheduled job from running while maintaining its
+    configuration. Paused schedules can be resumed later. Note that this functionality
+    is only available for APScheduler workers.
+
+    Args:
+        schedule_id: ID of the schedule to pause (ignored if --all is used)
+        all: Pause all schedules instead of a specific one
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+
+    Examples:
+        # Pause a specific schedule
+        $ flowerpower job-queue pause-schedule schedule-123456
+
+        # Pause all schedules
+        $ flowerpower job-queue pause-schedule --all dummy-id
+
+        # Specify the backend type explicitly
+        $ flowerpower job-queue pause-schedule schedule-123456 --type apscheduler
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if worker.cfg.backend.type != "apscheduler":
+            logger.info(f"Schedule pausing is not supported for {worker.cfg.backend.type} workers.")
+            return
+        if all:
+            count = worker.pause_all_schedules()
+            logger.info(f"Paused {count} schedules")
+        else:
+            success = worker.pause_schedule(schedule_id)
+            if success:
+                logger.info(f"Schedule {schedule_id} paused successfully")
+            else:
+                logger.error(f"Failed to pause schedule {schedule_id}")
+
+
+# @app.command()
+# def resume_all_schedules(
+#     type: str | None = None,
+#     name: str | None = None,
+#     base_dir: str | None = None,
+#     storage_options: str | None = None,
+#     log_level: str = "info",
+# ):
+#     """
+#     Resume all paused schedules.
+
+#     Note: This functionality is only available for APScheduler workers.
+
+#     Args:
+#         name: Name of the scheduler
+#         base_dir: Base directory for the scheduler
+#         storage_options: Storage options as JSON or key=value pairs
+#         log_level: Logging level
+#     """
+#     parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+#     with JobQueue(
+#         type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+#     ) as worker:
+#         if worker.cfg.backend.type != "apscheduler":
+#             logger.info(f"Schedule resuming is not supported for {worker.cfg.backend.type} workers.")
+#             return
+#         worker.resume_all_schedules()
+
+@app.command()
+def resume_schedule(
+    schedule_id: str = typer.Argument(..., help="ID of the schedule to resume"),
+    all: bool = typer.Option(False, "--all", "-a", help="Resume all schedules instead of a specific one"),
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+):
+    """
+    Resume a paused schedule or multiple schedules.
+
+    This command restarts previously paused schedules, allowing them to run again according
+    to their original configuration. Note that this functionality is only available for
+    APScheduler workers.
+
+    Args:
+        schedule_id: ID of the schedule to resume (ignored if --all is used)
+        all: Resume all schedules instead of a specific one
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+
+    Examples:
+        # Resume a specific schedule
+        $ flowerpower job-queue resume-schedule schedule-123456
+
+        # Resume all schedules
+        $ flowerpower job-queue resume-schedule --all dummy-id
+
+        # Specify the backend type explicitly
+        $ flowerpower job-queue resume-schedule schedule-123456 --type apscheduler
+
+        # Set a specific logging level
+        $ flowerpower job-queue resume-schedule schedule-123456 --log-level debug
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        if worker.cfg.backend.type != "apscheduler":
+            logger.info(f"Schedule resuming is not supported for {worker.cfg.backend.type} workers.")
+            return
+        if all:
+            count = worker.resume_all_schedules()
+            logger.info(f"Resumed {count} schedules")
+        else:
+            success = worker.resume_schedule(schedule_id)
+            if success:
+                logger.info(f"Schedule {schedule_id} resumed successfully")
+            else:
+                logger.error(f"Failed to resume schedule {schedule_id}")
+
+@app.command()
+def show_jobs(
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    queue_name: str | None = typer.Option(None, help="Name of the queue to show jobs from (RQ only)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+    format: str = typer.Option("table", help="Output format (table, json, yaml)"),
+):
+    """
+    Display detailed information about all jobs in the queue.
+
+    This command shows comprehensive information about jobs including their status,
+    creation time, execution time, and other details in a user-friendly format.
+
+    Args:
+        type: Type of job queue backend (rq, apscheduler)
+        queue_name: Name of the queue to show jobs from (RQ only)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+        format: Output format for the job information
+
+    Examples:
+        # Show all jobs using default settings
+        $ flowerpower job-queue show-jobs
+
+        # Show jobs for a specific queue type
+        $ flowerpower job-queue show-jobs --type rq
+
+        # Show jobs in a specific RQ queue
+        $ flowerpower job-queue show-jobs --queue-name high-priority
+
+        # Display jobs in JSON format
+        $ flowerpower job-queue show-jobs --format json
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        worker.show_jobs(queue_name=queue_name, format=format)
+
+@app.command()
+def show_schedules(
+    type: str | None = typer.Option(None, help="Type of job queue backend (rq, apscheduler)"),
+    name: str | None = typer.Option(None, help="Name of the scheduler configuration to use"),
+    base_dir: str | None = typer.Option(None, help="Base directory for the scheduler configuration"),
+    storage_options: str | None = typer.Option(None, help="Storage options as JSON or key=value pairs"),
+    log_level: str = typer.Option("info", help="Logging level (debug, info, warning, error, critical)"),
+    format: str = typer.Option("table", help="Output format (table, json, yaml)"),
+):
+    """
+    Display detailed information about all schedules.
+
+    This command shows comprehensive information about scheduled jobs including their
+    timing configuration, status, and other details in a user-friendly format.
+
+    Args:
+        type: Type of job queue backend (rq, apscheduler)
+        name: Name of the scheduler configuration to use
+        base_dir: Base directory for the scheduler configuration
+        storage_options: Storage options as JSON or key=value pairs
+        log_level: Logging level (debug, info, warning, error, critical)
+        format: Output format for the schedule information
+
+    Examples:
+        # Show all schedules using default settings
+        $ flowerpower job-queue show-schedules
+
+        # Show schedules for a specific queue type
+        $ flowerpower job-queue show-schedules --type apscheduler
+
+        # Display schedules in JSON format
+        $ flowerpower job-queue show-schedules --format json
+    """
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+
+    with JobQueue(
+        type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
+    ) as worker:
+        worker.show_schedules(format=format)
+
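Every command in this new module follows the same pattern: parse --storage-options via parse_dict_or_list_param, open a JobQueue context manager, guard backend-specific operations by checking worker.cfg.backend.type, and delegate to the corresponding manager method. A rough programmatic equivalent of `flowerpower job-queue cancel-job --all dummy-id` is sketched below; it is an illustration built from the CLI code above, assuming the JobQueue constructor accepts the same keyword arguments the CLI passes.

    # Hedged sketch mirroring the cancel-job --all code path above (RQ backend only).
    from flowerpower.job_queue import JobQueue

    with JobQueue(type="rq", name=None, base_dir=None, storage_options={}, log_level="info") as worker:
        cancelled = worker.cancel_all_jobs(queue_name=None)
        print(f"Cancelled {cancelled} jobs")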