FlowerPower 0.20.0__py3-none-any.whl → 0.30.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. flowerpower/__init__.py +2 -6
  2. flowerpower/cfg/__init__.py +4 -11
  3. flowerpower/cfg/base.py +29 -25
  4. flowerpower/cfg/pipeline/__init__.py +3 -3
  5. flowerpower/cfg/pipeline/_schedule.py +32 -0
  6. flowerpower/cfg/pipeline/adapter.py +0 -5
  7. flowerpower/cfg/pipeline/builder.py +377 -0
  8. flowerpower/cfg/pipeline/run.py +89 -0
  9. flowerpower/cfg/project/__init__.py +8 -21
  10. flowerpower/cfg/project/adapter.py +0 -12
  11. flowerpower/cli/__init__.py +2 -28
  12. flowerpower/cli/pipeline.py +10 -4
  13. flowerpower/flowerpower.py +275 -585
  14. flowerpower/pipeline/base.py +19 -10
  15. flowerpower/pipeline/io.py +52 -46
  16. flowerpower/pipeline/manager.py +149 -91
  17. flowerpower/pipeline/pipeline.py +159 -87
  18. flowerpower/pipeline/registry.py +68 -33
  19. flowerpower/pipeline/visualizer.py +4 -4
  20. flowerpower/plugins/{_io → io}/__init__.py +1 -1
  21. flowerpower/settings/__init__.py +0 -2
  22. flowerpower/settings/{backend.py → _backend.py} +0 -19
  23. flowerpower/settings/logging.py +1 -1
  24. flowerpower/utils/logging.py +24 -12
  25. flowerpower/utils/misc.py +17 -0
  26. flowerpower-0.30.0.dist-info/METADATA +451 -0
  27. flowerpower-0.30.0.dist-info/RECORD +42 -0
  28. flowerpower/cfg/pipeline/schedule.py +0 -74
  29. flowerpower/cfg/project/job_queue.py +0 -111
  30. flowerpower/cli/job_queue.py +0 -1329
  31. flowerpower/cli/mqtt.py +0 -174
  32. flowerpower/job_queue/__init__.py +0 -205
  33. flowerpower/job_queue/base.py +0 -611
  34. flowerpower/job_queue/rq/__init__.py +0 -10
  35. flowerpower/job_queue/rq/_trigger.py +0 -37
  36. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +0 -226
  37. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +0 -228
  38. flowerpower/job_queue/rq/manager.py +0 -1893
  39. flowerpower/job_queue/rq/setup.py +0 -154
  40. flowerpower/job_queue/rq/utils.py +0 -69
  41. flowerpower/mqtt.py +0 -12
  42. flowerpower/plugins/mqtt/__init__.py +0 -12
  43. flowerpower/plugins/mqtt/cfg.py +0 -17
  44. flowerpower/plugins/mqtt/manager.py +0 -962
  45. flowerpower/settings/job_queue.py +0 -31
  46. flowerpower-0.20.0.dist-info/METADATA +0 -693
  47. flowerpower-0.20.0.dist-info/RECORD +0 -58
  48. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/WHEEL +0 -0
  49. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/entry_points.txt +0 -0
  50. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/licenses/LICENSE +0 -0
  51. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/top_level.txt +0 -0
@@ -1,1329 +0,0 @@
1
- import datetime as dt
2
-
3
- import duration_parser
4
- import typer
5
- from loguru import logger
6
-
7
- from .. import settings
8
- from ..flowerpower import FlowerPowerProject
9
- from ..job_queue import JobQueueManager # Adjust import as needed
10
- from ..utils.logging import setup_logging
11
- from .utils import parse_dict_or_list_param
12
-
13
# Typer sub-application that hosts every job-queue related CLI command.
app = typer.Typer(help="Job queue management commands")

# Configure logging once at import time, using the project-wide default level.
setup_logging(level=settings.LOG_LEVEL)
19
@app.command()
def start_worker(
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    background: bool = typer.Option(
        False, "--background", "-b", help="Run the worker in the background"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
    num_workers: int | None = typer.Option(
        None,
        "--num-workers",
        "-n",
        help="Number of worker processes to start (pool mode)",
    ),
):
    """
    Start a worker (or a pool of workers) that consumes jobs from the queue.

    The worker keeps running until interrupted, or it can be detached with
    --background. When --num-workers is omitted, the pool size configured in
    the job-queue settings is used; a size greater than one starts a pool.

    Args:
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        background: Run the worker in the background
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)
        num_workers: Number of worker processes to start (pool mode)

    Examples:
        # Start a worker with default settings
        $ flowerpower job-queue start-worker

        # Start a worker for a specific backend type
        $ flowerpower job-queue start-worker --type rq

        # Start a worker pool with 4 processes
        $ flowerpower job-queue start-worker --num-workers 4

        # Run a worker in the background
        $ flowerpower job-queue start-worker --background

        # Set a specific logging level
        $ flowerpower job-queue start-worker --log-level debug
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    ) as worker:
        # Fall back to the configured pool size when none was given on the CLI.
        pool_size = num_workers or worker.cfg.num_workers

        if pool_size and pool_size > 1:
            worker.start_worker_pool(num_workers=pool_size, background=background)
        else:
            worker.start_worker(background=background)
-
93
-
94
- # @app.command()
95
- # def cancel_all_jobs(
96
- # type: str | None = None,
97
- # queue_name: str | None = None,
98
- # name: str | None = None,
99
- # base_dir: str | None = None,
100
- # storage_options: str | None = None,
101
- # log_level: str = "info",
102
- # ):
103
- # """
104
- # Cancel all jobs from the scheduler.
105
-
106
- # Note: This is different from deleting jobs as it only stops them from running but keeps their history.
107
-
108
- # Args:
109
- # type: Type of the job queue (rq)
110
- # queue_name: Name of the queue (RQ only)
111
- # name: Name of the scheduler
112
- # base_dir: Base directory for the scheduler
113
- # storage_options: Storage options as JSON or key=value pairs
114
- # log_level: Logging level
115
- # """
116
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
117
-
118
- # with JobQueueManager(
119
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
120
- # ) as worker:
121
- # if worker.cfg.backend.type != "rq":
122
- # logger.info(f"Job cancellation is not supported for {worker.cfg.backend.type} workers. Skipping.")
123
- # return
124
-
125
- # worker.cancel_all_jobs(queue_name=queue_name)
126
-
127
- # @app.command()
128
- # def cancel_all_schedules(
129
- # type: str | None = None,
130
- # name: str | None = None,
131
- # base_dir: str | None = None,
132
- # storage_options: str | None = None,
133
- # log_level: str = "info",
134
- # ):
135
- # """
136
- # Cancel all schedules from the scheduler.
137
-
138
- # Note: This is different from deleting schedules as it only stops them from running but keeps their configuration.
139
-
140
- # Args:
141
- # type: Type of the job queue (rq)
142
- # name: Name of the scheduler
143
- # base_dir: Base directory for the scheduler
144
- # storage_options: Storage options as JSON or key=value pairs
145
- # log_level: Logging level
146
- # """
147
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
148
-
149
- # with JobQueueManager(
150
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
151
- # ) as worker:
152
- # worker.cancel_all_schedules()
153
-
154
-
155
@app.command()
def cancel_job(
    job_id: str = typer.Argument(..., help="ID of the job to cancel"),
    all: bool = typer.Option(
        False, "--all", "-a", help="Cancel all jobs instead of a specific one"
    ),
    queue_name: str | None = typer.Option(
        None,
        help="Name of the queue (RQ only). If provided with --all, cancels all jobs in the queue",
    ),
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
):
    """
    Cancel one job, or every job, in the queue.

    Cancelling stops a job from executing (or signals a running job to stop)
    while keeping its history — unlike deletion, which removes the record.

    Args:
        job_id: ID of the job to cancel (ignored if --all is used)
        all: Cancel all jobs instead of a specific one
        queue_name: For RQ only, specifies the queue to cancel jobs from
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)

    Examples:
        # Cancel a specific job
        $ flowerpower job-queue cancel-job job-123456

        # Cancel all jobs in the default queue
        $ flowerpower job-queue cancel-job --all dummy-id

        # Cancel all jobs in a specific queue (RQ only)
        $ flowerpower job-queue cancel-job --all dummy-id --queue-name high-priority

        # Specify the backend type explicitly
        $ flowerpower job-queue cancel-job job-123456 --type rq
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    ) as worker:
        if not all:
            worker.cancel_job(job_id)
            logger.info(f"Job {job_id} cancelled")
            return

        # queue_name is only meaningful for the RQ backend.
        count = worker.cancel_all_jobs(
            queue_name=queue_name if worker.cfg.backend.type == "rq" else None
        )
        suffix = f" in queue '{queue_name}'" if queue_name else ""
        logger.info(f"Cancelled {count} jobs" + suffix)
-
230
-
231
@app.command()
def cancel_schedule(
    schedule_id: str,
    all: bool = False,
    type: str | None = None,
    name: str | None = None,
    base_dir: str | None = None,
    storage_options: str | None = None,
    log_level: str = "info",
):
    """
    Cancel one schedule, or all of them.

    Unlike deletion, cancelling only stops a schedule from running while
    keeping its configuration in place.

    Args:
        schedule_id: ID of the schedule to cancel
        all: If True, cancel all schedules
        type: Type of the job queue (rq)
        name: Name of the scheduler
        base_dir: Base directory for the scheduler
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    ) as worker:
        if not all:
            worker.cancel_schedule(schedule_id)
        else:
            worker.cancel_all_schedules()
-
269
-
270
- # @app.command()
271
- # def delete_all_jobs(
272
- # type: str | None = None,
273
- # queue_name: str | None = None,
274
- # name: str | None = None,
275
- # base_dir: str | None = None,
276
- # storage_options: str | None = None,
277
- # log_level: str = "info",
278
- # ):
279
- # """
280
- # Delete all jobs from the scheduler. Note that this is different from cancelling jobs
281
- # as it also removes job history and results.
282
-
283
- # Args:
284
- # queue_name: Name of the queue (RQ only)
285
- # name: Name of the scheduler
286
- # base_dir: Base directory for the scheduler
287
- # storage_options: Storage options as JSON or key=value pairs
288
- # log_level: Logging level
289
- # """
290
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
291
-
292
- # with JobQueueManager(
293
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
294
- # ) as worker:
295
- # worker.delete_all_jobs(queue_name=queue_name if worker.cfg.backend.type == "rq" else None)
296
-
297
- # @app.command()
298
- # def delete_all_schedules(
299
- # type: str | None = None,
300
- # name: str | None = None,
301
- # base_dir: str | None = None,
302
- # storage_options: str | None = None,
303
- # log_level: str = "info",
304
- # ):
305
- # """
306
- # Delete all schedules from the scheduler.
307
-
308
- # Args:
309
- # name: Name of the scheduler
310
- # base_dir: Base directory for the scheduler
311
- # storage_options: Storage options as JSON or key=value pairs
312
- # log_level: Logging level
313
- # """
314
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
315
-
316
- # with JobQueueManager(
317
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
318
- # ) as worker:
319
- # worker.delete_all_schedules()
320
-
321
-
322
@app.command()
def delete_job(
    job_id: str,
    all: bool = False,
    queue_name: str | None = None,
    type: str | None = None,
    name: str | None = None,
    base_dir: str | None = None,
    storage_options: str | None = None,
    log_level: str = "info",
):
    """
    Delete one job, or all jobs, including their history and results.

    Args:
        job_id: ID of the job to delete
        all: If True, delete all jobs
        queue_name: Name of the queue (RQ only). If provided and all is True, delete all jobs in the queue
        type: Type of the job queue (rq)
        name: Name of the scheduler
        base_dir: Base directory for the scheduler
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    ) as worker:
        if not all:
            worker.delete_job(job_id)
            return

        # queue_name is only meaningful for the RQ backend.
        rq_queue = queue_name if worker.cfg.backend.type == "rq" else None
        worker.delete_all_jobs(queue_name=rq_queue)
-
362
-
363
@app.command()
def delete_schedule(
    schedule_id: str,
    all: bool = False,
    type: str | None = None,
    name: str | None = None,
    base_dir: str | None = None,
    storage_options: str | None = None,
    log_level: str = "info",
):
    """
    Delete one schedule, or all schedules, removing their configuration.

    Args:
        schedule_id: ID of the schedule to delete
        all: If True, delete all schedules
        type: Type of the job queue (rq)
        name: Name of the scheduler
        base_dir: Base directory for the scheduler
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    ) as worker:
        if not all:
            worker.delete_schedule(schedule_id)
        else:
            worker.delete_all_schedules()
-
399
-
400
- # @app.command()
401
- # def get_job(
402
- # job_id: str,
403
- # type: str | None = None,
404
- # name: str | None = None,
405
- # base_dir: str | None = None,
406
- # storage_options: str | None = None,
407
- # log_level: str = "info",
408
- # ):
409
- # """
410
- # Get information about a specific job.
411
-
412
- # Args:
413
- # job_id: ID of the job
414
- # name: Name of the scheduler
415
- # base_dir: Base directory for the scheduler
416
- # storage_options: Storage options as JSON or key=value pairs
417
- # log_level: Logging level
418
- # """
419
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
420
-
421
- # with JobQueueManager(
422
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
423
- # ) as worker:
424
- # # show_jobs should display the job info
425
- # worker.show_jobs(job_id=job_id)
426
-
427
- # @app.command()
428
- # def get_job_result(
429
- # job_id: str,
430
- # type: str | None = None,
431
- # name: str | None = None,
432
- # base_dir: str | None = None,
433
- # storage_options: str | None = None,
434
- # log_level: str = "info",
435
- # wait: bool = True,
436
- # ):
437
- # """
438
- # Get the result of a specific job.
439
-
440
- # Args:
441
- # job_id: ID of the job
442
- # name: Name of the scheduler
443
- # base_dir: Base directory for the scheduler
444
- # storage_options: Storage options as JSON or key=value pairs
445
- # log_level: Logging level
446
- # wait: Wait for the result if job is still running (APScheduler only)
447
- # """
448
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
449
-
450
- # with JobQueueManager(
451
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
452
- # ) as worker:
453
- # # worker's get_job_result method will handle the result display
454
- # worker.get_job_result(job_id, wait=wait if worker.cfg.backend.type == "apscheduler" else False)
455
-
456
- # @app.command()
457
- # def get_jobs(
458
- # type: str | None = None,
459
- # queue_name: str | None = None,
460
- # name: str | None = None,
461
- # base_dir: str | None = None,
462
- # storage_options: str | None = None,
463
- # log_level: str = "info",
464
- # ):
465
- # """
466
- # List all jobs.
467
-
468
- # Args:
469
- # queue_name: Name of the queue (RQ only)
470
- # name: Name of the scheduler
471
- # base_dir: Base directory for the scheduler
472
- # storage_options: Storage options as JSON or key=value pairs
473
- # log_level: Logging level
474
- # """
475
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
476
-
477
- # with JobQueueManager(
478
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
479
- # ) as worker:
480
- # worker.show_jobs()
481
-
482
- # @app.command()
483
- # def get_schedule(
484
- # schedule_id: str,
485
- # type: str | None = None,
486
- # name: str | None = None,
487
- # base_dir: str | None = None,
488
- # storage_options: str | None = None,
489
- # log_level: str = "info",
490
- # ):
491
- # """
492
- # Get information about a specific schedule.
493
-
494
- # Args:
495
- # schedule_id: ID of the schedule
496
- # name: Name of the scheduler
497
- # base_dir: Base directory for the scheduler
498
- # storage_options: Storage options as JSON or key=value pairs
499
- # log_level: Logging level
500
- # """
501
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
502
-
503
- # with JobQueueManager(
504
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
505
- # ) as worker:
506
- # # show_schedule should display the schedule info
507
- # worker.show_schedules(schedule_id=schedule_id)
508
-
509
- # @app.command()
510
- # def get_schedules(
511
- # type: str | None = None,
512
- # name: str | None = None,
513
- # base_dir: str | None = None,
514
- # storage_options: str | None = None,
515
- # log_level: str = "info",
516
- # ):
517
- # """
518
- # List all schedules.
519
-
520
- # Args:
521
- # name: Name of the scheduler
522
- # base_dir: Base directory for the scheduler
523
- # storage_options: Storage options as JSON or key=value pairs
524
- # log_level: Logging level
525
- # """
526
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
527
-
528
- # with JobQueueManager(
529
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
530
- # ) as worker:
531
- # worker.show_schedules()
532
-
533
-
534
@app.command()
def show_job_ids(
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
):
    """
    Show all job IDs in the job queue.

    This command displays all job IDs currently in the system, helping you identify
    jobs for other operations like getting results, canceling, or deleting jobs.

    Args:
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)

    Examples:
        # Show job IDs using default settings
        $ flowerpower job-queue show-job-ids

        # Show job IDs for a specific queue type
        $ flowerpower job-queue show-job-ids --type rq

        # Show job IDs with a custom scheduler configuration
        $ flowerpower job-queue show-job-ids --name my-scheduler

        # Show job IDs with debug logging
        $ flowerpower job-queue show-job-ids --log-level debug
    """
    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=parsed_storage_options,
        log_level=log_level,
    ) as worker:
        ids = worker.job_ids
        if not ids:
            # Covers both an empty collection and a None return.
            logger.info("No job IDs found")
        else:
            # Original code had a redundant `elif not isinstance(ids, type(None))`
            # guard here; `not ids` above already handles None.
            for job_id in ids:
                print(f"- {job_id}")
-
595
-
596
@app.command()
def show_schedule_ids(
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
):
    """
    Show all schedule IDs in the job queue.

    This command displays all schedule IDs currently in the system, helping you
    identify schedules for other operations like pausing, resuming, or deleting schedules.

    Args:
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)

    Examples:
        # Show schedule IDs using default settings
        $ flowerpower job-queue show-schedule-ids

        # Show schedule IDs for RQ
        $ flowerpower job-queue show-schedule-ids --type rq

        # Show schedule IDs with a custom scheduler configuration
        $ flowerpower job-queue show-schedule-ids --name my-scheduler

        # Show schedule IDs with debug logging
        $ flowerpower job-queue show-schedule-ids --log-level debug
    """
    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=parsed_storage_options,
        log_level=log_level,
    ) as worker:
        ids = worker.schedule_ids
        if not ids:
            # Covers both an empty collection and a None return.
            logger.info("No schedule IDs found")
        else:
            # Original code had a redundant `elif not isinstance(ids, type(None))`
            # guard here; `not ids` above already handles None.
            for schedule_id in ids:
                print(f"- {schedule_id}")
-
657
-
658
- # @app.command()
659
- # def pause_all_schedules(
660
- # type: str | None = None,
661
- # name: str | None = None,
662
- # base_dir: str | None = None,
663
- # storage_options: str | None = None,
664
- # log_level: str = "info",
665
- # ):
666
- # """
667
- # Pause all schedules.
668
-
669
- # Note: This functionality is only available for APScheduler workers.
670
-
671
- # Args:
672
- # name: Name of the scheduler
673
- # base_dir: Base directory for the scheduler
674
- # storage_options: Storage options as JSON or key=value pairs
675
- # log_level: Logging level
676
- # """
677
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
678
-
679
- # with JobQueueManager(
680
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
681
- # ) as worker:
682
- # if worker.cfg.backend.type != "apscheduler":
683
- # logger.info(f"Schedule pausing is not supported for {worker.cfg.backend.type} workers.")
684
- # return
685
- # worker.pause_all_schedules()
686
-
687
-
688
@app.command()
def pause_schedule(
    schedule_id: str = typer.Argument(..., help="ID of the schedule to pause"),
    all: bool = typer.Option(
        False, "--all", "-a", help="Pause all schedules instead of a specific one"
    ),
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
):
    """
    Pause a schedule or multiple schedules.

    This command temporarily stops a scheduled job from running while maintaining its
    configuration. Paused schedules can be resumed later. Note that this functionality
    is not supported for RQ workers.

    Args:
        schedule_id: ID of the schedule to pause (ignored if --all is used)
        all: Pause all schedules instead of a specific one
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)

    Examples:
        # Pause a specific schedule
        $ flowerpower job-queue pause-schedule schedule-123456

        # Pause all schedules
        $ flowerpower job-queue pause-schedule --all dummy-id

        # Note: Schedule pausing is not supported for RQ workers
    """
    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=parsed_storage_options,
        log_level=log_level,
    ) as worker:
        # RQ has no pause concept. Original code returned unconditionally here,
        # leaving the logic below unreachable; gate on the backend type instead
        # so a future backend that supports pausing can use it.
        if worker.cfg.backend.type == "rq":
            logger.info("Schedule pausing is not supported for RQ workers.")
            return
        if all:
            count = worker.pause_all_schedules()
            logger.info(f"Paused {count} schedules")
        else:
            success = worker.pause_schedule(schedule_id)
            if success:
                logger.info(f"Schedule {schedule_id} paused successfully")
            else:
                logger.error(f"Failed to pause schedule {schedule_id}")
-
754
-
755
- # @app.command()
756
- # def resume_all_schedules(
757
- # type: str | None = None,
758
- # name: str | None = None,
759
- # base_dir: str | None = None,
760
- # storage_options: str | None = None,
761
- # log_level: str = "info",
762
- # ):
763
- # """
764
- # Resume all paused schedules.
765
-
766
- # Note: This functionality is only available for APScheduler workers.
767
-
768
- # Args:
769
- # name: Name of the scheduler
770
- # base_dir: Base directory for the scheduler
771
- # storage_options: Storage options as JSON or key=value pairs
772
- # log_level: Logging level
773
- # """
774
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
775
-
776
- # with JobQueueManager(
777
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
778
- # ) as worker:
779
- # if worker.cfg.backend.type != "apscheduler":
780
- # logger.info(f"Schedule resuming is not supported for {worker.cfg.backend.type} workers.")
781
- # return
782
- # worker.resume_all_schedules()
783
-
784
-
785
@app.command()
def resume_schedule(
    schedule_id: str = typer.Argument(..., help="ID of the schedule to resume"),
    all: bool = typer.Option(
        False, "--all", "-a", help="Resume all schedules instead of a specific one"
    ),
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
):
    """
    Resume a paused schedule or multiple schedules.

    This command restarts previously paused schedules, allowing them to run again
    according to their original configuration. Note that this functionality is not
    supported for RQ workers.

    Args:
        schedule_id: ID of the schedule to resume (ignored if --all is used)
        all: Resume all schedules instead of a specific one
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)

    Examples:
        # Resume a specific schedule
        $ flowerpower job-queue resume-schedule schedule-123456

        # Resume all schedules
        $ flowerpower job-queue resume-schedule --all dummy-id

        # Note: Schedule resuming is not supported for RQ workers

        # Set a specific logging level
        $ flowerpower job-queue resume-schedule schedule-123456 --log-level debug
    """
    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}

    with JobQueueManager(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=parsed_storage_options,
        log_level=log_level,
    ) as worker:
        # RQ has no resume concept. Original code returned unconditionally here,
        # leaving the logic below unreachable; gate on the backend type instead
        # so a future backend that supports resuming can use it.
        if worker.cfg.backend.type == "rq":
            logger.info("Schedule resuming is not supported for RQ workers.")
            return
        if all:
            count = worker.resume_all_schedules()
            logger.info(f"Resumed {count} schedules")
        else:
            success = worker.resume_schedule(schedule_id)
            if success:
                logger.info(f"Schedule {schedule_id} resumed successfully")
            else:
                logger.error(f"Failed to resume schedule {schedule_id}")
-
854
-
855
@app.command()
def show_jobs(
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    queue_name: str | None = typer.Option(
        None, help="Name of the queue to show jobs from (RQ only)"
    ),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
    format: str = typer.Option("table", help="Output format (table, json, yaml)"),
):
    """
    Display detailed information about all jobs in the queue.

    Presents each job's status, creation time, execution time, and related
    details in the requested output format.

    Args:
        type: Type of job queue backend (rq)
        queue_name: Name of the queue to show jobs from (RQ only)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)
        format: Output format for the job information

    Examples:
        # Show all jobs using default settings
        $ flowerpower job-queue show-jobs

        # Show jobs in a specific RQ queue
        $ flowerpower job-queue show-jobs --queue-name high-priority

        # Display jobs in JSON format
        $ flowerpower job-queue show-jobs --format json
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    # Collect the manager configuration once, then delegate rendering to the
    # worker implementation.
    manager_kwargs = dict(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    )
    with JobQueueManager(**manager_kwargs) as manager:
        manager.show_jobs(queue_name=queue_name, format=format)
915
@app.command()
def show_schedules(
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
    format: str = typer.Option("table", help="Output format (table, json, yaml)"),
):
    """
    Display detailed information about all schedules.

    Presents each scheduled job's timing configuration, status, and related
    details in the requested output format.

    Args:
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)
        format: Output format for the schedule information

    Examples:
        # Show all schedules using default settings
        $ flowerpower job-queue show-schedules

        # Display schedules in JSON format
        $ flowerpower job-queue show-schedules --format json
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    manager_kwargs = dict(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    )
    with JobQueueManager(**manager_kwargs) as manager:
        manager.show_schedules(format=format)
968
@app.command()
def enqueue_pipeline(
    name: str = typer.Argument(..., help="Name of the pipeline to enqueue"),
    base_dir: str | None = typer.Option(None, help="Base directory for the pipeline"),
    inputs: str | None = typer.Option(
        None, help="Input parameters as JSON, dict string, or key=value pairs"
    ),
    final_vars: str | None = typer.Option(None, help="Final variables as JSON or list"),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON, dict string, or key=value pairs"
    ),
    log_level: str | None = typer.Option(
        None, help="Logging level (debug, info, warning, error, critical)"
    ),
    run_in: str | None = typer.Option(
        None, help="Schedule job to run after a delay (e.g., '5m', '1h', '30s')"
    ),
    run_at: str | None = typer.Option(
        None, help="Schedule job to run at a specific datetime (ISO format)"
    ),
):
    """
    Enqueue a pipeline for execution via the job queue.

    This command queues a pipeline for asynchronous execution using the
    configured job queue backend (RQ). The job can be executed immediately,
    after a delay (--run-in), or at a specific time (--run-at).

    Args:
        name: Name of the pipeline to enqueue
        base_dir: Base directory containing pipelines and configurations
        inputs: Input parameters for the pipeline
        final_vars: Final variables to request from the pipeline
        storage_options: Options for storage backends
        log_level: Set the logging level
        run_in: Delay before execution (duration format like '5m', '1h', '30s')
        run_at: Specific datetime for execution (ISO format)

    Raises:
        typer.Exit: If the project cannot be loaded, no job queue is
            configured, an option fails to parse, or enqueueing fails.

    Examples:
        # Enqueue for immediate execution
        $ flowerpower job-queue enqueue-pipeline my_pipeline

        # Enqueue with custom inputs
        $ flowerpower job-queue enqueue-pipeline my_pipeline --inputs '{"data_path": "data/file.csv"}'

        # Enqueue with delay
        $ flowerpower job-queue enqueue-pipeline my_pipeline --run-in "30m"

        # Enqueue for specific time
        $ flowerpower job-queue enqueue-pipeline my_pipeline --run-at "2025-01-01T09:00:00"
    """
    parsed_inputs = parse_dict_or_list_param(inputs, "dict")
    parsed_final_vars = parse_dict_or_list_param(final_vars, "list")
    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")

    # Use FlowerPowerProject for consistency with the rest of the CLI.
    project = FlowerPowerProject.load(
        base_dir=base_dir,
        storage_options=parsed_storage_options or {},
        log_level=log_level,
    )

    if project is None:
        logger.error(f"Failed to load FlowerPower project from {base_dir or '.'}")
        raise typer.Exit(1)

    if project.job_queue_manager is None:
        logger.error("No job queue configured. Cannot enqueue pipeline jobs.")
        raise typer.Exit(1)

    try:
        kwargs = {}

        # Parse run_in duration if provided (seconds as float).
        if run_in:
            try:
                kwargs["run_in"] = duration_parser.parse(run_in).total_seconds()
            except Exception as e:
                logger.error(f"Invalid duration format '{run_in}': {e}")
                raise typer.Exit(1)

        # Parse run_at datetime if provided (ISO 8601).
        if run_at:
            try:
                kwargs["run_at"] = dt.datetime.fromisoformat(run_at)
            except Exception as e:
                logger.error(f"Invalid datetime format '{run_at}': {e}")
                raise typer.Exit(1)

        # Add pipeline execution parameters.
        if parsed_inputs:
            kwargs["inputs"] = parsed_inputs
        if parsed_final_vars:
            kwargs["final_vars"] = parsed_final_vars

        job_id = project.enqueue(name, **kwargs)

        if run_in:
            logger.info(
                f"Pipeline '{name}' enqueued to run in {run_in}. Job ID: {job_id}"
            )
        elif run_at:
            logger.info(
                f"Pipeline '{name}' enqueued to run at {run_at}. Job ID: {job_id}"
            )
        else:
            logger.info(
                f"Pipeline '{name}' enqueued for immediate execution. Job ID: {job_id}"
            )

    except typer.Exit:
        # Bug fix: typer.Exit subclasses Exception (via click's Exit /
        # RuntimeError), so without this clause the generic handler below
        # would catch our own exit signals from the validation branches above
        # and log a misleading second "Failed to enqueue" error.
        raise
    except Exception as e:
        logger.error(f"Failed to enqueue pipeline '{name}': {e}")
        raise typer.Exit(1)
1083
-
1084
@app.command()
def schedule_pipeline(
    name: str = typer.Argument(..., help="Name of the pipeline to schedule"),
    base_dir: str | None = typer.Option(None, help="Base directory for the pipeline"),
    cron: str | None = typer.Option(
        None, help="Cron expression for recurring execution (e.g., '0 9 * * *')"
    ),
    interval: str | None = typer.Option(
        None, help="Interval for recurring execution (e.g., '1h', '30m')"
    ),
    inputs: str | None = typer.Option(
        None, help="Input parameters as JSON, dict string, or key=value pairs"
    ),
    final_vars: str | None = typer.Option(None, help="Final variables as JSON or list"),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON, dict string, or key=value pairs"
    ),
    log_level: str | None = typer.Option(
        None, help="Logging level (debug, info, warning, error, critical)"
    ),
    schedule_id: str | None = typer.Option(
        None, help="Unique identifier for the schedule"
    ),
):
    """
    Schedule a pipeline for recurring or future execution.

    Sets up recurring execution of a pipeline using either a cron expression
    or an interval (exactly one of --cron / --interval is required) via the
    configured job queue backend.

    Args:
        name: Name of the pipeline to schedule
        base_dir: Base directory containing pipelines and configurations
        cron: Cron expression for scheduling (e.g., '0 9 * * *' for 9 AM daily)
        interval: Interval for recurring execution (duration format)
        inputs: Input parameters for the pipeline
        final_vars: Final variables to request from the pipeline
        storage_options: Options for storage backends
        log_level: Set the logging level
        schedule_id: Custom identifier for the schedule

    Raises:
        typer.Exit: If neither or both of --cron/--interval are given, the
            project cannot be loaded, no job queue is configured, the interval
            fails to parse, or scheduling fails.

    Examples:
        # Schedule daily at 9 AM
        $ flowerpower job-queue schedule-pipeline my_pipeline --cron "0 9 * * *"

        # Schedule every 30 minutes
        $ flowerpower job-queue schedule-pipeline my_pipeline --interval "30m"

        # Schedule with custom inputs and ID
        $ flowerpower job-queue schedule-pipeline my_pipeline --cron "0 0 * * *" \\
            --inputs '{"env": "prod"}' --schedule-id "nightly-prod"
    """
    if not cron and not interval:
        logger.error("Either --cron or --interval must be specified")
        raise typer.Exit(1)

    if cron and interval:
        logger.error("Cannot specify both --cron and --interval")
        raise typer.Exit(1)

    parsed_inputs = parse_dict_or_list_param(inputs, "dict")
    parsed_final_vars = parse_dict_or_list_param(final_vars, "list")
    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")

    # Use FlowerPowerProject for consistency with the rest of the CLI.
    project = FlowerPowerProject.load(
        base_dir=base_dir,
        storage_options=parsed_storage_options or {},
        log_level=log_level,
    )

    if project is None:
        logger.error(f"Failed to load FlowerPower project from {base_dir or '.'}")
        raise typer.Exit(1)

    if project.job_queue_manager is None:
        logger.error("No job queue configured. Cannot schedule pipeline jobs.")
        raise typer.Exit(1)

    try:
        # Prepare schedule parameters.
        kwargs = {}
        if cron:
            kwargs["cron"] = cron
        if interval:
            try:
                interval_seconds = duration_parser.parse(interval).total_seconds()
                kwargs["interval"] = {"seconds": interval_seconds}
            except Exception as e:
                logger.error(f"Invalid interval format '{interval}': {e}")
                raise typer.Exit(1)

        if schedule_id:
            kwargs["schedule_id"] = schedule_id
        if parsed_inputs:
            kwargs["inputs"] = parsed_inputs
        if parsed_final_vars:
            kwargs["final_vars"] = parsed_final_vars

        schedule_result = project.schedule(name, **kwargs)

        if cron:
            logger.info(
                f"Pipeline '{name}' scheduled with cron '{cron}'. Schedule ID: {schedule_result}"
            )
        elif interval:
            logger.info(
                f"Pipeline '{name}' scheduled every {interval}. Schedule ID: {schedule_result}"
            )

    except typer.Exit:
        # Bug fix: typer.Exit subclasses Exception (via click's Exit /
        # RuntimeError), so without this clause the generic handler below
        # would swallow the interval-validation exit above and log a
        # misleading second "Failed to schedule" error.
        raise
    except Exception as e:
        logger.error(f"Failed to schedule pipeline '{name}': {e}")
        raise typer.Exit(1)
1199
@app.command()
def run_job(
    job_id: str = typer.Argument(..., help="ID of the job to run"),
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
):
    """
    Execute a specific job by its ID.

    Runs a previously enqueued job immediately, regardless of its original
    schedule.

    Args:
        job_id: ID of the job to run
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)

    Raises:
        typer.Exit: If the job fails to run.

    Examples:
        # Run a specific job
        $ flowerpower job-queue run-job job-123456

        # Run a job with debug logging
        $ flowerpower job-queue run-job job-123456 --log-level debug
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    manager_kwargs = dict(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    )
    with JobQueueManager(**manager_kwargs) as manager:
        try:
            manager.run_job(job_id)
        except Exception as e:
            logger.error(f"Failed to run job '{job_id}': {e}")
            raise typer.Exit(1)
        logger.info(f"Job '{job_id}' finished running.")
1256
-
1257
@app.command()
def list_schedules(
    type: str | None = typer.Option(None, help="Type of job queue backend (rq)"),
    name: str | None = typer.Option(
        None, help="Name of the scheduler configuration to use"
    ),
    base_dir: str | None = typer.Option(
        None, help="Base directory for the scheduler configuration"
    ),
    storage_options: str | None = typer.Option(
        None, help="Storage options as JSON or key=value pairs"
    ),
    log_level: str = typer.Option(
        "info", help="Logging level (debug, info, warning, error, critical)"
    ),
    format: str = typer.Option("table", help="Output format (table, json, yaml)"),
    show_status: bool = typer.Option(
        True, help="Show schedule status (active, paused, etc.)"
    ),
    show_next_run: bool = typer.Option(True, help="Show next scheduled execution time"),
):
    """
    List all schedules with detailed status information.

    Enhanced version of show-schedules: includes trigger configuration,
    status, and next run time. Falls back to the basic listing when the
    worker's show_schedules does not accept the extra options.

    Args:
        type: Type of job queue backend (rq)
        name: Name of the scheduler configuration to use
        base_dir: Base directory for the scheduler configuration
        storage_options: Storage options as JSON or key=value pairs
        log_level: Logging level (debug, info, warning, error, critical)
        format: Output format for the schedule information
        show_status: Include schedule status information
        show_next_run: Include next execution time information

    Examples:
        # List all schedules with full details
        $ flowerpower job-queue list-schedules

        # List schedules in JSON format
        $ flowerpower job-queue list-schedules --format json

        # List schedules without status information
        $ flowerpower job-queue list-schedules --no-show-status
    """
    opts = parse_dict_or_list_param(storage_options, "dict") or {}

    manager_kwargs = dict(
        type=type,
        name=name,
        base_dir=base_dir,
        storage_options=opts,
        log_level=log_level,
    )
    with JobQueueManager(**manager_kwargs) as manager:
        try:
            # Prefer the enhanced listing with status / next-run columns.
            manager.show_schedules(
                format=format,
                show_status=show_status,
                show_next_run=show_next_run,
            )
        except TypeError:
            # Older workers only accept `format`; degrade gracefully.
            logger.warning(
                "Using basic schedule listing (enhanced options not supported)"
            )
            manager.show_schedules(format=format)