FlowerPower 0.11.6.20__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the differences between the package versions as they appear in their respective public registries.
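For readers who want to reproduce this kind of file-level comparison locally, the sketch below uses only the Python standard library. It assumes both wheels have already been downloaded (for example with pip download); the wheel file names are placeholders, not the exact artifacts behind this page.

import zipfile

# List files added or removed between two wheel archives.
# The paths below are placeholders for locally downloaded wheels.
def wheel_files(path: str) -> set[str]:
    with zipfile.ZipFile(path) as zf:
        return set(zf.namelist())

old = wheel_files("flowerpower-0.11.6.20-py3-none-any.whl")
new = wheel_files("flowerpower-0.21.0-py3-none-any.whl")

print("removed:", sorted(old - new))
print("added:  ", sorted(new - old))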
Files changed (101)
  1. flowerpower/__init__.py +2 -6
  2. flowerpower/cfg/__init__.py +7 -14
  3. flowerpower/cfg/base.py +29 -25
  4. flowerpower/cfg/pipeline/__init__.py +8 -6
  5. flowerpower/cfg/pipeline/_schedule.py +32 -0
  6. flowerpower/cfg/pipeline/adapter.py +0 -5
  7. flowerpower/cfg/pipeline/builder.py +377 -0
  8. flowerpower/cfg/pipeline/run.py +36 -0
  9. flowerpower/cfg/project/__init__.py +11 -24
  10. flowerpower/cfg/project/adapter.py +0 -12
  11. flowerpower/cli/__init__.py +2 -21
  12. flowerpower/cli/cfg.py +0 -3
  13. flowerpower/cli/mqtt.py +0 -6
  14. flowerpower/cli/pipeline.py +22 -415
  15. flowerpower/cli/utils.py +0 -1
  16. flowerpower/flowerpower.py +345 -146
  17. flowerpower/pipeline/__init__.py +2 -0
  18. flowerpower/pipeline/base.py +21 -12
  19. flowerpower/pipeline/io.py +58 -54
  20. flowerpower/pipeline/manager.py +165 -726
  21. flowerpower/pipeline/pipeline.py +643 -0
  22. flowerpower/pipeline/registry.py +285 -18
  23. flowerpower/pipeline/visualizer.py +5 -6
  24. flowerpower/plugins/io/__init__.py +8 -0
  25. flowerpower/plugins/mqtt/__init__.py +7 -11
  26. flowerpower/settings/__init__.py +0 -2
  27. flowerpower/settings/{backend.py → _backend.py} +0 -21
  28. flowerpower/settings/logging.py +1 -1
  29. flowerpower/utils/logging.py +24 -12
  30. flowerpower/utils/misc.py +17 -256
  31. flowerpower/utils/monkey.py +1 -83
  32. flowerpower-0.21.0.dist-info/METADATA +463 -0
  33. flowerpower-0.21.0.dist-info/RECORD +44 -0
  34. flowerpower/cfg/pipeline/schedule.py +0 -74
  35. flowerpower/cfg/project/job_queue.py +0 -238
  36. flowerpower/cli/job_queue.py +0 -1061
  37. flowerpower/fs/__init__.py +0 -29
  38. flowerpower/fs/base.py +0 -662
  39. flowerpower/fs/ext.py +0 -2143
  40. flowerpower/fs/storage_options.py +0 -1420
  41. flowerpower/job_queue/__init__.py +0 -294
  42. flowerpower/job_queue/apscheduler/__init__.py +0 -11
  43. flowerpower/job_queue/apscheduler/_setup/datastore.py +0 -110
  44. flowerpower/job_queue/apscheduler/_setup/eventbroker.py +0 -93
  45. flowerpower/job_queue/apscheduler/manager.py +0 -1051
  46. flowerpower/job_queue/apscheduler/setup.py +0 -554
  47. flowerpower/job_queue/apscheduler/trigger.py +0 -169
  48. flowerpower/job_queue/apscheduler/utils.py +0 -311
  49. flowerpower/job_queue/base.py +0 -413
  50. flowerpower/job_queue/rq/__init__.py +0 -10
  51. flowerpower/job_queue/rq/_trigger.py +0 -37
  52. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +0 -226
  53. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +0 -231
  54. flowerpower/job_queue/rq/manager.py +0 -1582
  55. flowerpower/job_queue/rq/setup.py +0 -154
  56. flowerpower/job_queue/rq/utils.py +0 -69
  57. flowerpower/mqtt.py +0 -12
  58. flowerpower/pipeline/job_queue.py +0 -583
  59. flowerpower/pipeline/runner.py +0 -603
  60. flowerpower/plugins/io/base.py +0 -2520
  61. flowerpower/plugins/io/helpers/datetime.py +0 -298
  62. flowerpower/plugins/io/helpers/polars.py +0 -875
  63. flowerpower/plugins/io/helpers/pyarrow.py +0 -570
  64. flowerpower/plugins/io/helpers/sql.py +0 -202
  65. flowerpower/plugins/io/loader/__init__.py +0 -28
  66. flowerpower/plugins/io/loader/csv.py +0 -37
  67. flowerpower/plugins/io/loader/deltatable.py +0 -190
  68. flowerpower/plugins/io/loader/duckdb.py +0 -19
  69. flowerpower/plugins/io/loader/json.py +0 -37
  70. flowerpower/plugins/io/loader/mqtt.py +0 -159
  71. flowerpower/plugins/io/loader/mssql.py +0 -26
  72. flowerpower/plugins/io/loader/mysql.py +0 -26
  73. flowerpower/plugins/io/loader/oracle.py +0 -26
  74. flowerpower/plugins/io/loader/parquet.py +0 -35
  75. flowerpower/plugins/io/loader/postgres.py +0 -26
  76. flowerpower/plugins/io/loader/pydala.py +0 -19
  77. flowerpower/plugins/io/loader/sqlite.py +0 -23
  78. flowerpower/plugins/io/metadata.py +0 -244
  79. flowerpower/plugins/io/saver/__init__.py +0 -28
  80. flowerpower/plugins/io/saver/csv.py +0 -36
  81. flowerpower/plugins/io/saver/deltatable.py +0 -186
  82. flowerpower/plugins/io/saver/duckdb.py +0 -19
  83. flowerpower/plugins/io/saver/json.py +0 -36
  84. flowerpower/plugins/io/saver/mqtt.py +0 -28
  85. flowerpower/plugins/io/saver/mssql.py +0 -26
  86. flowerpower/plugins/io/saver/mysql.py +0 -26
  87. flowerpower/plugins/io/saver/oracle.py +0 -26
  88. flowerpower/plugins/io/saver/parquet.py +0 -36
  89. flowerpower/plugins/io/saver/postgres.py +0 -26
  90. flowerpower/plugins/io/saver/pydala.py +0 -20
  91. flowerpower/plugins/io/saver/sqlite.py +0 -24
  92. flowerpower/plugins/mqtt/cfg.py +0 -17
  93. flowerpower/plugins/mqtt/manager.py +0 -962
  94. flowerpower/settings/job_queue.py +0 -87
  95. flowerpower/utils/scheduler.py +0 -311
  96. flowerpower-0.11.6.20.dist-info/METADATA +0 -537
  97. flowerpower-0.11.6.20.dist-info/RECORD +0 -102
  98. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/WHEEL +0 -0
  99. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/entry_points.txt +0 -0
  100. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/licenses/LICENSE +0 -0
  101. {flowerpower-0.11.6.20.dist-info → flowerpower-0.21.0.dist-info}/top_level.txt +0 -0
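The listing above shows that 0.21.0 removes the entire job_queue subsystem and its CLI, the fs module, and the bundled io plugin implementations (loaders, savers, helpers). Code written against 0.11.x imports such as flowerpower.job_queue will therefore no longer import on 0.21.0. A minimal, hedged guard (the import path is inferred from the relative import in the deleted module shown below; the fallback message is an assumption):

# Guard for code that still targets the removed 0.11.x job-queue API.
# flowerpower.job_queue no longer exists in 0.21.0, so the import fails there.
try:
    from flowerpower.job_queue import JobQueueManager
except ImportError as exc:
    raise RuntimeError(
        "flowerpower>=0.21 removed the job_queue subsystem; "
        "pin flowerpower<0.21 or migrate off JobQueueManager"
    ) from exc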
flowerpower/cli/job_queue.py
@@ -1,1061 +0,0 @@
1
- import typer
2
- from loguru import logger
3
-
4
- from .. import settings
5
- from ..job_queue import JobQueueManager # Adjust import as needed
6
- from ..utils.logging import setup_logging
7
- from .utils import parse_dict_or_list_param
8
-
9
- # Create a Typer app for job queue management commands
10
- app = typer.Typer(help="Job queue management commands")
11
-
12
- setup_logging(level=settings.LOG_LEVEL)
13
-
14
-
15
- @app.command()
16
- def start_worker(
17
- type: str | None = typer.Option(
18
- None, help="Type of job queue backend (rq, apscheduler)"
19
- ),
20
- name: str | None = typer.Option(
21
- None, help="Name of the scheduler configuration to use"
22
- ),
23
- base_dir: str | None = typer.Option(
24
- None, help="Base directory for the scheduler configuration"
25
- ),
26
- background: bool = typer.Option(
27
- False, "--background", "-b", help="Run the worker in the background"
28
- ),
29
- storage_options: str | None = typer.Option(
30
- None, help="Storage options as JSON or key=value pairs"
31
- ),
32
- log_level: str = typer.Option(
33
- "info", help="Logging level (debug, info, warning, error, critical)"
34
- ),
35
- num_workers: int | None = typer.Option(
36
- None,
37
- "--num-workers",
38
- "-n",
39
- help="Number of worker processes to start (pool mode)",
40
- ),
41
- ):
42
- """
43
- Start a worker or worker pool to process jobs.
44
-
45
- This command starts a worker process (or a pool of worker processes) that will
46
- execute jobs from the queue. The worker will continue running until stopped
47
- or can be run in the background.
48
-
49
- Args:
50
- type: Type of job queue backend (rq, apscheduler)
51
- name: Name of the scheduler configuration to use
52
- base_dir: Base directory for the scheduler configuration
53
- background: Run the worker in the background
54
- storage_options: Storage options as JSON or key=value pairs
55
- log_level: Logging level (debug, info, warning, error, critical)
56
- num_workers: Number of worker processes to start (pool mode)
57
-
58
- Examples:
59
- # Start a worker with default settings
60
- $ flowerpower job-queue start-worker
61
-
62
- # Start a worker for a specific backend type
63
- $ flowerpower job-queue start-worker --type rq
64
-
65
- # Start a worker pool with 4 processes
66
- $ flowerpower job-queue start-worker --num-workers 4
67
-
68
- # Run a worker in the background
69
- $ flowerpower job-queue start-worker --background
70
-
71
- # Set a specific logging level
72
- $ flowerpower job-queue start-worker --log-level debug
73
- """
74
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
75
-
76
- with JobQueueManager(
77
- type=type,
78
- name=name,
79
- base_dir=base_dir,
80
- storage_options=parsed_storage_options,
81
- log_level=log_level,
82
- ) as worker:
83
- if num_workers:
84
- num_workers = worker.cfg.num_workers
85
-
86
- if num_workers and num_workers > 1:
87
- worker.start_worker_pool(num_workers=num_workers, background=background)
88
- else:
89
- worker.start_worker(background=background)
90
-
91
-
92
- @app.command()
93
- def start_scheduler(
94
- type: str | None = typer.Option(
95
- None, help="Type of job queue backend (rq, apscheduler)"
96
- ),
97
- name: str | None = typer.Option(
98
- None, help="Name of the scheduler configuration to use"
99
- ),
100
- base_dir: str | None = typer.Option(
101
- None, help="Base directory for the scheduler configuration"
102
- ),
103
- background: bool = typer.Option(
104
- False, "--background", "-b", help="Run the scheduler in the background"
105
- ),
106
- storage_options: str | None = typer.Option(
107
- None, help="Storage options as JSON or key=value pairs"
108
- ),
109
- log_level: str = typer.Option(
110
- "info", help="Logging level (debug, info, warning, error, critical)"
111
- ),
112
- interval: int = typer.Option(
113
- 60, "--interval", "-i", help="Interval for checking jobs in seconds (RQ only)"
114
- ),
115
- ):
116
- """
117
- Start the scheduler process for queued jobs.
118
-
119
- This command starts a scheduler that manages queued jobs and scheduled tasks.
120
- Note that this is only needed for RQ workers, as APScheduler workers have
121
- their own built-in scheduler.
122
-
123
- Args:
124
- type: Type of job queue backend (rq, apscheduler)
125
- name: Name of the scheduler configuration to use
126
- base_dir: Base directory for the scheduler configuration
127
- background: Run the scheduler in the background
128
- storage_options: Storage options as JSON or key=value pairs
129
- log_level: Logging level (debug, info, warning, error, critical)
130
- interval: Interval for checking jobs in seconds (RQ only)
131
-
132
- Examples:
133
- # Start a scheduler with default settings
134
- $ flowerpower job-queue start-scheduler
135
-
136
- # Start a scheduler for a specific backend type
137
- $ flowerpower job-queue start-scheduler --type rq
138
-
139
- # Run a scheduler in the background
140
- $ flowerpower job-queue start-scheduler --background
141
-
142
- # Set a specific scheduler check interval (RQ only)
143
- $ flowerpower job-queue start-scheduler --interval 30
144
- """
145
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
146
-
147
- with JobQueueManager(
148
- type=type,
149
- name=name,
150
- base_dir=base_dir,
151
- storage_options=parsed_storage_options,
152
- log_level=log_level,
153
- ) as worker:
154
- if worker.cfg.backend.type != "rq":
155
- logger.info(
156
- f"No scheduler needed for {worker.cfg.backend.type} workers. Skipping."
157
- )
158
- return
159
-
160
- worker.start_scheduler(background=background, interval=interval)
161
-
162
-
163
- # @app.command()
164
- # def cancel_all_jobs(
165
- # type: str | None = None,
166
- # queue_name: str | None = None,
167
- # name: str | None = None,
168
- # base_dir: str | None = None,
169
- # storage_options: str | None = None,
170
- # log_level: str = "info",
171
- # ):
172
- # """
173
- # Cancel all jobs from the scheduler.
174
-
175
- # Note: This is different from deleting jobs as it only stops them from running but keeps their history.
176
-
177
- # Args:
178
- # type: Type of the job queue (rq, apscheduler)
179
- # queue_name: Name of the queue (RQ only)
180
- # name: Name of the scheduler
181
- # base_dir: Base directory for the scheduler
182
- # storage_options: Storage options as JSON or key=value pairs
183
- # log_level: Logging level
184
- # """
185
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
186
-
187
- # with JobQueueManager(
188
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
189
- # ) as worker:
190
- # if worker.cfg.backend.type != "rq":
191
- # logger.info(f"Job cancellation is not supported for {worker.cfg.backend.type} workers. Skipping.")
192
- # return
193
-
194
- # worker.cancel_all_jobs(queue_name=queue_name)
195
-
196
- # @app.command()
197
- # def cancel_all_schedules(
198
- # type: str | None = None,
199
- # name: str | None = None,
200
- # base_dir: str | None = None,
201
- # storage_options: str | None = None,
202
- # log_level: str = "info",
203
- # ):
204
- # """
205
- # Cancel all schedules from the scheduler.
206
-
207
- # Note: This is different from deleting schedules as it only stops them from running but keeps their configuration.
208
-
209
- # Args:
210
- # type: Type of the job queue (rq, apscheduler)
211
- # name: Name of the scheduler
212
- # base_dir: Base directory for the scheduler
213
- # storage_options: Storage options as JSON or key=value pairs
214
- # log_level: Logging level
215
- # """
216
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
217
-
218
- # with JobQueueManager(
219
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
220
- # ) as worker:
221
- # worker.cancel_all_schedules()
222
-
223
-
224
- @app.command()
225
- def cancel_job(
226
- job_id: str = typer.Argument(..., help="ID of the job to cancel"),
227
- all: bool = typer.Option(
228
- False, "--all", "-a", help="Cancel all jobs instead of a specific one"
229
- ),
230
- queue_name: str | None = typer.Option(
231
- None,
232
- help="Name of the queue (RQ only). If provided with --all, cancels all jobs in the queue",
233
- ),
234
- type: str | None = typer.Option(
235
- None, help="Type of job queue backend (rq, apscheduler)"
236
- ),
237
- name: str | None = typer.Option(
238
- None, help="Name of the scheduler configuration to use"
239
- ),
240
- base_dir: str | None = typer.Option(
241
- None, help="Base directory for the scheduler configuration"
242
- ),
243
- storage_options: str | None = typer.Option(
244
- None, help="Storage options as JSON or key=value pairs"
245
- ),
246
- log_level: str = typer.Option(
247
- "info", help="Logging level (debug, info, warning, error, critical)"
248
- ),
249
- ):
250
- """
251
- Cancel a job or multiple jobs in the queue.
252
-
253
- This command stops a job from executing (if it hasn't started yet) or signals
254
- it to stop (if already running). Canceling is different from deleting as it
255
- maintains the job history but prevents execution.
256
-
257
- Args:
258
- job_id: ID of the job to cancel (ignored if --all is used)
259
- all: Cancel all jobs instead of a specific one
260
- queue_name: For RQ only, specifies the queue to cancel jobs from
261
- type: Type of job queue backend (rq, apscheduler)
262
- name: Name of the scheduler configuration to use
263
- base_dir: Base directory for the scheduler configuration
264
- storage_options: Storage options as JSON or key=value pairs
265
- log_level: Logging level (debug, info, warning, error, critical)
266
-
267
- Examples:
268
- # Cancel a specific job
269
- $ flowerpower job-queue cancel-job job-123456
270
-
271
- # Cancel all jobs in the default queue
272
- $ flowerpower job-queue cancel-job --all dummy-id
273
-
274
- # Cancel all jobs in a specific queue (RQ only)
275
- $ flowerpower job-queue cancel-job --all dummy-id --queue-name high-priority
276
-
277
- # Specify the backend type explicitly
278
- $ flowerpower job-queue cancel-job job-123456 --type rq
279
- """
280
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
281
-
282
- with JobQueueManager(
283
- type=type,
284
- name=name,
285
- base_dir=base_dir,
286
- storage_options=parsed_storage_options,
287
- log_level=log_level,
288
- ) as worker:
289
- if worker.cfg.backend.type != "rq":
290
- logger.info(
291
- f"Job cancellation is not supported for {worker.cfg.backend.type} workers. Skipping."
292
- )
293
- return
294
- if all:
295
- count = worker.cancel_all_jobs(
296
- queue_name=queue_name if worker.cfg.backend.type == "rq" else None
297
- )
298
- logger.info(
299
- f"Cancelled {count} jobs"
300
- + (f" in queue '{queue_name}'" if queue_name else "")
301
- )
302
- else:
303
- worker.cancel_job(job_id)
304
- logger.info(f"Job {job_id} cancelled")
305
-
306
-
307
- @app.command()
308
- def cancel_schedule(
309
- schedule_id: str,
310
- all: bool = False,
311
- type: str | None = None,
312
- name: str | None = None,
313
- base_dir: str | None = None,
314
- storage_options: str | None = None,
315
- log_level: str = "info",
316
- ):
317
- """
318
- Cancel a specific schedule.
319
-
320
- Note: This is different from deleting a schedule as it only stops it from running but keeps its configuration.
321
-
322
- Args:
323
- schedule_id: ID of the schedule to cancel
324
- all: If True, cancel all schedules
325
- type: Type of the job queue (rq, apscheduler)
326
- name: Name of the scheduler
327
- base_dir: Base directory for the scheduler
328
- storage_options: Storage options as JSON or key=value pairs
329
- log_level: Logging level
330
- """
331
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
332
-
333
- with JobQueueManager(
334
- type=type,
335
- name=name,
336
- base_dir=base_dir,
337
- storage_options=parsed_storage_options,
338
- log_level=log_level,
339
- ) as worker:
340
- if all:
341
- worker.cancel_all_schedules()
342
- else:
343
- worker.cancel_schedule(schedule_id)
344
-
345
-
346
- # @app.command()
347
- # def delete_all_jobs(
348
- # type: str | None = None,
349
- # queue_name: str | None = None,
350
- # name: str | None = None,
351
- # base_dir: str | None = None,
352
- # storage_options: str | None = None,
353
- # log_level: str = "info",
354
- # ):
355
- # """
356
- # Delete all jobs from the scheduler. Note that this is different from cancelling jobs
357
- # as it also removes job history and results.
358
-
359
- # Args:
360
- # queue_name: Name of the queue (RQ only)
361
- # name: Name of the scheduler
362
- # base_dir: Base directory for the scheduler
363
- # storage_options: Storage options as JSON or key=value pairs
364
- # log_level: Logging level
365
- # """
366
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
367
-
368
- # with JobQueueManager(
369
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
370
- # ) as worker:
371
- # worker.delete_all_jobs(queue_name=queue_name if worker.cfg.backend.type == "rq" else None)
372
-
373
- # @app.command()
374
- # def delete_all_schedules(
375
- # type: str | None = None,
376
- # name: str | None = None,
377
- # base_dir: str | None = None,
378
- # storage_options: str | None = None,
379
- # log_level: str = "info",
380
- # ):
381
- # """
382
- # Delete all schedules from the scheduler.
383
-
384
- # Args:
385
- # name: Name of the scheduler
386
- # base_dir: Base directory for the scheduler
387
- # storage_options: Storage options as JSON or key=value pairs
388
- # log_level: Logging level
389
- # """
390
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
391
-
392
- # with JobQueueManager(
393
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
394
- # ) as worker:
395
- # worker.delete_all_schedules()
396
-
397
-
398
- @app.command()
399
- def delete_job(
400
- job_id: str,
401
- all: bool = False,
402
- queue_name: str | None = None,
403
- type: str | None = None,
404
- name: str | None = None,
405
- base_dir: str | None = None,
406
- storage_options: str | None = None,
407
- log_level: str = "info",
408
- ):
409
- """
410
- Delete a specific job.
411
-
412
- Args:
413
- job_id: ID of the job to delete
414
- all: If True, delete all jobs
415
- queue_name: Name of the queue (RQ only). If provided and all is True, delete all jobs in the queue
416
- type: Type of the job queue (rq, apscheduler)
417
- name: Name of the scheduler
418
- base_dir: Base directory for the scheduler
419
- storage_options: Storage options as JSON or key=value pairs
420
- log_level: Logging level
421
- """
422
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
423
-
424
- with JobQueueManager(
425
- type=type,
426
- name=name,
427
- base_dir=base_dir,
428
- storage_options=parsed_storage_options,
429
- log_level=log_level,
430
- ) as worker:
431
- if all:
432
- worker.delete_all_jobs(
433
- queue_name=queue_name if worker.cfg.backend.type == "rq" else None
434
- )
435
- else:
436
- worker.delete_job(job_id)
437
-
438
-
439
- @app.command()
440
- def delete_schedule(
441
- schedule_id: str,
442
- all: bool = False,
443
- type: str | None = None,
444
- name: str | None = None,
445
- base_dir: str | None = None,
446
- storage_options: str | None = None,
447
- log_level: str = "info",
448
- ):
449
- """
450
- Delete a specific schedule.
451
-
452
- Args:
453
- schedule_id: ID of the schedule to delete
454
- all: If True, delete all schedules
455
- type: Type of the job queue (rq, apscheduler)
456
- name: Name of the scheduler
457
- base_dir: Base directory for the scheduler
458
- storage_options: Storage options as JSON or key=value pairs
459
- log_level: Logging level
460
- """
461
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
462
-
463
- with JobQueueManager(
464
- type=type,
465
- name=name,
466
- base_dir=base_dir,
467
- storage_options=parsed_storage_options,
468
- log_level=log_level,
469
- ) as worker:
470
- if all:
471
- worker.delete_all_schedules()
472
- else:
473
- worker.delete_schedule(schedule_id)
474
-
475
-
476
- # @app.command()
477
- # def get_job(
478
- # job_id: str,
479
- # type: str | None = None,
480
- # name: str | None = None,
481
- # base_dir: str | None = None,
482
- # storage_options: str | None = None,
483
- # log_level: str = "info",
484
- # ):
485
- # """
486
- # Get information about a specific job.
487
-
488
- # Args:
489
- # job_id: ID of the job
490
- # name: Name of the scheduler
491
- # base_dir: Base directory for the scheduler
492
- # storage_options: Storage options as JSON or key=value pairs
493
- # log_level: Logging level
494
- # """
495
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
496
-
497
- # with JobQueueManager(
498
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
499
- # ) as worker:
500
- # # show_jobs should display the job info
501
- # worker.show_jobs(job_id=job_id)
502
-
503
- # @app.command()
504
- # def get_job_result(
505
- # job_id: str,
506
- # type: str | None = None,
507
- # name: str | None = None,
508
- # base_dir: str | None = None,
509
- # storage_options: str | None = None,
510
- # log_level: str = "info",
511
- # wait: bool = True,
512
- # ):
513
- # """
514
- # Get the result of a specific job.
515
-
516
- # Args:
517
- # job_id: ID of the job
518
- # name: Name of the scheduler
519
- # base_dir: Base directory for the scheduler
520
- # storage_options: Storage options as JSON or key=value pairs
521
- # log_level: Logging level
522
- # wait: Wait for the result if job is still running (APScheduler only)
523
- # """
524
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
525
-
526
- # with JobQueueManager(
527
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
528
- # ) as worker:
529
- # # worker's get_job_result method will handle the result display
530
- # worker.get_job_result(job_id, wait=wait if worker.cfg.backend.type == "apscheduler" else False)
531
-
532
- # @app.command()
533
- # def get_jobs(
534
- # type: str | None = None,
535
- # queue_name: str | None = None,
536
- # name: str | None = None,
537
- # base_dir: str | None = None,
538
- # storage_options: str | None = None,
539
- # log_level: str = "info",
540
- # ):
541
- # """
542
- # List all jobs.
543
-
544
- # Args:
545
- # queue_name: Name of the queue (RQ only)
546
- # name: Name of the scheduler
547
- # base_dir: Base directory for the scheduler
548
- # storage_options: Storage options as JSON or key=value pairs
549
- # log_level: Logging level
550
- # """
551
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
552
-
553
- # with JobQueueManager(
554
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
555
- # ) as worker:
556
- # worker.show_jobs()
557
-
558
- # @app.command()
559
- # def get_schedule(
560
- # schedule_id: str,
561
- # type: str | None = None,
562
- # name: str | None = None,
563
- # base_dir: str | None = None,
564
- # storage_options: str | None = None,
565
- # log_level: str = "info",
566
- # ):
567
- # """
568
- # Get information about a specific schedule.
569
-
570
- # Args:
571
- # schedule_id: ID of the schedule
572
- # name: Name of the scheduler
573
- # base_dir: Base directory for the scheduler
574
- # storage_options: Storage options as JSON or key=value pairs
575
- # log_level: Logging level
576
- # """
577
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
578
-
579
- # with JobQueueManager(
580
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
581
- # ) as worker:
582
- # # show_schedule should display the schedule info
583
- # worker.show_schedules(schedule_id=schedule_id)
584
-
585
- # @app.command()
586
- # def get_schedules(
587
- # type: str | None = None,
588
- # name: str | None = None,
589
- # base_dir: str | None = None,
590
- # storage_options: str | None = None,
591
- # log_level: str = "info",
592
- # ):
593
- # """
594
- # List all schedules.
595
-
596
- # Args:
597
- # name: Name of the scheduler
598
- # base_dir: Base directory for the scheduler
599
- # storage_options: Storage options as JSON or key=value pairs
600
- # log_level: Logging level
601
- # """
602
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
603
-
604
- # with JobQueueManager(
605
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
606
- # ) as worker:
607
- # worker.show_schedules()
608
-
609
-
610
- @app.command()
611
- def show_job_ids(
612
- type: str | None = typer.Option(
613
- None, help="Type of job queue backend (rq, apscheduler)"
614
- ),
615
- name: str | None = typer.Option(
616
- None, help="Name of the scheduler configuration to use"
617
- ),
618
- base_dir: str | None = typer.Option(
619
- None, help="Base directory for the scheduler configuration"
620
- ),
621
- storage_options: str | None = typer.Option(
622
- None, help="Storage options as JSON or key=value pairs"
623
- ),
624
- log_level: str = typer.Option(
625
- "info", help="Logging level (debug, info, warning, error, critical)"
626
- ),
627
- ):
628
- """
629
- Show all job IDs in the job queue.
630
-
631
- This command displays all job IDs currently in the system, helping you identify
632
- jobs for other operations like getting results, canceling, or deleting jobs.
633
-
634
- Args:
635
- type: Type of job queue backend (rq, apscheduler)
636
- name: Name of the scheduler configuration to use
637
- base_dir: Base directory for the scheduler configuration
638
- storage_options: Storage options as JSON or key=value pairs
639
- log_level: Logging level (debug, info, warning, error, critical)
640
-
641
- Examples:
642
- # Show job IDs using default settings
643
- $ flowerpower job-queue show-job-ids
644
-
645
- # Show job IDs for a specific queue type
646
- $ flowerpower job-queue show-job-ids --type rq
647
-
648
- # Show job IDs with a custom scheduler configuration
649
- $ flowerpower job-queue show-job-ids --name my-scheduler
650
-
651
- # Show job IDs with debug logging
652
- $ flowerpower job-queue show-job-ids --log-level debug
653
- """
654
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
655
-
656
- with JobQueueManager(
657
- type=type,
658
- name=name,
659
- base_dir=base_dir,
660
- storage_options=parsed_storage_options,
661
- log_level=log_level,
662
- ) as worker:
663
- # worker's job_ids property will print the IDs
664
- ids = worker.job_ids
665
- # Ensure we always print something meaningful
666
- if not ids:
667
- logger.info("No job IDs found")
668
- # If the worker's property doesn't already print the IDs, print them here
669
- elif not isinstance(ids, type(None)): # Check if None was returned
670
- for job_id in ids:
671
- print(f"- {job_id}")
672
-
673
-
674
- @app.command()
675
- def show_schedule_ids(
676
- type: str | None = typer.Option(
677
- None, help="Type of job queue backend (rq, apscheduler)"
678
- ),
679
- name: str | None = typer.Option(
680
- None, help="Name of the scheduler configuration to use"
681
- ),
682
- base_dir: str | None = typer.Option(
683
- None, help="Base directory for the scheduler configuration"
684
- ),
685
- storage_options: str | None = typer.Option(
686
- None, help="Storage options as JSON or key=value pairs"
687
- ),
688
- log_level: str = typer.Option(
689
- "info", help="Logging level (debug, info, warning, error, critical)"
690
- ),
691
- ):
692
- """
693
- Show all schedule IDs in the job queue.
694
-
695
- This command displays all schedule IDs currently in the system, helping you
696
- identify schedules for other operations like pausing, resuming, or deleting schedules.
697
-
698
- Args:
699
- type: Type of job queue backend (rq, apscheduler)
700
- name: Name of the scheduler configuration to use
701
- base_dir: Base directory for the scheduler configuration
702
- storage_options: Storage options as JSON or key=value pairs
703
- log_level: Logging level (debug, info, warning, error, critical)
704
-
705
- Examples:
706
- # Show schedule IDs using default settings
707
- $ flowerpower job-queue show-schedule-ids
708
-
709
- # Show schedule IDs for a specific queue type
710
- $ flowerpower job-queue show-schedule-ids --type apscheduler
711
-
712
- # Show schedule IDs with a custom scheduler configuration
713
- $ flowerpower job-queue show-schedule-ids --name my-scheduler
714
-
715
- # Show schedule IDs with debug logging
716
- $ flowerpower job-queue show-schedule-ids --log-level debug
717
- """
718
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
719
-
720
- with JobQueueManager(
721
- type=type,
722
- name=name,
723
- base_dir=base_dir,
724
- storage_options=parsed_storage_options,
725
- log_level=log_level,
726
- ) as worker:
727
- # worker's schedule_ids property will print the IDs
728
- ids = worker.schedule_ids
729
- # Ensure we always print something meaningful
730
- if not ids:
731
- logger.info("No schedule IDs found")
732
- # If the worker's property doesn't already print the IDs, print them here
733
- elif not isinstance(ids, type(None)): # Check if None was returned
734
- for schedule_id in ids:
735
- print(f"- {schedule_id}")
736
-
737
-
738
- # @app.command()
739
- # def pause_all_schedules(
740
- # type: str | None = None,
741
- # name: str | None = None,
742
- # base_dir: str | None = None,
743
- # storage_options: str | None = None,
744
- # log_level: str = "info",
745
- # ):
746
- # """
747
- # Pause all schedules.
748
-
749
- # Note: This functionality is only available for APScheduler workers.
750
-
751
- # Args:
752
- # name: Name of the scheduler
753
- # base_dir: Base directory for the scheduler
754
- # storage_options: Storage options as JSON or key=value pairs
755
- # log_level: Logging level
756
- # """
757
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
758
-
759
- # with JobQueueManager(
760
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
761
- # ) as worker:
762
- # if worker.cfg.backend.type != "apscheduler":
763
- # logger.info(f"Schedule pausing is not supported for {worker.cfg.backend.type} workers.")
764
- # return
765
- # worker.pause_all_schedules()
766
-
767
-
768
- @app.command()
769
- def pause_schedule(
770
- schedule_id: str = typer.Argument(..., help="ID of the schedule to pause"),
771
- all: bool = typer.Option(
772
- False, "--all", "-a", help="Pause all schedules instead of a specific one"
773
- ),
774
- type: str | None = typer.Option(
775
- None, help="Type of job queue backend (rq, apscheduler)"
776
- ),
777
- name: str | None = typer.Option(
778
- None, help="Name of the scheduler configuration to use"
779
- ),
780
- base_dir: str | None = typer.Option(
781
- None, help="Base directory for the scheduler configuration"
782
- ),
783
- storage_options: str | None = typer.Option(
784
- None, help="Storage options as JSON or key=value pairs"
785
- ),
786
- log_level: str = typer.Option(
787
- "info", help="Logging level (debug, info, warning, error, critical)"
788
- ),
789
- ):
790
- """
791
- Pause a schedule or multiple schedules.
792
-
793
- This command temporarily stops a scheduled job from running while maintaining its
794
- configuration. Paused schedules can be resumed later. Note that this functionality
795
- is only available for APScheduler workers.
796
-
797
- Args:
798
- schedule_id: ID of the schedule to pause (ignored if --all is used)
799
- all: Pause all schedules instead of a specific one
800
- type: Type of job queue backend (rq, apscheduler)
801
- name: Name of the scheduler configuration to use
802
- base_dir: Base directory for the scheduler configuration
803
- storage_options: Storage options as JSON or key=value pairs
804
- log_level: Logging level (debug, info, warning, error, critical)
805
-
806
- Examples:
807
- # Pause a specific schedule
808
- $ flowerpower job-queue pause-schedule schedule-123456
809
-
810
- # Pause all schedules
811
- $ flowerpower job-queue pause-schedule --all dummy-id
812
-
813
- # Specify the backend type explicitly
814
- $ flowerpower job-queue pause-schedule schedule-123456 --type apscheduler
815
- """
816
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
817
-
818
- with JobQueueManager(
819
- type=type,
820
- name=name,
821
- base_dir=base_dir,
822
- storage_options=parsed_storage_options,
823
- log_level=log_level,
824
- ) as worker:
825
- if worker.cfg.backend.type != "apscheduler":
826
- logger.info(
827
- f"Schedule pausing is not supported for {worker.cfg.backend.type} workers."
828
- )
829
- return
830
- if all:
831
- count = worker.pause_all_schedules()
832
- logger.info(f"Paused {count} schedules")
833
- else:
834
- success = worker.pause_schedule(schedule_id)
835
- if success:
836
- logger.info(f"Schedule {schedule_id} paused successfully")
837
- else:
838
- logger.error(f"Failed to pause schedule {schedule_id}")
839
-
840
-
841
- # @app.command()
842
- # def resume_all_schedules(
843
- # type: str | None = None,
844
- # name: str | None = None,
845
- # base_dir: str | None = None,
846
- # storage_options: str | None = None,
847
- # log_level: str = "info",
848
- # ):
849
- # """
850
- # Resume all paused schedules.
851
-
852
- # Note: This functionality is only available for APScheduler workers.
853
-
854
- # Args:
855
- # name: Name of the scheduler
856
- # base_dir: Base directory for the scheduler
857
- # storage_options: Storage options as JSON or key=value pairs
858
- # log_level: Logging level
859
- # """
860
- # parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
861
-
862
- # with JobQueueManager(
863
- # type=type, name=name, base_dir=base_dir, storage_options=parsed_storage_options, log_level=log_level
864
- # ) as worker:
865
- # if worker.cfg.backend.type != "apscheduler":
866
- # logger.info(f"Schedule resuming is not supported for {worker.cfg.backend.type} workers.")
867
- # return
868
- # worker.resume_all_schedules()
869
-
870
-
871
- @app.command()
872
- def resume_schedule(
873
- schedule_id: str = typer.Argument(..., help="ID of the schedule to resume"),
874
- all: bool = typer.Option(
875
- False, "--all", "-a", help="Resume all schedules instead of a specific one"
876
- ),
877
- type: str | None = typer.Option(
878
- None, help="Type of job queue backend (rq, apscheduler)"
879
- ),
880
- name: str | None = typer.Option(
881
- None, help="Name of the scheduler configuration to use"
882
- ),
883
- base_dir: str | None = typer.Option(
884
- None, help="Base directory for the scheduler configuration"
885
- ),
886
- storage_options: str | None = typer.Option(
887
- None, help="Storage options as JSON or key=value pairs"
888
- ),
889
- log_level: str = typer.Option(
890
- "info", help="Logging level (debug, info, warning, error, critical)"
891
- ),
892
- ):
893
- """
894
- Resume a paused schedule or multiple schedules.
895
-
896
- This command restarts previously paused schedules, allowing them to run again according
897
- to their original configuration. Note that this functionality is only available for
898
- APScheduler workers.
899
-
900
- Args:
901
- schedule_id: ID of the schedule to resume (ignored if --all is used)
902
- all: Resume all schedules instead of a specific one
903
- type: Type of job queue backend (rq, apscheduler)
904
- name: Name of the scheduler configuration to use
905
- base_dir: Base directory for the scheduler configuration
906
- storage_options: Storage options as JSON or key=value pairs
907
- log_level: Logging level (debug, info, warning, error, critical)
908
-
909
- Examples:
910
- # Resume a specific schedule
911
- $ flowerpower job-queue resume-schedule schedule-123456
912
-
913
- # Resume all schedules
914
- $ flowerpower job-queue resume-schedule --all dummy-id
915
-
916
- # Specify the backend type explicitly
917
- $ flowerpower job-queue resume-schedule schedule-123456 --type apscheduler
918
-
919
- # Set a specific logging level
920
- $ flowerpower job-queue resume-schedule schedule-123456 --log-level debug
921
- """
922
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
923
-
924
- with JobQueueManager(
925
- type=type,
926
- name=name,
927
- base_dir=base_dir,
928
- storage_options=parsed_storage_options,
929
- log_level=log_level,
930
- ) as worker:
931
- if worker.cfg.backend.type != "apscheduler":
932
- logger.info(
933
- f"Schedule resuming is not supported for {worker.cfg.backend.type} workers."
934
- )
935
- return
936
- if all:
937
- count = worker.resume_all_schedules()
938
- logger.info(f"Resumed {count} schedules")
939
- else:
940
- success = worker.resume_schedule(schedule_id)
941
- if success:
942
- logger.info(f"Schedule {schedule_id} resumed successfully")
943
- else:
944
- logger.error(f"Failed to resume schedule {schedule_id}")
945
-
946
-
947
- @app.command()
948
- def show_jobs(
949
- type: str | None = typer.Option(
950
- None, help="Type of job queue backend (rq, apscheduler)"
951
- ),
952
- queue_name: str | None = typer.Option(
953
- None, help="Name of the queue to show jobs from (RQ only)"
954
- ),
955
- name: str | None = typer.Option(
956
- None, help="Name of the scheduler configuration to use"
957
- ),
958
- base_dir: str | None = typer.Option(
959
- None, help="Base directory for the scheduler configuration"
960
- ),
961
- storage_options: str | None = typer.Option(
962
- None, help="Storage options as JSON or key=value pairs"
963
- ),
964
- log_level: str = typer.Option(
965
- "info", help="Logging level (debug, info, warning, error, critical)"
966
- ),
967
- format: str = typer.Option("table", help="Output format (table, json, yaml)"),
968
- ):
969
- """
970
- Display detailed information about all jobs in the queue.
971
-
972
- This command shows comprehensive information about jobs including their status,
973
- creation time, execution time, and other details in a user-friendly format.
974
-
975
- Args:
976
- type: Type of job queue backend (rq, apscheduler)
977
- queue_name: Name of the queue to show jobs from (RQ only)
978
- name: Name of the scheduler configuration to use
979
- base_dir: Base directory for the scheduler configuration
980
- storage_options: Storage options as JSON or key=value pairs
981
- log_level: Logging level (debug, info, warning, error, critical)
982
- format: Output format for the job information
983
-
984
- Examples:
985
- # Show all jobs using default settings
986
- $ flowerpower job-queue show-jobs
987
-
988
- # Show jobs for a specific queue type
989
- $ flowerpower job-queue show-jobs --type rq
990
-
991
- # Show jobs in a specific RQ queue
992
- $ flowerpower job-queue show-jobs --queue-name high-priority
993
-
994
- # Display jobs in JSON format
995
- $ flowerpower job-queue show-jobs --format json
996
- """
997
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
998
-
999
- with JobQueueManager(
1000
- type=type,
1001
- name=name,
1002
- base_dir=base_dir,
1003
- storage_options=parsed_storage_options,
1004
- log_level=log_level,
1005
- ) as worker:
1006
- worker.show_jobs(queue_name=queue_name, format=format)
1007
-
1008
-
1009
- @app.command()
1010
- def show_schedules(
1011
- type: str | None = typer.Option(
1012
- None, help="Type of job queue backend (rq, apscheduler)"
1013
- ),
1014
- name: str | None = typer.Option(
1015
- None, help="Name of the scheduler configuration to use"
1016
- ),
1017
- base_dir: str | None = typer.Option(
1018
- None, help="Base directory for the scheduler configuration"
1019
- ),
1020
- storage_options: str | None = typer.Option(
1021
- None, help="Storage options as JSON or key=value pairs"
1022
- ),
1023
- log_level: str = typer.Option(
1024
- "info", help="Logging level (debug, info, warning, error, critical)"
1025
- ),
1026
- format: str = typer.Option("table", help="Output format (table, json, yaml)"),
1027
- ):
1028
- """
1029
- Display detailed information about all schedules.
1030
-
1031
- This command shows comprehensive information about scheduled jobs including their
1032
- timing configuration, status, and other details in a user-friendly format.
1033
-
1034
- Args:
1035
- type: Type of job queue backend (rq, apscheduler)
1036
- name: Name of the scheduler configuration to use
1037
- base_dir: Base directory for the scheduler configuration
1038
- storage_options: Storage options as JSON or key=value pairs
1039
- log_level: Logging level (debug, info, warning, error, critical)
1040
- format: Output format for the schedule information
1041
-
1042
- Examples:
1043
- # Show all schedules using default settings
1044
- $ flowerpower job-queue show-schedules
1045
-
1046
- # Show schedules for a specific queue type
1047
- $ flowerpower job-queue show-schedules --type apscheduler
1048
-
1049
- # Display schedules in JSON format
1050
- $ flowerpower job-queue show-schedules --format json
1051
- """
1052
- parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
1053
-
1054
- with JobQueueManager(
1055
- type=type,
1056
- name=name,
1057
- base_dir=base_dir,
1058
- storage_options=parsed_storage_options,
1059
- log_level=log_level,
1060
- ) as worker:
1061
- worker.show_schedules(format=format)
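
For reference, the calls that the deleted commands above were wrapping, sketched only from what is visible in this module. This reflects the 0.11.6.20 API and is not valid on 0.21.0, where JobQueueManager has been removed; the import path is inferred from the module's relative import, and the parameter values are illustrative.

# 0.11.6.20-era sketch of the API driven by the deleted `job-queue` CLI above.
# Not valid on 0.21.0; values are illustrative, not defaults.
from flowerpower.job_queue import JobQueueManager

with JobQueueManager(
    type="rq",            # backend type ("rq" or "apscheduler")
    name=None,            # scheduler configuration name
    base_dir=None,        # base directory for the configuration
    storage_options={},   # the CLI parses these from JSON or key=value pairs
    log_level="info",
) as worker:
    worker.start_worker(background=True)                # `start-worker --background`
    worker.show_jobs(queue_name=None, format="table")   # `show-jobs`
    worker.cancel_job("job-123456")                     # `cancel-job job-123456`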