FlowerPower 0.9.12.4__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (81)
  1. flowerpower/__init__.py +17 -2
  2. flowerpower/cfg/__init__.py +201 -149
  3. flowerpower/cfg/base.py +122 -24
  4. flowerpower/cfg/pipeline/__init__.py +254 -0
  5. flowerpower/cfg/pipeline/adapter.py +66 -0
  6. flowerpower/cfg/pipeline/run.py +40 -11
  7. flowerpower/cfg/pipeline/schedule.py +69 -79
  8. flowerpower/cfg/project/__init__.py +149 -0
  9. flowerpower/cfg/project/adapter.py +57 -0
  10. flowerpower/cfg/project/job_queue.py +165 -0
  11. flowerpower/cli/__init__.py +92 -35
  12. flowerpower/cli/job_queue.py +878 -0
  13. flowerpower/cli/mqtt.py +49 -4
  14. flowerpower/cli/pipeline.py +576 -381
  15. flowerpower/cli/utils.py +55 -0
  16. flowerpower/flowerpower.py +12 -7
  17. flowerpower/fs/__init__.py +20 -2
  18. flowerpower/fs/base.py +350 -26
  19. flowerpower/fs/ext.py +797 -216
  20. flowerpower/fs/storage_options.py +1097 -55
  21. flowerpower/io/base.py +13 -18
  22. flowerpower/io/loader/__init__.py +28 -0
  23. flowerpower/io/loader/deltatable.py +7 -10
  24. flowerpower/io/metadata.py +1 -0
  25. flowerpower/io/saver/__init__.py +28 -0
  26. flowerpower/io/saver/deltatable.py +4 -3
  27. flowerpower/job_queue/__init__.py +252 -0
  28. flowerpower/job_queue/apscheduler/__init__.py +11 -0
  29. flowerpower/job_queue/apscheduler/_setup/datastore.py +110 -0
  30. flowerpower/job_queue/apscheduler/_setup/eventbroker.py +93 -0
  31. flowerpower/job_queue/apscheduler/manager.py +1063 -0
  32. flowerpower/job_queue/apscheduler/setup.py +524 -0
  33. flowerpower/job_queue/apscheduler/trigger.py +169 -0
  34. flowerpower/job_queue/apscheduler/utils.py +309 -0
  35. flowerpower/job_queue/base.py +382 -0
  36. flowerpower/job_queue/rq/__init__.py +10 -0
  37. flowerpower/job_queue/rq/_trigger.py +37 -0
  38. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +226 -0
  39. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +231 -0
  40. flowerpower/job_queue/rq/manager.py +1449 -0
  41. flowerpower/job_queue/rq/setup.py +150 -0
  42. flowerpower/job_queue/rq/utils.py +69 -0
  43. flowerpower/pipeline/__init__.py +5 -0
  44. flowerpower/pipeline/base.py +118 -0
  45. flowerpower/pipeline/io.py +407 -0
  46. flowerpower/pipeline/job_queue.py +505 -0
  47. flowerpower/pipeline/manager.py +1586 -0
  48. flowerpower/pipeline/registry.py +560 -0
  49. flowerpower/pipeline/runner.py +560 -0
  50. flowerpower/pipeline/visualizer.py +142 -0
  51. flowerpower/plugins/mqtt/__init__.py +12 -0
  52. flowerpower/plugins/mqtt/cfg.py +16 -0
  53. flowerpower/plugins/mqtt/manager.py +789 -0
  54. flowerpower/settings.py +110 -0
  55. flowerpower/utils/logging.py +21 -0
  56. flowerpower/utils/misc.py +57 -9
  57. flowerpower/utils/sql.py +122 -24
  58. flowerpower/utils/templates.py +18 -142
  59. flowerpower/web/app.py +0 -0
  60. flowerpower-1.0.0b1.dist-info/METADATA +324 -0
  61. flowerpower-1.0.0b1.dist-info/RECORD +94 -0
  62. {flowerpower-0.9.12.4.dist-info → flowerpower-1.0.0b1.dist-info}/WHEEL +1 -1
  63. flowerpower/cfg/pipeline/tracker.py +0 -14
  64. flowerpower/cfg/project/open_telemetry.py +0 -8
  65. flowerpower/cfg/project/tracker.py +0 -11
  66. flowerpower/cfg/project/worker.py +0 -19
  67. flowerpower/cli/scheduler.py +0 -309
  68. flowerpower/event_handler.py +0 -23
  69. flowerpower/mqtt.py +0 -525
  70. flowerpower/pipeline.py +0 -2419
  71. flowerpower/scheduler.py +0 -680
  72. flowerpower/tui.py +0 -79
  73. flowerpower/utils/datastore.py +0 -186
  74. flowerpower/utils/eventbroker.py +0 -127
  75. flowerpower/utils/executor.py +0 -58
  76. flowerpower/utils/trigger.py +0 -140
  77. flowerpower-0.9.12.4.dist-info/METADATA +0 -575
  78. flowerpower-0.9.12.4.dist-info/RECORD +0 -70
  79. /flowerpower/{cfg/pipeline/params.py → cli/worker.py} +0 -0
  80. {flowerpower-0.9.12.4.dist-info → flowerpower-1.0.0b1.dist-info}/entry_points.txt +0 -0
  81. {flowerpower-0.9.12.4.dist-info → flowerpower-1.0.0b1.dist-info}/top_level.txt +0 -0
flowerpower/job_queue/rq/_trigger.py (new file)
@@ -0,0 +1,37 @@
+ from typing import Any, Dict
+
+ from ..base import BaseTrigger
+
+
+ class RQTrigger(BaseTrigger):
+     """
+     RQTrigger adapts trigger logic for the RQ worker backend.
+
+     Inherits from BaseTrigger and provides a trigger instance
+     in dictionary format suitable for RQ scheduling.
+     """
+
+     def __init__(self, trigger_type: str):
+         super().__init__(trigger_type)
+
+     def get_trigger_instance(self, **kwargs) -> Dict[str, Any]:
+         """
+         Get trigger parameters for RQ Scheduler.
+
+         Args:
+             **kwargs: Keyword arguments for the trigger
+
+         Returns:
+             Dict[str, Any]: A dictionary with trigger configuration
+         """
+         # RQ doesn't have specific trigger classes like APScheduler.
+         # Instead, we'll return a dictionary with parameters that can
+         # be used by RQSchedulerBackend to schedule jobs appropriately.
+
+         result = {"type": self.trigger_type, **kwargs}
+
+         # For cron triggers, handle crontab string specifically
+         if self.trigger_type == "cron" and "crontab" in kwargs:
+             result["crontab"] = kwargs["crontab"]
+
+         return result
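Since get_trigger_instance returns a plain dict rather than a trigger object, the scheduling backend is expected to dispatch on the "type" key. A minimal usage sketch, assuming only that BaseTrigger stores trigger_type as shown above; the scheduling step is illustrative, not the package's actual API:

    trigger = RQTrigger("cron")
    config = trigger.get_trigger_instance(crontab="*/5 * * * *", timezone="UTC")
    # config == {"type": "cron", "crontab": "*/5 * * * *", "timezone": "UTC"}

    # A backend could then branch on config["type"], e.g. hand the crontab
    # string to rq-scheduler's Scheduler.cron(...) for "cron" triggers.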
flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py (new file)
@@ -0,0 +1,226 @@
+ # Monkey patch as early as possible
+ try:
+     from gevent import monkey
+
+     monkey.patch_all()
+     import gevent
+     import gevent.pool
+
+     GEVENT_AVAILABLE = True
+ except ImportError:
+     GEVENT_AVAILABLE = False
+     raise ImportError(
+         "Gevent is required for GeventWorker. Please install it with 'pip install gevent'."
+     )
+
+
+ import datetime as dt
+
+ from loguru import logger
+ from rq import worker
+ from rq.exceptions import DequeueTimeout
+ from rq.job import JobStatus
+ from rq.worker import StopRequested
+
+ from flowerpower.utils.logging import setup_logging
+
+ # Use utcnow directly for simplicity
+ utcnow = dt.datetime.utcnow
+ setup_logging("INFO")
+
+
+ class GeventWorker(worker.Worker):
+     """
+     A variation of the RQ Worker that uses Gevent to perform jobs concurrently
+     within a single worker process using greenlets.
+
+     Ideal for I/O bound tasks, offering very lightweight concurrency.
+     Jobs share the same memory space within the worker process.
+
+     Requires gevent to be installed and monkey-patching to be applied
+     (done automatically when this module is imported).
+     """
+
+     def __init__(
+         self,
+         queues,
+         name=None,
+         max_greenlets=1000,
+         default_result_ttl=500,
+         connection=None,
+         exc_handler=None,
+         exception_handlers=None,
+         default_worker_ttl=None,
+         job_class=None,
+         queue_class=None,
+         log_job_description=True,
+         job_monitoring_interval=30,
+         disable_default_exception_handler=False,
+         prepare_for_work=True,
+         maintenance_interval=600,
+     ):
+         super().__init__(
+             queues,
+             name=name,
+             default_result_ttl=default_result_ttl,
+             connection=connection,
+             exc_handler=exc_handler,
+             exception_handlers=exception_handlers,
+             default_worker_ttl=default_worker_ttl,
+             job_class=job_class,
+             queue_class=queue_class,
+             log_job_description=log_job_description,
+             job_monitoring_interval=job_monitoring_interval,
+             disable_default_exception_handler=disable_default_exception_handler,
+             prepare_for_work=prepare_for_work,
+             maintenance_interval=maintenance_interval,
+         )
+
+         self.max_greenlets = max_greenlets
+         self._pool = None
+         self.log = logger
+         logger.info(f"GeventWorker initialized with max_greenlets={self.max_greenlets}")
+
+     def work(
+         self,
+         burst=False,
+         logging_level="INFO",
+         date_format=worker.DEFAULT_LOGGING_DATE_FORMAT,
+         log_format=worker.DEFAULT_LOGGING_FORMAT,
+         max_jobs=None,
+         with_scheduler=False,
+     ):
+         """Starts the worker's main loop using gevent for concurrent job execution."""
+         self._install_signal_handlers()
+         did_perform_work = False
+         self.register_birth()
+         self.log.info("Worker %s: started, version %s", self.key, worker.VERSION)
+         self.set_state(worker.WorkerStatus.STARTED)
+
+         self._pool = gevent.pool.Pool(self.max_greenlets)
+         processed_jobs = 0
+
+         try:
+             while True:
+                 if self._stop_requested or (
+                     max_jobs is not None and processed_jobs >= max_jobs
+                 ):
+                     break
+
+                 self.run_maintenance_tasks()
+
+                 # Wait for space in the greenlet pool if it's full
+                 if self._pool.full():
+                     gevent.sleep(0.1)  # Yield to other greenlets
+                     continue
+
+                 try:
+                     result = self.dequeue_job_and_maintain_ttl(timeout=1)
+                 except DequeueTimeout:
+                     if burst:
+                         break
+                     gevent.sleep(0.1)
+                     continue
+                 except StopRequested:
+                     break
+                 except Exception:
+                     self.log.error("Error during dequeue:", exc_info=True)
+                     gevent.sleep(1)
+                     continue
+
+                 if result is None:
+                     if burst:
+                         did_perform_work = True
+                         break
+                     gevent.sleep(0.1)
+                     continue
+
+                 job, queue = result
+                 self.log.info("Processing job %s: %s", job.id, job.description)
+
+                 try:
+                     # Spawn job execution in the gevent pool
+                     greenlet = self._pool.spawn(self.execute_job, job, queue)
+                     # Optional: Add error callback
+                     greenlet.link_exception(
+                         # Bind job.id as a default argument: a plain closure over
+                         # `job` would log a later job's id by the time the
+                         # callback fires (late binding), as thread_worker.py does.
+                         lambda g, jid=job.id: self.log.error(
+                             f"Error in greenlet for job {jid}", exc_info=g.exception
+                         )
+                     )
+                 except Exception as e:
+                     self.log.error(f"Failed to spawn job {job.id}: {e}", exc_info=True)
+                     continue
+
+                 did_perform_work = True
+                 processed_jobs += 1
+
+         finally:
+             if self._pool:
+                 self.log.info("Waiting for active greenlets to complete...")
+                 self._pool.join(timeout=30)  # Wait up to 30 seconds for jobs to finish
+                 self._pool.kill()  # Kill any remaining greenlets
+             self.register_death()
+
+         return did_perform_work
+
+     def set_job_status(self, job, status):
+         """Sets the job status."""
+         if job:
+             job.set_status(status)
+
+     def handle_job_success(self, job, queue, started_job_registry):
+         """Handles job completion."""
+         try:
+             if started_job_registry:
+                 try:
+                     started_job_registry.remove(job)
+                 except NotImplementedError:
+                     pass
+             job.ended_at = utcnow()
+             job.set_status(JobStatus.FINISHED)
+         except Exception as e:
+             self.log.error(f"Error handling job success for {job.id}: {e}")
+
+     def handle_job_failure(self, job, queue, started_job_registry, exc_info=None):
+         """Handles job failure."""
+         try:
+             if started_job_registry:
+                 try:
+                     started_job_registry.remove(job)
+                 except NotImplementedError:
+                     pass
+             job.ended_at = utcnow()
+             job.set_status(JobStatus.FAILED)
+         except Exception as e:
+             self.log.error(f"Error handling job failure for {job.id}: {e}")
+
+     def execute_job(self, job, queue):
+         """Execute a job in a greenlet."""
+         job_id = job.id if job else "unknown"
+         started_job_registry = None  # defined up front so the except clause below never hits a NameError
+         try:
+             self.set_job_status(job, JobStatus.STARTED)
+             started_job_registry = queue.started_job_registry
+
+             try:
+                 started_job_registry.add(
+                     job,
+                     self.job_monitoring_interval * 1000
+                     if self.job_monitoring_interval
+                     else -1,
+                 )
+             except NotImplementedError:
+                 pass
+
+             rv = job.perform()
+             self.handle_job_success(
+                 job=job, queue=queue, started_job_registry=started_job_registry
+             )
+             return rv
+
+         except Exception as e:
+             self.log.error(f"Job {job_id} failed: {e}", exc_info=True)
+             self.handle_job_failure(
+                 job=job, queue=queue, started_job_registry=started_job_registry
+             )
+             raise
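For orientation, a worker of this shape starts like any regular RQ worker. A hedged launch sketch, assuming a local Redis instance and a queue named "default" (both names are illustrative, not mandated by the package):

    from redis import Redis
    from rq import Queue

    redis_conn = Redis()
    queue = Queue("default", connection=redis_conn)

    # Importing the module above applies gevent monkey-patching; each dequeued
    # job then runs in its own greenlet drawn from the shared pool.
    worker = GeventWorker([queue], connection=redis_conn, max_greenlets=200)
    worker.work(burst=False)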
flowerpower/job_queue/rq/concurrent_workers/thread_worker.py (new file)
@@ -0,0 +1,231 @@
+ # filepath: /Volumes/WD_Blue_1TB/coding/libs/flowerpower/src/flowerpower/worker/rq/concurrent_workers.py
+ import concurrent.futures
+ import datetime as dt
+ import logging
+ import os
+ import threading
+ import time
+ import traceback
+ from concurrent.futures import ThreadPoolExecutor
+
+ from loguru import logger
+ from rq import worker
+ from rq.exceptions import DequeueTimeout
+ from rq.job import JobStatus
+ from rq.worker import StopRequested
+
+ from flowerpower.utils.logging import setup_logging
+
+ utcnow = dt.datetime.utcnow
+ setup_logging("INFO")
+
+
+ class ThreadWorker(worker.Worker):
+     """
+     A variation of the RQ Worker that uses a ThreadPoolExecutor to perform
+     jobs concurrently within a single worker process.
+
+     Ideal for I/O bound tasks where the GIL is released during waits.
+     Jobs share the same memory space within the worker process.
+     """
+
+     def __init__(
+         self,
+         queues,
+         name=None,
+         max_threads=None,
+         default_result_ttl=500,
+         connection=None,
+         exc_handler=None,
+         exception_handlers=None,
+         default_worker_ttl=None,
+         job_class=None,
+         queue_class=None,
+         log_job_description=True,
+         job_monitoring_interval=30,
+         disable_default_exception_handler=False,
+         prepare_for_work=True,
+         maintenance_interval=600,
+     ):
+         super().__init__(
+             queues,
+             name=name,
+             default_result_ttl=default_result_ttl,
+             connection=connection,
+             exc_handler=exc_handler,
+             exception_handlers=exception_handlers,
+             default_worker_ttl=default_worker_ttl,
+             job_class=job_class,
+             queue_class=queue_class,
+             log_job_description=log_job_description,
+             job_monitoring_interval=job_monitoring_interval,
+             disable_default_exception_handler=disable_default_exception_handler,
+             prepare_for_work=prepare_for_work,
+             maintenance_interval=maintenance_interval,
+         )
+
+         self.max_threads = (
+             max_threads if max_threads is not None else (os.cpu_count() or 1) * 5
+         )
+         self._executor = None
+         self._futures = set()
+         self.log = logger
+
+     def work(
+         self,
+         burst=False,
+         logging_level="INFO",
+         date_format=worker.DEFAULT_LOGGING_DATE_FORMAT,
+         log_format=worker.DEFAULT_LOGGING_FORMAT,
+         max_jobs=None,
+         with_scheduler=False,
+     ):
+         """Starts the worker's main loop."""
+         self._install_signal_handlers()
+         did_perform_work = False
+         self.register_birth()
+         self.log.info("Worker %s: started, version %s", self.key, worker.VERSION)
+         self.set_state(worker.WorkerStatus.STARTED)
+
+         self._executor = ThreadPoolExecutor(max_workers=self.max_threads)
+         self._futures = set()
+         processed_jobs = 0
+
+         try:
+             while True:
+                 if self._stop_requested or (
+                     max_jobs is not None and processed_jobs >= max_jobs
+                 ):
+                     break
+
+                 self.run_maintenance_tasks()
+
+                 # Wait for space in the thread pool if it's full
+                 if len(self._futures) >= self.max_threads:
+                     done, self._futures = concurrent.futures.wait(
+                         self._futures,
+                         timeout=1.0,
+                         return_when=concurrent.futures.FIRST_COMPLETED,
+                     )
+                     for future in done:
+                         try:
+                             future.result()
+                         except Exception as e:
+                             self.log.error(
+                                 f"Error in completed job: {e}", exc_info=True
+                             )
+                     continue
+
+                 try:
+                     result = self.dequeue_job_and_maintain_ttl(timeout=1)
+                 except DequeueTimeout:
+                     if burst:
+                         break
+                     time.sleep(0.1)
+                     continue
+                 except StopRequested:
+                     break
+                 except Exception:
+                     self.log.error("Error during dequeue:", exc_info=True)
+                     time.sleep(1)
+                     continue
+
+                 if result is None:
+                     if burst:
+                         did_perform_work = True
+                         break
+                     time.sleep(0.1)
+                     continue
+
+                 job, queue = result
+                 self.log.info("Processing job %s: %s", job.id, job.description)
+
+                 try:
+                     future = self._executor.submit(self.execute_job, job, queue)
+                     self._futures.add(future)
+                     future.add_done_callback(
+                         lambda f, jid=job.id: self._handle_job_completion(f, jid)
+                     )
+                 except Exception as e:
+                     self.log.error(f"Failed to submit job {job.id}: {e}", exc_info=True)
+                     continue
+
+                 did_perform_work = True
+                 processed_jobs += 1
+
+         finally:
+             if self._executor:
+                 self._executor.shutdown(wait=True)
+             self.register_death()
+
+         return did_perform_work
+
+     def _handle_job_completion(self, future, job_id):
+         """Handle completion of a job future, including logging any errors."""
+         self._futures.discard(future)
+         try:
+             future.result()
+         except Exception as e:
+             self.log.error(f"Error in job {job_id}: {e}", exc_info=True)
+
+     def set_job_status(self, job, status):
+         """Sets the job status."""
+         if job:
+             job.set_status(status)
+
+     def handle_job_success(self, job, queue, started_job_registry):
+         """Handles job completion."""
+         try:
+             if started_job_registry:
+                 try:
+                     started_job_registry.remove(job)
+                 except NotImplementedError:
+                     pass
+             job.ended_at = utcnow()
+             job.set_status(JobStatus.FINISHED)
+         except Exception as e:
+             self.log.error(f"Error handling job success for {job.id}: {e}")
+
+     # def handle_job_failure(self, job, queue, started_job_registry, exec_string=None):
+     #     """Handles job failure."""
+     #     try:
+     #         if started_job_registry:
+     #             try:
+     #                 started_job_registry.remove(job)
+     #             except NotImplementedError:
+     #                 pass
+     #         job.ended_at = utcnow()
+     #         job.set_status(JobStatus.FAILED)
+     #     except Exception as e:
+     #         self.log.error(f"Error handling job failure for {job.id}: {e}")
+
+     def execute_job(self, job, queue):
+         """Execute a job in a worker thread."""
+         job_id = job.id if job else "unknown"
+         started_job_registry = None  # defined up front so the except clause below never hits a NameError
+         try:
+             self.set_job_status(job, JobStatus.STARTED)
+             started_job_registry = queue.started_job_registry
+
+             try:
+                 started_job_registry.add(
+                     job,
+                     self.job_monitoring_interval * 1000
+                     if self.job_monitoring_interval
+                     else -1,
+                 )
+             except NotImplementedError:
+                 pass
+
+             rv = job.perform()
+             self.handle_job_success(
+                 job=job, queue=queue, started_job_registry=started_job_registry
+             )
+             return rv
+
+         except Exception as e:
+             self.log.error(f"Job {job_id} failed: {e}", exc_info=True)
+             # With the override above commented out, this call resolves to
+             # rq.worker.Worker.handle_job_failure.
+             self.handle_job_failure(
+                 job=job, queue=queue, started_job_registry=started_job_registry
+             )
+             raise
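Launching a ThreadWorker looks the same as the gevent variant; the distinguishing knob is max_threads, which defaults to (os.cpu_count() or 1) * 5. A sketch under the same illustrative Redis assumptions as above:

    from redis import Redis
    from rq import Queue

    redis_conn = Redis()
    queue = Queue("default", connection=redis_conn)

    # No monkey-patching here: jobs run on ThreadPoolExecutor threads, which
    # suits I/O bound work that releases the GIL while waiting.
    worker = ThreadWorker([queue], connection=redis_conn, max_threads=16)
    worker.work(burst=True)  # drain the queue, then exit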