FlowerPower 0.9.13.1__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. flowerpower/__init__.py +17 -2
  2. flowerpower/cfg/__init__.py +201 -149
  3. flowerpower/cfg/base.py +122 -24
  4. flowerpower/cfg/pipeline/__init__.py +254 -0
  5. flowerpower/cfg/pipeline/adapter.py +66 -0
  6. flowerpower/cfg/pipeline/run.py +40 -11
  7. flowerpower/cfg/pipeline/schedule.py +69 -79
  8. flowerpower/cfg/project/__init__.py +149 -0
  9. flowerpower/cfg/project/adapter.py +57 -0
  10. flowerpower/cfg/project/job_queue.py +165 -0
  11. flowerpower/cli/__init__.py +92 -37
  12. flowerpower/cli/job_queue.py +878 -0
  13. flowerpower/cli/mqtt.py +32 -1
  14. flowerpower/cli/pipeline.py +559 -406
  15. flowerpower/cli/utils.py +29 -18
  16. flowerpower/flowerpower.py +12 -8
  17. flowerpower/fs/__init__.py +20 -2
  18. flowerpower/fs/base.py +350 -26
  19. flowerpower/fs/ext.py +797 -216
  20. flowerpower/fs/storage_options.py +1097 -55
  21. flowerpower/io/base.py +13 -18
  22. flowerpower/io/loader/__init__.py +28 -0
  23. flowerpower/io/loader/deltatable.py +7 -10
  24. flowerpower/io/metadata.py +1 -0
  25. flowerpower/io/saver/__init__.py +28 -0
  26. flowerpower/io/saver/deltatable.py +4 -3
  27. flowerpower/job_queue/__init__.py +252 -0
  28. flowerpower/job_queue/apscheduler/__init__.py +11 -0
  29. flowerpower/job_queue/apscheduler/_setup/datastore.py +110 -0
  30. flowerpower/job_queue/apscheduler/_setup/eventbroker.py +93 -0
  31. flowerpower/job_queue/apscheduler/manager.py +1063 -0
  32. flowerpower/job_queue/apscheduler/setup.py +524 -0
  33. flowerpower/job_queue/apscheduler/trigger.py +169 -0
  34. flowerpower/job_queue/apscheduler/utils.py +309 -0
  35. flowerpower/job_queue/base.py +382 -0
  36. flowerpower/job_queue/rq/__init__.py +10 -0
  37. flowerpower/job_queue/rq/_trigger.py +37 -0
  38. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +226 -0
  39. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +231 -0
  40. flowerpower/job_queue/rq/manager.py +1449 -0
  41. flowerpower/job_queue/rq/setup.py +150 -0
  42. flowerpower/job_queue/rq/utils.py +69 -0
  43. flowerpower/pipeline/__init__.py +5 -0
  44. flowerpower/pipeline/base.py +118 -0
  45. flowerpower/pipeline/io.py +407 -0
  46. flowerpower/pipeline/job_queue.py +505 -0
  47. flowerpower/pipeline/manager.py +1586 -0
  48. flowerpower/pipeline/registry.py +560 -0
  49. flowerpower/pipeline/runner.py +560 -0
  50. flowerpower/pipeline/visualizer.py +142 -0
  51. flowerpower/plugins/mqtt/__init__.py +12 -0
  52. flowerpower/plugins/mqtt/cfg.py +16 -0
  53. flowerpower/plugins/mqtt/manager.py +789 -0
  54. flowerpower/settings.py +110 -0
  55. flowerpower/utils/logging.py +21 -0
  56. flowerpower/utils/misc.py +57 -9
  57. flowerpower/utils/sql.py +122 -24
  58. flowerpower/utils/templates.py +2 -142
  59. flowerpower-1.0.0b1.dist-info/METADATA +324 -0
  60. flowerpower-1.0.0b1.dist-info/RECORD +94 -0
  61. flowerpower/_web/__init__.py +0 -61
  62. flowerpower/_web/routes/config.py +0 -103
  63. flowerpower/_web/routes/pipelines.py +0 -173
  64. flowerpower/_web/routes/scheduler.py +0 -136
  65. flowerpower/cfg/pipeline/tracker.py +0 -14
  66. flowerpower/cfg/project/open_telemetry.py +0 -8
  67. flowerpower/cfg/project/tracker.py +0 -11
  68. flowerpower/cfg/project/worker.py +0 -19
  69. flowerpower/cli/scheduler.py +0 -309
  70. flowerpower/cli/web.py +0 -44
  71. flowerpower/event_handler.py +0 -23
  72. flowerpower/mqtt.py +0 -609
  73. flowerpower/pipeline.py +0 -2499
  74. flowerpower/scheduler.py +0 -680
  75. flowerpower/tui.py +0 -79
  76. flowerpower/utils/datastore.py +0 -186
  77. flowerpower/utils/eventbroker.py +0 -127
  78. flowerpower/utils/executor.py +0 -58
  79. flowerpower/utils/trigger.py +0 -140
  80. flowerpower-0.9.13.1.dist-info/METADATA +0 -586
  81. flowerpower-0.9.13.1.dist-info/RECORD +0 -76
  82. /flowerpower/{cfg/pipeline/params.py → cli/worker.py} +0 -0
  83. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/WHEEL +0 -0
  84. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/entry_points.txt +0 -0
  85. {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,150 @@
1
+ from dataclasses import dataclass, field
2
+
3
+ import redis
4
+
5
+ from ..base import BaseBackend
6
+
7
+ # Enums for RQ DataStore and EventBroker types
8
+ # class RQBackendType(BackendType):
9
+ # REDIS = "redis"
10
+ # MEMORY = "memory"
11
+
12
+
13
@dataclass  # (slots=True)
class RQBackend(BaseBackend):
    """RQ Backend implementation for Redis Queue (RQ) job storage and queuing.

    This class provides a Redis-based backend for RQ job storage and queue management.
    It supports both Redis and in-memory storage options for development/testing.

    Args:
        queues (str | list[str] | None): Names of queues to create. Defaults to ["default"].
        num_workers (int): Number of worker processes to use. Defaults to 1.

    Attributes:
        type (str): Backend type, either "redis" or "memory". Inherited from BaseBackend.
        uri (str): Connection URI. Inherited from BaseBackend.
        result_namespace (str): Namespace for storing job results in Redis.
        _client (redis.Redis | dict): Redis client or dict for memory storage.

    Raises:
        ValueError: If an invalid backend type is specified.

    Example:
        ```python
        # Create Redis backend with default queue
        backend = RQBackend(
            type="redis",
            uri="redis://localhost:6379/0"
        )

        # Create Redis backend with multiple queues
        backend = RQBackend(
            type="redis",
            uri="redis://localhost:6379/0",
            queues=["high", "default", "low"]
        )

        # Create in-memory backend for testing
        backend = RQBackend(type="memory", queues=["test"])
        ```
    """

    # default_factory avoids a shared mutable default across instances
    queues: str | list[str] | None = field(default_factory=lambda: ["default"])
    num_workers: int = field(default=1)

    def __post_init__(self) -> None:
        """Initialize and validate the backend configuration.

        This method is called automatically after instance creation. It:
        1. Sets default type to "redis" if not specified
        2. Calls parent class initialization
        3. Validates backend type
        4. Sets default result namespace

        Raises:
            ValueError: If an unsupported backend type is specified.
                Only "redis" and "memory" types are supported.
        """
        # Default to Redis when the caller did not specify a backend type.
        if self.type is None:
            self.type = "redis"
        super().__post_init__()

        # NOTE(review): after super().__post_init__(), self.type is assumed to be
        # an enum-like object exposing is_memory_type/is_redis_type and the
        # REDIS/MEMORY members — confirm against BaseBackend.
        if not self.type.is_memory_type and not self.type.is_redis_type:
            raise ValueError(
                f"Invalid backend type: {self.type}. Valid types: {[self.type.REDIS, self.type.MEMORY]}"
            )

        # Keep a namespace already set (e.g. by BaseBackend); otherwise apply the default.
        self.result_namespace = getattr(self, "result_namespace", "flowerpower:results")

    def setup(self) -> None:
        """Set up the Redis client or in-memory storage.

        This method initializes the backend storage based on the configured type.
        For Redis, it creates a Redis client with the specified connection parameters.
        For in-memory storage, it creates a simple dictionary.

        Raises:
            ValueError: If an unsupported backend type is specified.
            redis.RedisError: If Redis connection fails.

        Example:
            ```python
            backend = RQBackend(
                type="redis",
                host="localhost",
                port=6379,
                password="secret",
                database="0",
                ssl=True
            )
            backend.setup()
            ```
        """
        # Use connection info from BaseBackend to create Redis client
        if self.type.is_redis_type:
            # Parse db from database or default to 0
            db = 0
            if self.database is not None:
                try:
                    db = int(self.database)
                except Exception:
                    # Non-numeric database names silently fall back to db 0.
                    db = 0
            self._client = redis.Redis(
                host=self.host or self.type.default_host,
                port=self.port or self.type.default_port,
                db=db,
                password=self.password,
                ssl=self.ssl,
            )
        elif self.type.is_memory_type:
            # Simple in-memory dict for testing
            self._client = {}
        else:
            raise ValueError(f"Unsupported RQBackend type: {self.type}")

    @property
    def client(self) -> redis.Redis | dict:
        """Get the initialized storage client.

        This property provides access to the Redis client or in-memory dictionary,
        initializing it if needed.

        Returns:
            redis.Redis | dict: The Redis client for Redis backend,
                or dictionary for in-memory backend.

        Example:
            ```python
            backend = RQBackend(type="redis", uri="redis://localhost:6379/0")
            redis_client = backend.client  # Gets Redis client
            redis_client.set("key", "value")

            backend = RQBackend(type="memory")
            mem_dict = backend.client  # Gets dict for testing
            mem_dict["key"] = "value"
            ```
        """
        # NOTE(review): assumes BaseBackend initializes _client to None;
        # otherwise this raises AttributeError before setup() — confirm.
        if self._client is None:
            self.setup()
        return self._client
@@ -0,0 +1,69 @@
1
+ from rich.console import Console
2
+ from rich.table import Table
3
+ from rq import Queue
4
+ from rq_scheduler import Scheduler
5
+
6
+
7
def show_schedules(scheduler: Scheduler) -> None:
    """
    Render the scheduler's jobs as a rich table on the console.

    Args:
        scheduler (Scheduler): An instance of rq_scheduler.Scheduler.
    """
    console = Console()
    table = Table(title="Scheduled Jobs")

    # Column headings paired with their display styles.
    for heading, style in (
        ("ID", "cyan"),
        ("Function", "green"),
        ("Schedule", "yellow"),
        ("Next Run", "magenta"),
    ):
        table.add_column(heading, style=style)

    for job in scheduler.get_jobs():
        # Describe the schedule from job metadata; cron takes precedence.
        schedule_type = "Unknown"
        if hasattr(job, "meta"):
            cron = job.meta.get("cron")
            interval = job.meta.get("interval")
            if cron:
                schedule_type = f"Cron: {cron}"
            elif interval:
                schedule_type = f"Interval: {interval}s"

        if hasattr(job, "scheduled_at") and job.scheduled_at:
            next_run = job.scheduled_at.strftime("%Y-%m-%d %H:%M:%S")
        else:
            next_run = "Unknown"

        table.add_row(job.id, job.func_name, schedule_type, next_run)

    console.print(table)
40
+
41
+
42
def show_jobs(queue: Queue) -> None:
    """
    Display the jobs in a queue as a rich table on the console.

    Args:
        queue (Queue): An instance of rq.Queue.
    """
    console = Console()
    table = Table(title="Jobs")

    table.add_column("ID", style="cyan")
    table.add_column("Function", style="green")
    table.add_column("Status", style="yellow")
    table.add_column("Enqueued At", style="magenta")
    table.add_column("Result", style="blue")

    for job in queue.get_jobs():
        table.add_row(
            job.id,
            job.func_name,
            job.get_status(),
            job.enqueued_at.strftime("%Y-%m-%d %H:%M:%S")
            if job.enqueued_at
            else "Unknown",
            # Explicit None check so falsy results (0, "", False, []) are
            # displayed instead of being misreported as "None".
            str(job.result) if job.result is not None else "None",
        )

    console.print(table)
@@ -0,0 +1,5 @@
1
+ from .manager import PipelineManager
2
+
3
+ __all__ = [
4
+ "PipelineManager",
5
+ ]
@@ -0,0 +1,118 @@
1
+ import importlib
2
+ import posixpath
3
+ import sys
4
+ from types import TracebackType
5
+
6
+ from loguru import logger
7
+ from munch import Munch
8
+
9
+ from ..cfg import PipelineConfig, ProjectConfig
10
+ from ..fs import AbstractFileSystem, BaseStorageOptions, get_filesystem
11
+ from ..utils.logging import setup_logging
12
+
13
+ setup_logging()
14
+
15
+
16
def load_module(name: str, reload: bool = False):
    """
    Import a module by name, reusing the already-imported module when possible.

    Args:
        name (str): Dotted name of the module to import.
        reload (bool): When True and the module is already imported,
            re-execute it via importlib.reload so code changes take effect.
            Defaults to False.

    Returns:
        module: The loaded (and possibly reloaded) module object.
    """
    if name in sys.modules:
        if reload:
            # Re-execute the cached module in place and return it.
            return importlib.reload(sys.modules[name])
        return sys.modules[name]
    return importlib.import_module(name)
31
+
32
+
33
class BasePipeline:
    """
    Base class for all pipelines.

    Resolves the filesystem for the project, ensures the configuration and
    pipelines directories exist, and puts the pipelines directory on
    ``sys.path`` so pipeline modules can be imported.
    """

    def __init__(
        self,
        base_dir: str | None = None,
        storage_options: dict | Munch | BaseStorageOptions | None = None,
        fs: AbstractFileSystem | None = None,
        cfg_dir: str = "conf",
        pipelines_dir: str = "pipelines",
        job_queue_type: str | None = None,  # New parameter for worker backend
    ):
        """
        Args:
            base_dir: Root directory of the project.
            storage_options: Options forwarded to the filesystem. ``None``
                means no options (avoids a shared mutable ``{}`` default).
            fs: Pre-built filesystem; constructed from ``base_dir`` and
                ``storage_options`` when not given.
            cfg_dir: Name of the configuration directory.
            pipelines_dir: Name of the pipelines (modules) directory.
            job_queue_type: Worker/job-queue backend identifier, if any.
        """
        self._base_dir = base_dir
        # Normalize the default here instead of using a mutable default argument.
        self._storage_options = {} if storage_options is None else storage_options
        if fs is None:
            fs = get_filesystem(self._base_dir, **self._storage_options)
        self._fs = fs
        self._cfg_dir = cfg_dir
        self._pipelines_dir = pipelines_dir
        self._job_queue_type = job_queue_type

        # Best-effort directory creation; a failure is logged, not fatal,
        # so read-only filesystems can still load existing pipelines.
        try:
            self._fs.makedirs(f"{self._cfg_dir}/pipelines", exist_ok=True)
            self._fs.makedirs(self._pipelines_dir, exist_ok=True)
        except Exception as e:
            logger.error(f"Error creating directories: {e}")

        self._add_modules_path()

    def __enter__(self) -> "BasePipeline":
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        # No resources to release; context-manager support is for ergonomics.
        pass

    def _add_modules_path(self):
        """
        Make the pipelines directory importable.

        Syncs the filesystem cache when applicable, then prepends the
        pipelines path to ``sys.path`` if it is not already present.

        Returns:
            None
        """
        if self._fs.is_cache_fs:
            # Ensure cached files are up to date before importing from them.
            self._fs.sync()

        modules_path = posixpath.join(self._fs.path, self._pipelines_dir)
        if modules_path not in sys.path:
            sys.path.insert(0, modules_path)

    def _load_project_cfg(self) -> ProjectConfig:
        """
        Load the project configuration.

        Returns:
            ProjectConfig: The loaded project configuration.
        """
        return ProjectConfig.load(
            base_dir=self._base_dir,
            job_queue_type=self._job_queue_type,
            fs=self._fs,
            storage_options=self._storage_options,
        )

    def _load_pipeline_cfg(self, name: str) -> PipelineConfig:
        """
        Load the pipeline configuration.

        Args:
            name (str): The name of the pipeline.

        Returns:
            PipelineConfig: The loaded pipeline configuration.
        """
        return PipelineConfig.load(
            base_dir=self._base_dir,
            name=name,
            fs=self._fs,
            storage_options=self._storage_options,
        )