FlowerPower 0.20.0__py3-none-any.whl → 0.30.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowerpower/__init__.py +2 -6
- flowerpower/cfg/__init__.py +4 -11
- flowerpower/cfg/base.py +29 -25
- flowerpower/cfg/pipeline/__init__.py +3 -3
- flowerpower/cfg/pipeline/_schedule.py +32 -0
- flowerpower/cfg/pipeline/adapter.py +0 -5
- flowerpower/cfg/pipeline/builder.py +377 -0
- flowerpower/cfg/pipeline/run.py +89 -0
- flowerpower/cfg/project/__init__.py +8 -21
- flowerpower/cfg/project/adapter.py +0 -12
- flowerpower/cli/__init__.py +2 -28
- flowerpower/cli/pipeline.py +10 -4
- flowerpower/flowerpower.py +275 -585
- flowerpower/pipeline/base.py +19 -10
- flowerpower/pipeline/io.py +52 -46
- flowerpower/pipeline/manager.py +149 -91
- flowerpower/pipeline/pipeline.py +159 -87
- flowerpower/pipeline/registry.py +68 -33
- flowerpower/pipeline/visualizer.py +4 -4
- flowerpower/plugins/{_io → io}/__init__.py +1 -1
- flowerpower/settings/__init__.py +0 -2
- flowerpower/settings/{backend.py → _backend.py} +0 -19
- flowerpower/settings/logging.py +1 -1
- flowerpower/utils/logging.py +24 -12
- flowerpower/utils/misc.py +17 -0
- flowerpower-0.30.0.dist-info/METADATA +451 -0
- flowerpower-0.30.0.dist-info/RECORD +42 -0
- flowerpower/cfg/pipeline/schedule.py +0 -74
- flowerpower/cfg/project/job_queue.py +0 -111
- flowerpower/cli/job_queue.py +0 -1329
- flowerpower/cli/mqtt.py +0 -174
- flowerpower/job_queue/__init__.py +0 -205
- flowerpower/job_queue/base.py +0 -611
- flowerpower/job_queue/rq/__init__.py +0 -10
- flowerpower/job_queue/rq/_trigger.py +0 -37
- flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +0 -226
- flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +0 -228
- flowerpower/job_queue/rq/manager.py +0 -1893
- flowerpower/job_queue/rq/setup.py +0 -154
- flowerpower/job_queue/rq/utils.py +0 -69
- flowerpower/mqtt.py +0 -12
- flowerpower/plugins/mqtt/__init__.py +0 -12
- flowerpower/plugins/mqtt/cfg.py +0 -17
- flowerpower/plugins/mqtt/manager.py +0 -962
- flowerpower/settings/job_queue.py +0 -31
- flowerpower-0.20.0.dist-info/METADATA +0 -693
- flowerpower-0.20.0.dist-info/RECORD +0 -58
- {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/WHEEL +0 -0
- {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/entry_points.txt +0 -0
- {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/licenses/LICENSE +0 -0
- {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/top_level.txt +0 -0
flowerpower/cli/mqtt.py
DELETED
@@ -1,174 +0,0 @@
-import importlib
-import sys
-
-import typer
-
-from ..plugins.mqtt import run_pipeline_on_message as run_pipeline_on_message_
-from ..plugins.mqtt import start_listener as start_listener_
-from .utils import load_hook, parse_dict_or_list_param
-
-app = typer.Typer(help="MQTT management commands")
-
-
-@app.command()
-def start_listener(
-    on_message: str,
-    topic: str,
-    base_dir: str,
-    host: str = "localhost",
-    port: int = 1883,
-    username: str | None = None,
-    password: str | None = None,
-):
-    """Start an MQTT client to listen to messages on a topic
-
-    The connection to the MQTT broker is established using the provided configuration o a
-    MQTT event broker defined in the project configuration file `conf/project.yml`.
-    If not configuration is found, you have to provide the connection parameters,
-    such as `host`, `port`, `username`, and `password`.
-
-    The `on_message` module should contain a function `on_message` that will be called
-    with the message payload as argument.
-
-    Args:
-        on_message: Name of the module containing the on_message function
-        topic: MQTT topic to listen to
-        base_dir: Base directory for the module
-        host: MQTT broker host
-        port: MQTT broker port
-        username: MQTT broker username
-        password: MQTT broker password
-
-    Examples:
-        $ flowerpower mqtt start_listener --on-message my_module --topic my_topic --base-dir /path/to/module
-    """
-    sys.path.append(base_dir)
-    on_message_module = importlib.import_module(on_message)
-    start_listener_(
-        on_message=on_message_module.on_message,
-        topic=topic,
-        base_dir=base_dir,
-        host=host,
-        port=port,
-        username=username,
-        password=password,
-        background=False,
-    )
-
-
-@app.command()
-def run_pipeline_on_message(
-    name: str,
-    topic: str | None = None,
-    executor: str | None = None,
-    base_dir: str | None = None,
-    inputs: str | None = None,
-    final_vars: str | None = None,
-    config: str | None = None,
-    with_tracker: bool = False,
-    with_opentelemetry: bool = False,
-    with_progressbar: bool = False,
-    storage_options: str | None = None,
-    as_job: bool = False,
-    host: str | None = None,
-    port: int | None = None,
-    username: str | None = None,
-    password: str | None = None,
-    clean_session: bool = True,
-    qos: int = 0,
-    client_id: str | None = None,
-    client_id_suffix: str | None = None,
-    config_hook: str | None = None,
-    max_retries: int = typer.Option(
-        3, help="Maximum number of retry attempts if pipeline execution fails"
-    ),
-    retry_delay: float = typer.Option(
-        1.0, help="Base delay between retries in seconds"
-    ),
-    jitter_factor: float = typer.Option(
-        0.1, help="Random factor (0-1) applied to delay for jitter"
-    ),
-):
-    """Run a pipeline on a message
-
-    This command sets up an MQTT listener that executes a pipeline whenever a message is
-    received on the specified topic. The pipeline can be configured to retry on failure
-    using exponential backoff with jitter for better resilience.
-
-    Args:
-        name: Name of the pipeline
-        topic: MQTT topic to listen to
-        executor: Name of the executor
-        base_dir: Base directory for the pipeline
-        inputs: Inputs as JSON or key=value pairs or dict string
-        final_vars: Final variables as JSON or list
-        config: Config for the hamilton pipeline executor
-        with_tracker: Enable tracking with hamilton ui
-        with_opentelemetry: Enable OpenTelemetry tracing
-        with_progressbar: Enable progress bar
-        storage_options: Storage options as JSON, dict string or key=value pairs
-        as_job: Run as a job in the scheduler
-        host: MQTT broker host
-        port: MQTT broker port
-        username: MQTT broker username
-        password: MQTT broker password
-        clean_session: Whether to start a clean session with the broker
-        qos: MQTT Quality of Service level (0, 1, or 2)
-        client_id: Custom MQTT client identifier
-        client_id_suffix: Optional suffix to append to client_id
-        config_hook: Function to process incoming messages into pipeline config
-        max_retries: Maximum number of retry attempts if pipeline execution fails
-        retry_delay: Base delay between retries in seconds
-        jitter_factor: Random factor (0-1) applied to delay for jitter
-
-    Examples:
-        # Basic usage with a specific topic
-        $ flowerpower mqtt run-pipeline-on-message my_pipeline --topic sensors/data
-
-        # Configure retries for resilience
-        $ flowerpower mqtt run-pipeline-on-message my_pipeline --topic sensors/data --max-retries 5 --retry-delay 2.0
-
-        # Run as a job with custom MQTT settings
-        $ flowerpower mqtt run-pipeline-on-message my_pipeline --topic events/process --as-job --qos 2 --host mqtt.example.com
-
-        # Use a config hook to process messages
-        $ flowerpower mqtt run-pipeline-on-message my_pipeline --topic data/incoming --config-hook process_message
-
-
-    """
-
-    parsed_inputs = parse_dict_or_list_param(inputs, "dict")
-    parsed_config = parse_dict_or_list_param(config, "dict")
-    parsed_final_vars = parse_dict_or_list_param(final_vars, "list")
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
-
-    config_hook_function = None
-    if config_hook:
-        config_hook_function = load_hook(name, config_hook, base_dir, storage_options)
-
-    run_pipeline_on_message_(
-        name=name,
-        topic=topic,
-        executor=executor,
-        base_dir=base_dir,
-        inputs=parsed_inputs,
-        final_vars=parsed_final_vars,
-        config=parsed_config,
-        with_tracker=with_tracker,
-        with_opentelemetry=with_opentelemetry,
-        with_progressbar=with_progressbar,
-        storage_options=parsed_storage_options,
-        as_job=as_job,
-        host=host,
-        port=port,
-        username=username,
-        password=password,
-        clean_session=clean_session,
-        qos=qos,
-        client_id=client_id,
-        client_id_suffix=client_id_suffix,
-        config_hook=config_hook_function,
-        max_retries=max_retries,
-        retry_delay=retry_delay,
-        jitter_factor=jitter_factor,
-    )
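The deleted command above was a thin wrapper around the `flowerpower.plugins.mqtt` plugin API, which 0.30.0 removes as well (see `flowerpower/plugins/mqtt/` in the file list). For orientation only, here is a minimal sketch of what the 0.20.0 CLI call did under the hood; the broker settings, topic, pipeline name, and inputs are placeholders, not values taken from this diff:

```python
# Sketch of the plugin call the removed 0.20.0 CLI command wrapped
# (flowerpower.plugins.mqtt is itself deleted in 0.30.0).
from flowerpower.plugins.mqtt import run_pipeline_on_message

run_pipeline_on_message(
    name="my_pipeline",         # placeholder pipeline name
    topic="sensors/data",       # placeholder MQTT topic
    base_dir=".",               # project root
    inputs={"source": "mqtt"},  # the CLI parsed --inputs into a dict like this
    host="localhost",           # placeholder broker settings
    port=1883,
    qos=0,
    max_retries=3,              # retry with exponential backoff and jitter
    retry_delay=1.0,
    jitter_factor=0.1,
)
```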
flowerpower/job_queue/__init__.py
DELETED
@@ -1,205 +0,0 @@
-import importlib
-from typing import Any, Optional
-
-from fsspec_utils import AbstractFileSystem
-from loguru import logger
-
-from ..cfg.project import ProjectConfig
-from ..utils.logging import setup_logging
-
-if importlib.util.find_spec("rq"):
-    from .rq import RQBackend, RQManager
-else:
-    RQBackend = None
-    RQManager = None
-from .base import BaseBackend, BaseJobQueueManager
-
-setup_logging()
-
-
-class JobQueueBackend:
-    """A factory class for creating backend instances for different job queue types.
-
-    This class provides a unified interface for creating backend instances that handle
-    the storage, queuing, and event management for different job queue types. Each backend
-    type provides specific implementations for:
-    - Job storage and persistence
-    - Queue management
-    - Event handling and communication
-    - Result storage
-
-    Example:
-        ```python
-        # Create RQ backend with Redis
-        rq_backend = JobQueueBackend(
-            job_queue_type="rq",
-            uri="redis://localhost:6379/0",
-            queues=["high", "default", "low"]
-        )
-
-        ```
-    """
-
-    def __new__(
-        cls,
-        job_queue_type: str,
-        **kwargs,
-    ) -> BaseBackend:
-        """Create a new backend instance based on the specified job queue type.
-
-        Args:
-            job_queue_type: The type of backend to create. Valid values are:
-                - "rq": Redis Queue backend using Redis
-            **kwargs: Backend-specific configuration options:
-                For RQ:
-                - uri (str): Redis connection URI
-                - queues (list[str]): List of queue names
-                - result_ttl (int): Time to live for results in seconds
-
-        Returns:
-            BaseBackend: An instance of RQBackend depending on
-                the specified job queue type.
-
-        Raises:
-            ValueError: If an invalid job queue type is specified.
-            RuntimeError: If backend initialization fails due to configuration
-                or connection issues.
-
-        Example:
-            ```python
-            # Create RQ backend
-            rq_backend = Backend(
-                job_queue_type="rq",
-                uri="redis://localhost:6379/0",
-                queues=["high", "default", "low"],
-                result_ttl=3600
-            )
-
-            ```
-        """
-        if job_queue_type == "rq" and RQBackend is not None:
-            return RQBackend(**kwargs)
-        else:
-            if job_queue_type == "rq" and RQBackend is None:
-                logger.warning(
-                    "RQ is not installed. `JobQueueBackend` is not initialized and using the job queue is disabled. "
-                    "Install rq to use RQ. `uv pip install flowerpower[rq]` or `uv add flowerpower[rq]`"
-                )
-                return None
-            else:
-                raise ValueError(
-                    f"Invalid job queue type: {job_queue_type}. Valid types: ['rq']"
-                )
-
-
-class JobQueueManager:
-    """A factory class for creating job queue instances for job scheduling and execution.
-
-    This class provides a unified interface for creating different types of job queue instances
-    (RQ, APScheduler, Huey) based on the specified backend type. Each job queue type provides
-    different capabilities for job scheduling and execution.
-
-    The job queue instances handle:
-    - Job scheduling and execution
-    - Background task processing
-    - Job queue management
-    - Result storage and retrieval
-
-    Example:
-        ```python
-        # Create an RQ job queue
-        rq_worker = JobQueueManager(
-            type="rq",
-            name="my_worker",
-            log_level="DEBUG"
-        )
-
-
-        ```
-    """
-
-    def __new__(
-        cls,
-        type: str | None = None,
-        name: str | None = None,
-        base_dir: str | None = ".",
-        backend: JobQueueBackend | None = None,
-        storage_options: Optional[dict[str, Any]] = None,
-        fs: AbstractFileSystem | None = None,
-        log_level: str | None = None,
-        **kwargs,
-    ) -> BaseJobQueueManager:
-        """Create a new job queue instance based on the specified backend type.
-
-        Args:
-            type: The type of job queue to create. Valid values are:
-                - "rq": Redis Queue job queue for Redis-based job queuing
-            name: Name of the job queue instance. Used for identification in logs
-                and monitoring.
-            base_dir: Base directory for job queue files and configuration. Defaults
-                to current working directory if not specified.
-            backend: Pre-configured backend instance. If not provided, one will
-                be created based on configuration settings.
-            storage_options: Options for configuring filesystem storage access.
-                Example: {"mode": "async", "root": "/tmp", "protocol": "s3"}
-            fs: Custom filesystem implementation for storage operations.
-                Example: S3FileSystem, LocalFileSystem, etc.
-            log_level: Logging level for the job queue. Valid values are:
-                "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
-            **kwargs: Additional configuration options passed to the specific
-                job queue implementation.
-
-        Returns:
-            BaseJobQueueManager: An instance of the specified job queue type (RQManager).
-
-        Raises:
-            ValueError: If an invalid job queue type is specified.
-            ImportError: If required dependencies for the chosen job queue type
-                are not installed.
-            RuntimeError: If job queue initialization fails due to configuration
-                or connection issues.
-
-        Example:
-            ```python
-            # Basic RQ job queue
-            worker = JobQueueManager(type="rq", name="basic_worker")
-
-
-            ```
-        """
-        if type is None:
-            type = ProjectConfig.load(
-                base_dir=base_dir,
-                name=name,
-                fs=fs,
-                storage_options=storage_options or {},
-            ).job_queue.type
-
-        if type == "rq":
-            if RQManager is not None:
-                return RQManager(
-                    name=name,
-                    base_dir=base_dir,
-                    backend=backend,
-                    storage_options=storage_options,
-                    fs=fs,
-                    log_level=log_level,
-                    **kwargs,
-                )
-            else:
-                logger.warning(
-                    "`JobQueueManager` can not be initialized. This might be due to missing dependencies (RQ), invalid configuration or backend not being available."
-                )
-                return None
-
-        else:
-            raise ImportError(f"Invalid job queue type: {type}. Valid types: ['rq']")
-
-
-__all__ = [
-    "JobQueueManager",
-    "RQManager",
-    # "HueyWorker",
-    "JobQueueBackend",
-    "RQBackend",
-]
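The two factories in the deleted module above were the public entry points to the 0.20.0 job queue subsystem, and their docstrings document the intended call pattern. As a reference for code migrating off 0.20.0, here is a minimal sketch of that usage following those docstring examples; the Redis URI and worker name are placeholders:

```python
# Sketch of the 0.20.0 factory usage removed in 0.30.0, following the
# docstring examples above. Both factories log a warning and return None
# when the optional `rq` dependency is not installed.
from flowerpower.job_queue import JobQueueBackend, JobQueueManager

# Backend: holds the Redis connection and queue configuration (placeholder URI).
backend = JobQueueBackend(
    job_queue_type="rq",
    uri="redis://localhost:6379/0",
    queues=["high", "default", "low"],
    result_ttl=3600,
)

# Manager: when `type` is omitted it is resolved from the project
# configuration via ProjectConfig.load().
worker = JobQueueManager(
    type="rq",
    name="my_worker",   # placeholder worker name
    base_dir=".",
    backend=backend,
    log_level="DEBUG",
)
```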