FlowerPower 0.20.0-py3-none-any.whl → 0.30.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Files changed (51)
  1. flowerpower/__init__.py +2 -6
  2. flowerpower/cfg/__init__.py +4 -11
  3. flowerpower/cfg/base.py +29 -25
  4. flowerpower/cfg/pipeline/__init__.py +3 -3
  5. flowerpower/cfg/pipeline/_schedule.py +32 -0
  6. flowerpower/cfg/pipeline/adapter.py +0 -5
  7. flowerpower/cfg/pipeline/builder.py +377 -0
  8. flowerpower/cfg/pipeline/run.py +89 -0
  9. flowerpower/cfg/project/__init__.py +8 -21
  10. flowerpower/cfg/project/adapter.py +0 -12
  11. flowerpower/cli/__init__.py +2 -28
  12. flowerpower/cli/pipeline.py +10 -4
  13. flowerpower/flowerpower.py +275 -585
  14. flowerpower/pipeline/base.py +19 -10
  15. flowerpower/pipeline/io.py +52 -46
  16. flowerpower/pipeline/manager.py +149 -91
  17. flowerpower/pipeline/pipeline.py +159 -87
  18. flowerpower/pipeline/registry.py +68 -33
  19. flowerpower/pipeline/visualizer.py +4 -4
  20. flowerpower/plugins/{_io → io}/__init__.py +1 -1
  21. flowerpower/settings/__init__.py +0 -2
  22. flowerpower/settings/{backend.py → _backend.py} +0 -19
  23. flowerpower/settings/logging.py +1 -1
  24. flowerpower/utils/logging.py +24 -12
  25. flowerpower/utils/misc.py +17 -0
  26. flowerpower-0.30.0.dist-info/METADATA +451 -0
  27. flowerpower-0.30.0.dist-info/RECORD +42 -0
  28. flowerpower/cfg/pipeline/schedule.py +0 -74
  29. flowerpower/cfg/project/job_queue.py +0 -111
  30. flowerpower/cli/job_queue.py +0 -1329
  31. flowerpower/cli/mqtt.py +0 -174
  32. flowerpower/job_queue/__init__.py +0 -205
  33. flowerpower/job_queue/base.py +0 -611
  34. flowerpower/job_queue/rq/__init__.py +0 -10
  35. flowerpower/job_queue/rq/_trigger.py +0 -37
  36. flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +0 -226
  37. flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +0 -228
  38. flowerpower/job_queue/rq/manager.py +0 -1893
  39. flowerpower/job_queue/rq/setup.py +0 -154
  40. flowerpower/job_queue/rq/utils.py +0 -69
  41. flowerpower/mqtt.py +0 -12
  42. flowerpower/plugins/mqtt/__init__.py +0 -12
  43. flowerpower/plugins/mqtt/cfg.py +0 -17
  44. flowerpower/plugins/mqtt/manager.py +0 -962
  45. flowerpower/settings/job_queue.py +0 -31
  46. flowerpower-0.20.0.dist-info/METADATA +0 -693
  47. flowerpower-0.20.0.dist-info/RECORD +0 -58
  48. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/WHEEL +0 -0
  49. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/entry_points.txt +0 -0
  50. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/licenses/LICENSE +0 -0
  51. {flowerpower-0.20.0.dist-info → flowerpower-0.30.0.dist-info}/top_level.txt +0 -0
@@ -57,7 +57,7 @@ else:

  from ..cfg import PipelineConfig, ProjectConfig
  from ..cfg.pipeline.adapter import AdapterConfig as PipelineAdapterConfig
- from ..cfg.pipeline.run import ExecutorConfig, WithAdapterConfig
+ from ..cfg.pipeline.run import ExecutorConfig, RunConfig, WithAdapterConfig
  from ..cfg.project.adapter import AdapterConfig as ProjectAdapterConfig

  if TYPE_CHECKING:
@@ -90,67 +90,131 @@ class Pipeline(msgspec.Struct):
  if not settings.HAMILTON_AUTOLOAD_EXTENSIONS:
  disable_autoload()

+ def _merge_run_config_with_kwargs(self, run_config: RunConfig, kwargs: dict) -> RunConfig:
+ """Merge kwargs into the run_config object.
+
+ Args:
+ run_config: The base RunConfig object to merge into
+ kwargs: Additional parameters to merge into the run_config
+
+ Returns:
+ Updated RunConfig object with merged kwargs
+ """
+ from copy import deepcopy
+
+ # Create a deep copy of the run_config to avoid modifying the original
+ merged_config = deepcopy(run_config)
+
+ # Handle each possible kwarg
+ for key, value in kwargs.items():
+ if key == 'inputs' and value is not None:
+ if merged_config.inputs is None:
+ merged_config.inputs = {}
+ merged_config.inputs.update(value)
+ elif key == 'final_vars' and value is not None:
+ if merged_config.final_vars is None:
+ merged_config.final_vars = []
+ merged_config.final_vars = value
+ elif key == 'config' and value is not None:
+ if merged_config.config is None:
+ merged_config.config = {}
+ merged_config.config.update(value)
+ elif key == 'cache' and value is not None:
+ merged_config.cache = value
+ elif key == 'executor_cfg' and value is not None:
+ if isinstance(value, str):
+ merged_config.executor = ExecutorConfig(type=value)
+ elif isinstance(value, dict):
+ merged_config.executor = ExecutorConfig.from_dict(value)
+ elif isinstance(value, ExecutorConfig):
+ merged_config.executor = value
+ elif key == 'with_adapter_cfg' and value is not None:
+ if isinstance(value, dict):
+ merged_config.with_adapter = WithAdapterConfig.from_dict(value)
+ elif isinstance(value, WithAdapterConfig):
+ merged_config.with_adapter = value
+ elif key == 'pipeline_adapter_cfg' and value is not None:
+ merged_config.pipeline_adapter_cfg = value
+ elif key == 'project_adapter_cfg' and value is not None:
+ merged_config.project_adapter_cfg = value
+ elif key == 'adapter' and value is not None:
+ if merged_config.adapter is None:
+ merged_config.adapter = {}
+ merged_config.adapter.update(value)
+ elif key == 'reload' and value is not None:
+ merged_config.reload = value
+ elif key == 'log_level' and value is not None:
+ merged_config.log_level = value
+ elif key == 'max_retries' and value is not None:
+ merged_config.max_retries = value
+ elif key == 'retry_delay' and value is not None:
+ merged_config.retry_delay = value
+ elif key == 'jitter_factor' and value is not None:
+ merged_config.jitter_factor = value
+ elif key == 'retry_exceptions' and value is not None:
+ merged_config.retry_exceptions = value
+ elif key == 'on_success' and value is not None:
+ merged_config.on_success = value
+ elif key == 'on_failure' and value is not None:
+ merged_config.on_failure = value
+
+ return merged_config
+
  def run(
  self,
- inputs: dict | None = None,
- final_vars: list[str] | None = None,
- config: dict | None = None,
- cache: dict | None = None,
- executor_cfg: str | dict | ExecutorConfig | None = None,
- with_adapter_cfg: dict | WithAdapterConfig | None = None,
- pipeline_adapter_cfg: dict | PipelineAdapterConfig | None = None,
- project_adapter_cfg: dict | ProjectAdapterConfig | None = None,
- adapter: dict[str, Any] | None = None,
- reload: bool = False,
- log_level: str | None = None,
- max_retries: int | None = None,
- retry_delay: float | None = None,
- jitter_factor: float | None = None,
- retry_exceptions: tuple = (
- Exception,
- HTTPError,
- UnauthorizedException,
- ),
- on_success: Callable | tuple[Callable, tuple | None, dict | None] | None = None,
- on_failure: Callable | tuple[Callable, tuple | None, dict | None] | None = None,
+ run_config: RunConfig | None = None,
+ **kwargs
  ) -> dict[str, Any]:
  """Execute the pipeline with the given parameters.

  Args:
- inputs: Override pipeline input values
- final_vars: Specify which output variables to return
- config: Configuration for Hamilton pipeline executor
- cache: Cache configuration for results
- executor_cfg: Execution configuration
- with_adapter_cfg: Adapter settings for pipeline execution
- pipeline_adapter_cfg: Pipeline-specific adapter configuration
- project_adapter_cfg: Project-wide adapter configuration
- adapter: Additional Hamilton adapters
- reload: Whether to reload the module
- log_level: Log level for execution
- max_retries: Maximum number of retry attempts
- retry_delay: Base delay between retries in seconds
- jitter_factor: Factor to apply for jitter
- retry_exceptions: Exceptions to catch for retries
- on_success: Callback for successful execution
- on_failure: Callback for failed execution
+ run_config: Run configuration object containing all execution parameters.
+ If None, uses the pipeline's default configuration.
+ **kwargs: Additional parameters to override or extend the run_config.

  Returns:
  The result of executing the pipeline
  """
  start_time = dt.datetime.now()

+ # Initialize run_config with pipeline defaults if not provided
+ run_config = run_config or self.config.run
+
+ # Merge kwargs into the run_config
+ if kwargs:
+ run_config = self._merge_run_config_with_kwargs(run_config, kwargs)
+
  # Reload module if requested
- if reload:
+ if run_config.reload:
  self._reload_module()

- # Set up configuration with defaults from pipeline config
- inputs = inputs or self.config.run.inputs or {}
- final_vars = final_vars or self.config.run.final_vars or []
- config = {**(self.config.run.config or {}), **(config or {})}
- cache = cache or self.config.run.cache or {}
-
  # Set up retry configuration
+ retry_config = self._setup_retry_config(
+ run_config.max_retries, run_config.retry_delay, run_config.jitter_factor, run_config.retry_exceptions
+ )
+ max_retries = retry_config["max_retries"]
+ retry_delay = retry_config["retry_delay"]
+ jitter_factor = retry_config["jitter_factor"]
+ retry_exceptions = retry_config["retry_exceptions"]
+
+ # Execute with retry logic
+ return self._execute_with_retry(
+ run_config=run_config,
+ max_retries=max_retries,
+ retry_delay=retry_delay,
+ jitter_factor=jitter_factor,
+ retry_exceptions=retry_exceptions,
+ start_time=start_time,
+ )
+
+ def _setup_retry_config(
+ self,
+ max_retries: int | None,
+ retry_delay: float | None,
+ jitter_factor: float | None,
+ retry_exceptions: tuple | None,
+ ) -> dict:
+ """Set up retry configuration with defaults and validation."""
  max_retries = max_retries or self.config.run.max_retries or 0
  retry_delay = retry_delay or self.config.run.retry_delay or 1.0
  jitter_factor = jitter_factor or self.config.run.jitter_factor or 0.1
@@ -186,25 +250,30 @@ class Pipeline(msgspec.Struct):
  elif not retry_exceptions:
  retry_exceptions = (Exception,)

- # Execute with retry logic
+ return {
+ "max_retries": max_retries,
+ "retry_delay": retry_delay,
+ "jitter_factor": jitter_factor,
+ "retry_exceptions": retry_exceptions,
+ }
+
+ def _execute_with_retry(
+ self,
+ run_config: RunConfig,
+ max_retries: int,
+ retry_delay: float,
+ jitter_factor: float,
+ retry_exceptions: tuple,
+ start_time: dt.datetime,
+ ) -> dict[str, Any]:
+ """Execute pipeline with retry logic."""
  for attempt in range(max_retries + 1):
  try:
  logger.info(
  f"🚀 Running pipeline '{self.name}' (attempt {attempt + 1}/{max_retries + 1})"
  )

- result = self._execute_pipeline(
- inputs=inputs,
- final_vars=final_vars,
- config=config,
- cache=cache,
- executor_cfg=executor_cfg,
- with_adapter_cfg=with_adapter_cfg,
- pipeline_adapter_cfg=pipeline_adapter_cfg,
- project_adapter_cfg=project_adapter_cfg,
- adapter=adapter,
- log_level=log_level,
- )
+ result = self._execute_pipeline(run_config=run_config)

  end_time = dt.datetime.now()
  duration = humanize.naturaldelta(end_time - start_time)
@@ -214,8 +283,8 @@
  )

  # Execute success callback if provided
- if on_success:
- self._execute_callback(on_success, result, None)
+ if run_config.on_success:
+ self._execute_callback(run_config.on_success, result, None)

  return result

@@ -239,8 +308,8 @@
  )

  # Execute failure callback if provided
- if on_failure:
- self._execute_callback(on_failure, None, e)
+ if run_config.on_failure:
+ self._execute_callback(run_config.on_failure, None, e)

  raise
  except Exception as e:
@@ -250,39 +319,39 @@
  logger.error(f"❌ Pipeline '{self.name}' failed in {duration}: {e}")

  # Execute failure callback if provided
- if on_failure:
- self._execute_callback(on_failure, None, e)
+ if run_config.on_failure:
+ self._execute_callback(run_config.on_failure, None, e)

  raise

- def _execute_pipeline(
+ def _setup_execution_context(
  self,
- inputs: dict,
- final_vars: list[str],
- config: dict,
- cache: dict,
- executor_cfg: str | dict | ExecutorConfig | None,
- with_adapter_cfg: dict | WithAdapterConfig | None,
- pipeline_adapter_cfg: dict | PipelineAdapterConfig | None,
- project_adapter_cfg: dict | ProjectAdapterConfig | None,
- adapter: dict[str, Any] | None,
- log_level: str | None,
- ) -> dict[str, Any]:
- """Execute the pipeline with Hamilton."""
+ run_config: RunConfig,
+ ) -> tuple[executors.BaseExecutor, Callable | None, list]:
+ """Set up executor and adapters for pipeline execution."""
  # Get executor and adapters
- executor, shutdown_func = self._get_executor(executor_cfg)
+ executor, shutdown_func = self._get_executor(run_config.executor)
  adapters = self._get_adapters(
- with_adapter_cfg=with_adapter_cfg,
- pipeline_adapter_cfg=pipeline_adapter_cfg,
- project_adapter_cfg=project_adapter_cfg,
- adapter=adapter,
+ with_adapter_cfg=run_config.with_adapter,
+ pipeline_adapter_cfg=run_config.pipeline_adapter_cfg,
+ project_adapter_cfg=run_config.project_adapter_cfg,
+ adapter=run_config.adapter,
  )
+ return executor, shutdown_func, adapters
+
+ def _execute_pipeline(
+ self,
+ run_config: RunConfig,
+ ) -> dict[str, Any]:
+ """Execute the pipeline with Hamilton."""
+ # Set up execution context
+ executor, shutdown_func, adapters = self._setup_execution_context(run_config=run_config)

  try:
  # Create Hamilton driver
  dr = (
  driver.Builder()
- .with_config(config)
+ .with_config(run_config.config)
  .with_modules(self.module)
  .with_adapters(*adapters)
  .build()
@@ -290,8 +359,8 @@

  # Execute the pipeline
  result = dr.execute(
- final_vars=final_vars,
- inputs=inputs,
+ final_vars=run_config.final_vars,
+ inputs=run_config.inputs,
  )

  return result
@@ -566,6 +635,9 @@
  try:
  importlib.reload(self.module)
  logger.debug(f"Reloaded module for pipeline '{self.name}'")
- except Exception as e:
+ except (ImportError, ModuleNotFoundError, AttributeError) as e:
  logger.error(f"Failed to reload module for pipeline '{self.name}': {e}")
  raise
+ except Exception as e:
+ logger.error(f"Unexpected error reloading module for pipeline '{self.name}': {e}")
+ raise
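
Note on the reworked API: the hunks above replace the long keyword-argument signature of Pipeline.run() with a single RunConfig object plus optional keyword overrides that are merged via _merge_run_config_with_kwargs(). A minimal usage sketch follows; it assumes an already constructed Pipeline instance named `pipeline`, and that RunConfig and ExecutorConfig (imported from flowerpower.cfg.pipeline.run, per the import hunk) accept these constructor keywords — the field names mirror the attributes referenced in the diff, while the executor type string "threadpool" and the input values are purely illustrative.

  from flowerpower.cfg.pipeline.run import ExecutorConfig, RunConfig

  # Explicit RunConfig: every execution parameter lives on one object.
  cfg = RunConfig(
      inputs={"date": "2025-01-01"},               # illustrative input values
      final_vars=["report"],
      executor=ExecutorConfig(type="threadpool"),  # assumed executor type string
  )
  result = pipeline.run(run_config=cfg)

  # Keyword overrides: merged into the pipeline's default run config at call time.
  result = pipeline.run(inputs={"date": "2025-02-01"}, log_level="DEBUG")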
@@ -5,6 +5,7 @@ import datetime as dt
  import os
  import posixpath
  import sys
+ from dataclasses import dataclass
  from typing import TYPE_CHECKING, Any, Dict

  import rich
@@ -16,7 +17,7 @@ from rich.syntax import Syntax
  from rich.table import Table
  from rich.tree import Tree

- from .. import settings
+ from ..settings import CONFIG_DIR, LOG_LEVEL, PIPELINES_DIR
  # Import necessary config types and utility functions
  from ..cfg import PipelineConfig, ProjectConfig
  from ..utils.logging import setup_logging
@@ -47,7 +48,15 @@ class HookType(str, Enum):
  return self.value


- setup_logging(level=settings.LOG_LEVEL)
+ @dataclass
+ class CachedPipelineData:
+ """Container for cached pipeline data."""
+ pipeline: "Pipeline"
+ config: PipelineConfig
+ module: Any
+
+
+ setup_logging(level=LOG_LEVEL)


  class PipelineRegistry:
@@ -71,16 +80,14 @@ class PipelineRegistry:
  """
  self.project_cfg = project_cfg
  self._fs = fs
- self._cfg_dir = settings.CONFIG_DIR
- self._pipelines_dir = settings.PIPELINES_DIR
+ self._cfg_dir = CONFIG_DIR
+ self._pipelines_dir = PIPELINES_DIR
  self._base_dir = base_dir
  self._storage_options = storage_options or {}
  self._console = Console()

- # Cache for loaded pipelines
- self._pipeline_cache: Dict[str, "Pipeline"] = {}
- self._config_cache: Dict[str, PipelineConfig] = {}
- self._module_cache: Dict[str, Any] = {}
+ # Consolidated cache for pipeline data
+ self._pipeline_data_cache: Dict[str, CachedPipelineData] = {}

  # Ensure module paths are added
  self._add_modules_path()
@@ -198,9 +205,9 @@
  ValueError: If pipeline configuration is invalid
  """
  # Use cache if available and not reloading
- if not reload and name in self._pipeline_cache:
+ if not reload and name in self._pipeline_data_cache:
  logger.debug(f"Returning cached pipeline '{name}'")
- return self._pipeline_cache[name]
+ return self._pipeline_data_cache[name].pipeline

  logger.debug(f"Creating pipeline instance for '{name}'")

@@ -221,8 +228,12 @@
  project_context=project_context,
  )

- # Cache the pipeline instance
- self._pipeline_cache[name] = pipeline
+ # Cache the pipeline data
+ self._pipeline_data_cache[name] = CachedPipelineData(
+ pipeline=pipeline,
+ config=config,
+ module=module,
+ )

  logger.debug(f"Successfully created pipeline instance for '{name}'")
  return pipeline
@@ -238,9 +249,9 @@
  PipelineConfig instance
  """
  # Use cache if available and not reloading
- if not reload and name in self._config_cache:
+ if not reload and name in self._pipeline_data_cache:
  logger.debug(f"Returning cached config for pipeline '{name}'")
- return self._config_cache[name]
+ return self._pipeline_data_cache[name].config

  logger.debug(f"Loading configuration for pipeline '{name}'")

@@ -252,8 +263,14 @@
  storage_options=self._storage_options,
  )

- # Cache the configuration
- self._config_cache[name] = config
+ # Cache the configuration (will be stored in consolidated cache when pipeline is created)
+ # For now, we'll create a temporary cache entry if it doesn't exist
+ if name not in self._pipeline_data_cache:
+ self._pipeline_data_cache[name] = CachedPipelineData(
+ pipeline=None, # type: ignore
+ config=config,
+ module=None, # type: ignore
+ )

  return config

@@ -268,9 +285,11 @@
  Loaded Python module
  """
  # Use cache if available and not reloading
- if not reload and name in self._module_cache:
- logger.debug(f"Returning cached module for pipeline '{name}'")
- return self._module_cache[name]
+ if not reload and name in self._pipeline_data_cache:
+ cached_data = self._pipeline_data_cache[name]
+ if cached_data.module is not None:
+ logger.debug(f"Returning cached module for pipeline '{name}'")
+ return cached_data.module

  logger.debug(f"Loading module for pipeline '{name}'")

@@ -281,8 +300,16 @@
  # Load the module
  module = load_module(module_name, reload=reload)

- # Cache the module
- self._module_cache[name] = module
+ # Cache the module (will be stored in consolidated cache when pipeline is created)
+ # For now, we'll update the existing cache entry if it exists
+ if name in self._pipeline_data_cache:
+ self._pipeline_data_cache[name].module = module
+ else:
+ self._pipeline_data_cache[name] = CachedPipelineData(
+ pipeline=None, # type: ignore
+ config=None, # type: ignore
+ module=module,
+ )

  return module

@@ -295,14 +322,10 @@
  """
  if name:
  logger.debug(f"Clearing cache for pipeline '{name}'")
- self._pipeline_cache.pop(name, None)
- self._config_cache.pop(name, None)
- self._module_cache.pop(name, None)
+ self._pipeline_data_cache.pop(name, None)
  else:
  logger.debug("Clearing entire pipeline cache")
- self._pipeline_cache.clear()
- self._config_cache.clear()
- self._module_cache.clear()
+ self._pipeline_data_cache.clear()

  # --- Methods moved from PipelineManager ---
  def new(self, name: str, overwrite: bool = False):
@@ -312,7 +335,6 @@
  Args:
  name (str): The name of the pipeline.
  overwrite (bool): Whether to overwrite an existing pipeline. Defaults to False.
- job_queue_type (str | None): The type of worker to use. Defaults to None.

  Raises:
  ValueError: If the configuration or pipeline path does not exist, or if the pipeline already exists.
@@ -333,7 +355,7 @@

  formatted_name = name.replace(".", "/").replace("-", "_")
  pipeline_file = posixpath.join(self._pipelines_dir, f"{formatted_name}.py")
- cfg_file = posixpath.join(self._cfg_dir, "pipelines", f"{formatted_name}.yml")
+ cfg_file = posixpath.join(self._cfg_dir, PIPELINES_DIR, f"{formatted_name}.yml")

  def check_and_handle(path: str):
  if self._fs.exists(path):
@@ -390,7 +412,7 @@
  deleted_files = []
  if cfg:
  pipeline_cfg_path = posixpath.join(
- self._cfg_dir, "pipelines", f"{name}.yml"
+ self._cfg_dir, PIPELINES_DIR, f"{name}.yml"
  )
  if self._fs.exists(pipeline_cfg_path):
  self._fs.rm(pipeline_cfg_path)
@@ -436,11 +458,16 @@
  """
  try:
  return self._fs.glob(posixpath.join(self._pipelines_dir, "*.py"))
- except Exception as e:
+ except (OSError, PermissionError) as e:
  logger.error(
  f"Error accessing pipeline directory {self._pipelines_dir}: {e}"
  )
  return []
+ except Exception as e:
+ logger.error(
+ f"Unexpected error accessing pipeline directory {self._pipelines_dir}: {e}"
+ )
+ return []

  def _get_names(self) -> list[str]:
  """
@@ -504,11 +531,16 @@
  except FileNotFoundError:
  logger.warning(f"Module file not found for pipeline '{name}'")
  pipeline_summary["module"] = "# Module file not found"
- except Exception as e:
+ except (OSError, PermissionError, UnicodeDecodeError) as e:
  logger.error(
  f"Error reading module file for pipeline '{name}': {e}"
  )
  pipeline_summary["module"] = f"# Error reading module file: {e}"
+ except Exception as e:
+ logger.error(
+ f"Unexpected error reading module file for pipeline '{name}': {e}"
+ )
+ pipeline_summary["module"] = f"# Unexpected error reading module file: {e}"

  if pipeline_summary: # Only add if cfg or code was requested and found
  summary["pipelines"][name] = pipeline_summary
@@ -677,9 +709,12 @@
  size = f"{size_bytes / 1024:.1f} KB" if size_bytes else "0.0 KB"
  except NotImplementedError:
  size = "N/A"
- except Exception as e:
+ except (OSError, PermissionError) as e:
  logger.warning(f"Could not get size for {path}: {e}")
  size = "Error"
+ except Exception as e:
+ logger.warning(f"Unexpected error getting size for {path}: {e}")
+ size = "Error"

  pipeline_info.append({
  "name": name,
@@ -25,8 +25,8 @@ class PipelineVisualizer:
  self._fs = fs
  # Attributes like fs and base_dir are accessed via self.project_cfg

- def _display_all_function(self, name: str, reload: bool = False):
- """Internal helper to load module/config and get the Hamilton DAG object.
+ def _get_dag_object(self, name: str, reload: bool = False):
+ """Get the Hamilton DAG object for a pipeline.

  Args:
  name (str): The name of the pipeline.
@@ -84,7 +84,7 @@
  >>> visualizer = PipelineVisualizer(project_cfg, fs)
  >>> visualizer.save_dag(name="example_pipeline", format="png")
  """
- dag = self._display_all_function(name=name, reload=reload)
+ dag = self._get_dag_object(name=name, reload=reload)

  # Use project_cfg attributes for path and filesystem access
  graph_dir = posixpath.join(self.project_cfg.base_dir, "graphs")
@@ -133,7 +133,7 @@
  >>> visualizer = PipelineVisualizer(project_cfg, fs)
  >>> visualizer.show_dag(name="example_pipeline", format="png")
  """
- dag = self._display_all_function(name=name, reload=reload)
+ dag = self._get_dag_object(name=name, reload=reload)
  if raw:
  return dag
  # Use view_img utility to display the rendered graph
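
For orientation, the rename above only touches internal call sites; the public visualizer API shown in the docstring examples is unchanged. A short recap of that usage (argument values are taken from the docstrings themselves; the project_cfg and fs objects, and show_dag accepting raw as a keyword, are assumptions based on the surrounding code):

  visualizer = PipelineVisualizer(project_cfg, fs)
  visualizer.save_dag(name="example_pipeline", format="png")      # renders the DAG under <base_dir>/graphs
  dag = visualizer.show_dag(name="example_pipeline", raw=True)    # raw=True returns the Hamilton DAG object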
@@ -1,7 +1,7 @@
  import warnings

  warnings.warn(
- "The flowerpower.plugins._io module is deprecated. "
+ "The flowerpower.plugins.io module is deprecated. "
  "Please use 'flowerpower-io' instead. Install it with 'pip install flowerpower-io'.",
  DeprecationWarning,
  stacklevel=2,
@@ -1,8 +1,6 @@
  # flake8: noqa
- from .backend import *
  from .executor import *
  from .general import *
  from .hamilton import *
- from .job_queue import *
  from .logging import *
  from .retry import *
@@ -47,14 +47,6 @@ BACKEND_PROPERTIES = {
  "default_password": None,
  "is_sqla_type": False,
  },
- "redis": {
- "uri_prefix": "redis://",
- "default_port": 6379,
- "default_host": "localhost",
- "default_database": 0,
- "default_username": None,
- "default_password": None,
- },
  "nats_kv": {
  "uri_prefix": "nats://",
  "default_port": 4222,
@@ -73,17 +65,6 @@
  },
  }

- # # REDIS ENVIRONMENT VARIABLES
- # REDIS_HOST = os.getenv("FP_REDIS_HOST", BACKEND_PROPERTIES["redis"]["default_host"])
- # REDIS_PORT = int(
- # os.getenv("FP_REDIS_PORT", BACKEND_PROPERTIES["redis"]["default_port"])
- # )
- # REDIS_DB = int(
- # os.getenv("FP_REDIS_DB", BACKEND_PROPERTIES["redis"]["default_database"])
- # )
- # REDIS_PASSWORD = os.getenv("FP_REDIS_PASSWORD", None)
- # REDIS_USERNAME = os.getenv("FP_REDIS_USERNAME", None)
- # REDIS_SSL = bool(os.getenv("FP_REDIS_SSL", False))

  # # POSTGRES ENVIRONMENT VARIABLES
  # POSTGRES_HOST = os.getenv(
@@ -1,4 +1,4 @@
  import os

  # LOGGING
- LOG_LEVEL = os.getenv("FP_LOG_LEVEL", "INFO")
+ LOG_LEVEL = os.getenv("FP_LOG_LEVEL", "CRITICAL")
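
Because the fallback drops from "INFO" to "CRITICAL", 0.30.0 is effectively silent unless FP_LOG_LEVEL is set. A minimal sketch of opting back in (the variable name comes from the line above; setting it before the first flowerpower import is an assumption about when the settings module reads it):

  import os

  os.environ["FP_LOG_LEVEL"] = "INFO"   # must be set before flowerpower reads its settings
  import flowerpower                    # LOG_LEVEL now resolves to "INFO" instead of the new "CRITICAL" default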