FlowerPower 0.21.0__py3-none-any.whl → 0.31.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. flowerpower/cfg/__init__.py +143 -25
  2. flowerpower/cfg/base.py +132 -11
  3. flowerpower/cfg/exceptions.py +53 -0
  4. flowerpower/cfg/pipeline/__init__.py +151 -35
  5. flowerpower/cfg/pipeline/adapter.py +1 -0
  6. flowerpower/cfg/pipeline/builder.py +24 -25
  7. flowerpower/cfg/pipeline/builder_adapter.py +142 -0
  8. flowerpower/cfg/pipeline/builder_executor.py +101 -0
  9. flowerpower/cfg/pipeline/run.py +134 -22
  10. flowerpower/cfg/project/__init__.py +59 -14
  11. flowerpower/cfg/project/adapter.py +6 -0
  12. flowerpower/cli/__init__.py +8 -9
  13. flowerpower/cli/cfg.py +0 -38
  14. flowerpower/cli/pipeline.py +121 -83
  15. flowerpower/cli/utils.py +120 -71
  16. flowerpower/flowerpower.py +94 -120
  17. flowerpower/pipeline/config_manager.py +180 -0
  18. flowerpower/pipeline/executor.py +126 -0
  19. flowerpower/pipeline/lifecycle_manager.py +231 -0
  20. flowerpower/pipeline/manager.py +121 -276
  21. flowerpower/pipeline/pipeline.py +66 -278
  22. flowerpower/pipeline/registry.py +45 -4
  23. flowerpower/utils/__init__.py +19 -0
  24. flowerpower/utils/adapter.py +286 -0
  25. flowerpower/utils/callback.py +73 -67
  26. flowerpower/utils/config.py +306 -0
  27. flowerpower/utils/executor.py +178 -0
  28. flowerpower/utils/filesystem.py +194 -0
  29. flowerpower/utils/misc.py +249 -76
  30. flowerpower/utils/security.py +221 -0
  31. {flowerpower-0.21.0.dist-info → flowerpower-0.31.0.dist-info}/METADATA +1 -13
  32. flowerpower-0.31.0.dist-info/RECORD +53 -0
  33. flowerpower/cfg/pipeline/_schedule.py +0 -32
  34. flowerpower/cli/mqtt.py +0 -168
  35. flowerpower/plugins/mqtt/__init__.py +0 -8
  36. flowerpower-0.21.0.dist-info/RECORD +0 -44
  37. {flowerpower-0.21.0.dist-info → flowerpower-0.31.0.dist-info}/WHEEL +0 -0
  38. {flowerpower-0.21.0.dist-info → flowerpower-0.31.0.dist-info}/entry_points.txt +0 -0
  39. {flowerpower-0.21.0.dist-info → flowerpower-0.31.0.dist-info}/licenses/LICENSE +0 -0
  40. {flowerpower-0.21.0.dist-info → flowerpower-0.31.0.dist-info}/top_level.txt +0 -0
flowerpower/cli/pipeline.py CHANGED
@@ -1,7 +1,8 @@
 # Import necessary libraries
 import typer
 from loguru import logger
-from typing_extensions import Annotated
+from typing_extensions import Annotated, Callable, Any
+from typing import Dict, List, Optional, Tuple
 
 from ..flowerpower import FlowerPowerProject
 from ..pipeline.manager import HookType, PipelineManager
@@ -14,6 +15,27 @@ setup_logging()
 app = typer.Typer(help="Pipeline management commands")
 
 
+# Note: common_options decorator removed as it was causing TypeError
+# Options are now defined directly in each function's parameter list
+
+
+def parse_common_options(
+    base_dir: Optional[str] = None,
+    storage_options: Optional[str] = None,
+    log_level: Optional[str] = None,
+) -> Tuple[Optional[str], Dict, Optional[str]]:
+    """Parse common CLI options and return processed values."""
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    # Ensure storage_options is always a dict, not None or list
+    if parsed_storage_options is None:
+        parsed_storage_options = {}
+    elif not isinstance(parsed_storage_options, dict):
+        # This should not happen with param_type="dict", but being safe
+        logger.warning(f"Expected dict for storage_options, got {type(parsed_storage_options)}")
+        parsed_storage_options = {}
+    return base_dir, parsed_storage_options, log_level
+
+
 @app.command()
 def run(
     name: str = typer.Argument(..., help="Name of the pipeline to run"),
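The hunks below show each command delegating its shared options to this new helper. As a minimal sketch of the pattern (illustrative only, not part of the diff; the command name and body are placeholders), a command using it looks roughly like this:

    @app.command()
    def example(
        name: str = typer.Argument(..., help="Pipeline name"),
        base_dir: str | None = typer.Option(None, "--base-dir", "-d"),
        storage_options: str | None = typer.Option(None, "--storage-options", "-s"),
        log_level: str | None = typer.Option(None, "--log-level"),
    ):
        # Normalize the shared options once, then hand them to the manager.
        base_dir, parsed_storage_options, log_level = parse_common_options(
            base_dir, storage_options, log_level
        )
        with PipelineManager(
            base_dir=base_dir,
            storage_options=parsed_storage_options,
            log_level=log_level,
        ) as manager:
            ...  # command-specific work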
@@ -96,12 +118,36 @@ def run(
     # Configure automatic retries on failure
     $ pipeline run my_pipeline --max-retries 3 --retry-delay 2.0 --jitter-factor 0.2
     """
-    parsed_inputs = parse_dict_or_list_param(inputs, "dict")
-    parsed_config = parse_dict_or_list_param(config, "dict")
-    parsed_cache = parse_dict_or_list_param(cache, "dict")
-    parsed_final_vars = parse_dict_or_list_param(final_vars, "list")
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
-    parsed_with_adapter = parse_dict_or_list_param(with_adapter, "dict")
+    # Parse parameters with proper type handling
+    parsed_inputs = parse_dict_or_list_param(inputs, "dict") or {}
+    parsed_config = parse_dict_or_list_param(config, "dict") or {}
+    parsed_cache = parse_dict_or_list_param(cache, "dict") or {}
+    parsed_final_vars = parse_dict_or_list_param(final_vars, "list") or []
+    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict") or {}
+    parsed_with_adapter = parse_dict_or_list_param(with_adapter, "dict") or {}
+
+    # Ensure proper types for RunConfig
+    if parsed_inputs is not None and not isinstance(parsed_inputs, dict):
+        parsed_inputs = {}
+    if parsed_config is not None and not isinstance(parsed_config, dict):
+        parsed_config = {}
+    if parsed_cache is not None and not isinstance(parsed_cache, (dict, bool)):
+        parsed_cache = False
+    if parsed_final_vars is not None and not isinstance(parsed_final_vars, list):
+        parsed_final_vars = []
+    if parsed_with_adapter is not None and not isinstance(parsed_with_adapter, dict):
+        parsed_with_adapter = {}
+
+    # Ensure storage_options is a dict for FlowerPowerProject.load
+    if parsed_storage_options is not None and not isinstance(parsed_storage_options, dict):
+        parsed_storage_options = {}
+
+    # Create WithAdapterConfig object if needed
+    from ..cfg.pipeline.run import WithAdapterConfig
+    if isinstance(parsed_with_adapter, dict):
+        with_adapter_config = WithAdapterConfig.from_dict(parsed_with_adapter)
+    else:
+        with_adapter_config = WithAdapterConfig()
 
     # Use FlowerPowerProject for better consistency with the new architecture
     project = FlowerPowerProject.load(
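The normalization above leans on parse_dict_or_list_param (rewritten in flowerpower/cli/utils.py further down), which accepts JSON, Python literals, or comma-separated key=value strings. Assuming the run command exposes the parsed values through an --inputs option (the option declarations themselves are not shown in this hunk), these two invocations would produce the same dict:

    $ pipeline run my_pipeline --inputs '{"date": "2024-01-01"}'
    $ pipeline run my_pipeline --inputs 'date=2024-01-01'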
@@ -121,7 +167,7 @@ def run(
             final_vars=parsed_final_vars,
             config=parsed_config,
             cache=parsed_cache,
-            with_adapter=parsed_with_adapter,
+            with_adapter=with_adapter_config,  # type: ignore
             max_retries=max_retries,
             retry_delay=retry_delay,
             jitter_factor=jitter_factor,
@@ -133,6 +179,12 @@ def run(
 
         _ = project.run(name=name, run_config=run_config)
         logger.info(f"Pipeline '{name}' finished running.")
+    except (FileNotFoundError, PermissionError, OSError) as e:
+        logger.error(f"File system error during pipeline execution: {e}")
+        raise typer.Exit(1)
+    except ValueError as e:
+        logger.error(f"Invalid configuration for pipeline execution: {e}")
+        raise typer.Exit(1)
     except Exception as e:
         logger.error(f"Pipeline execution failed: {e}")
         raise typer.Exit(1)
@@ -141,13 +193,9 @@ def run(
 @app.command()
 def new(
     name: str = typer.Argument(..., help="Name of the pipeline to create"),
-    base_dir: str | None = typer.Option(None, help="Base directory for the pipeline"),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
     overwrite: bool = typer.Option(
         False, help="Overwrite existing pipeline if it exists"
     ),
@@ -176,10 +224,12 @@ def new(
     # Create a pipeline in a specific directory
     $ pipeline new my_new_pipeline --base-dir /path/to/project
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         manager.new(name=name, overwrite=overwrite)
@@ -189,21 +239,15 @@ def new(
 @app.command()
 def delete(
     name: str = typer.Argument(..., help="Name of the pipeline to delete"),
-    base_dir: str | None = typer.Option(
-        None, help="Base directory containing the pipeline"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
     cfg: bool = typer.Option(
         False, "--cfg", "-c", help="Delete only the configuration file"
     ),
     module: bool = typer.Option(
         False, "--module", "-m", help="Delete only the pipeline module"
     ),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
 ):
     """
     Delete a pipeline's configuration and/or module files.
@@ -229,7 +273,9 @@ def delete(
     # Delete only the module file
     $ pipeline delete my_pipeline --module
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
 
     # If neither flag is set, default to deleting both
     delete_cfg = cfg or not (cfg or module)
@@ -237,7 +283,7 @@ def delete(
 
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         manager.delete(name=name, cfg=delete_cfg, module=delete_module)
@@ -257,15 +303,9 @@ def delete(
 @app.command()
 def show_dag(
     name: str = typer.Argument(..., help="Name of the pipeline to visualize"),
-    base_dir: str | None = typer.Option(
-        None, help="Base directory containing the pipeline"
-    ),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
     format: str = typer.Option(
         "png", help="Output format (e.g., png, svg, pdf). If 'raw', returns object."
     ),
@@ -293,12 +333,14 @@ def show_dag(
     # Get raw graphviz object
     $ pipeline show-dag my_pipeline --format raw
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
     is_raw = format.lower() == "raw"
 
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         # Manager's show_dag likely handles rendering or returning raw object
@@ -317,6 +359,10 @@ def show_dag(
             logger.error(
                 "Graphviz is not installed. Cannot show/save DAG. Install with: pip install graphviz"
             )
+        except (FileNotFoundError, PermissionError, OSError) as e:
+            logger.error(f"File system error generating DAG for pipeline '{name}': {e}")
+        except ValueError as e:
+            logger.error(f"Invalid configuration for DAG generation: {e}")
         except Exception as e:
             logger.error(f"Failed to generate DAG for pipeline '{name}': {e}")
 
@@ -324,15 +370,9 @@ def show_dag(
 @app.command()
 def save_dag(
     name: str = typer.Argument(..., help="Name of the pipeline to visualize"),
-    base_dir: str | None = typer.Option(
-        None, help="Base directory containing the pipeline"
-    ),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
     format: str = typer.Option("png", help="Output format (e.g., png, svg, pdf)"),
     output_path: str | None = typer.Option(
         None, help="Custom path to save the file (default: <name>.<format>)"
@@ -362,10 +402,12 @@ def save_dag(
     # Save to a custom location
     $ pipeline save-dag my_pipeline --output-path ./visualizations/my_graph.png
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         try:
@@ -377,21 +419,19 @@ def save_dag(
             logger.error(
                 "Graphviz is not installed. Cannot save DAG. Install with: pip install graphviz"
             )
+        except (FileNotFoundError, PermissionError, OSError) as e:
+            logger.error(f"File system error saving DAG for pipeline '{name}': {e}")
+        except ValueError as e:
+            logger.error(f"Invalid configuration for DAG saving: {e}")
         except Exception as e:
             logger.error(f"Failed to save DAG for pipeline '{name}': {e}")
 
 
 @app.command()
 def show_pipelines(
-    base_dir: str | None = typer.Option(
-        None, help="Base directory containing pipelines"
-    ),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
     format: str = typer.Option("table", help="Output format (table, json, yaml)"),
 ):
     """
@@ -416,10 +456,12 @@ def show_pipelines(
     # List pipelines from a specific directory
     $ pipeline show-pipelines --base-dir /path/to/project
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         manager.show_pipelines(format=format)
@@ -433,15 +475,9 @@ def show_summary(
     cfg: bool = typer.Option(True, help="Include configuration details"),
     code: bool = typer.Option(True, help="Include code/module details"),
     project: bool = typer.Option(True, help="Include project context"),
-    base_dir: str | None = typer.Option(
-        None, help="Base directory containing pipelines"
-    ),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
     to_html: bool = typer.Option(False, help="Output summary as HTML"),
     to_svg: bool = typer.Option(False, help="Output summary as SVG (if applicable)"),
     output_file: str | None = typer.Option(
@@ -480,10 +516,12 @@ def show_summary(
     # Generate HTML report
     $ pipeline show-summary --to-html --output-file pipeline_report.html
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         # Assumes manager.show_summary handles printing/returning formatted output
@@ -521,15 +559,9 @@ def add_hook(
     to: str | None = typer.Option(
         None, help="Target node name or tag (required for node hooks)"
     ),
-    base_dir: str | None = typer.Option(
-        None, help="Base directory containing the pipeline"
-    ),
-    storage_options: str | None = typer.Option(
-        None, help="Storage options as JSON, dict string, or key=value pairs"
-    ),
-    log_level: str | None = typer.Option(
-        None, help="Logging level (debug, info, warning, error, critical)"
-    ),
+    base_dir: str | None = typer.Option(None, "--base-dir", "-d", help="Base directory for the pipeline"),
+    storage_options: str | None = typer.Option(None, "--storage-options", "-s", help="Storage options as JSON, dict string, or key=value pairs"),
+    log_level: str | None = typer.Option(None, "--log-level", help="Logging level (debug, info, warning, error, critical)"),
 ):
     """
     Add a hook to a pipeline configuration.
@@ -560,7 +592,9 @@ def add_hook(
     # Add a hook for all nodes with a specific tag
     $ pipeline add-hook my_pipeline --function log_metrics --type NODE_POST_EXECUTE --to @metrics
     """
-    parsed_storage_options = parse_dict_or_list_param(storage_options, "dict")
+    base_dir, parsed_storage_options, log_level = parse_common_options(
+        base_dir, storage_options, log_level
+    )
 
     # Validate 'to' argument for node hooks
     if type in (HookType.NODE_PRE_EXECUTE, HookType.NODE_POST_EXECUTE) and not to:
@@ -570,7 +604,7 @@ def add_hook(
 
     with PipelineManager(
         base_dir=base_dir,
-        storage_options=parsed_storage_options or {},
+        storage_options=parsed_storage_options,
         log_level=log_level,
     ) as manager:
         try:
@@ -583,5 +617,9 @@ def add_hook(
             logger.info(
                 f"Hook '{function_name}' added to pipeline '{name}' (type: {type.value})."
             )
+        except (FileNotFoundError, PermissionError, OSError) as e:
+            logger.error(f"File system error adding hook to pipeline '{name}': {e}")
+        except ValueError as e:
+            logger.error(f"Invalid configuration for hook addition: {e}")
         except Exception as e:
             logger.error(f"Failed to add hook to pipeline '{name}': {e}")
flowerpower/cli/utils.py CHANGED
@@ -1,9 +1,10 @@
 import ast
 import importlib
+import importlib.util
 import json
+import os
 import posixpath
 import re
-import sys
 from typing import Callable
 
 from loguru import logger
@@ -15,12 +16,70 @@ from ..utils.logging import setup_logging
 setup_logging()
 
 
-# Parse additional parameters
-def parse_param_dict(param_str: str | None) -> dict:
-    """Helper to parse parameter dictionaries"""
-    if not param_str:
-        return {}
-    return dict(param.split("=") for param in param_str.split(","))
+def convert_string_booleans(obj):
+    """Convert string 'true'/'false' to boolean values recursively."""
+    if isinstance(obj, dict):
+        return {k: convert_string_booleans(v) for k, v in obj.items()}
+    elif isinstance(obj, list):
+        return [convert_string_booleans(item) for item in obj]
+    elif isinstance(obj, str):
+        if obj.lower() == "true":
+            return True
+        elif obj.lower() == "false":
+            return False
+    return obj
+
+
+def _parse_json(value: str):
+    """Parse value as JSON string."""
+    try:
+        return json.loads(value)
+    except json.JSONDecodeError:
+        return None
+
+
+def _parse_python_literal(value: str, param_type: str):
+    """Parse value as Python literal (dict/list)."""
+    try:
+        parsed = ast.literal_eval(value)
+
+        # Validate type
+        if param_type == "dict" and not isinstance(parsed, dict):
+            raise ValueError(f"Expected dict, got {type(parsed)}")
+        elif param_type == "list" and not isinstance(parsed, list):
+            raise ValueError(f"Expected list, got {type(parsed)}")
+
+        return parsed
+    except (SyntaxError, ValueError):
+        return None
+
+
+def _parse_key_value_pairs(value: str):
+    """Parse value as comma-separated key=value pairs."""
+    if "=" not in value:
+        return None
+
+    try:
+        return dict(
+            pair.split("=", 1) for pair in value.split(",") if pair.strip()
+        )
+    except ValueError:
+        return None
+
+
+def _parse_comma_separated_list(value: str):
+    """Parse value as comma-separated list with optional quotes."""
+    # Remove surrounding square brackets and whitespace
+    value = value.strip()
+    if value.startswith("[") and value.endswith("]"):
+        value = value[1:-1].strip()
+
+    # Parse list-like string with or without quotes
+    # This regex handles: a,b | 'a','b' | "a","b" | a, b | 'a', 'b'
+    list_items = re.findall(r"['\"]?(.*?)['\"]?(?=\s*,|\s*$)", value)
+
+    # Remove any empty strings and strip whitespace
+    return [item.strip() for item in list_items if item.strip()]
 
 
 def parse_dict_or_list_param(
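A quick sanity check of what the module-level helpers above return for representative inputs (expected values are my reading of the code shown here, not captured test output):

    convert_string_booleans({"cache": "true", "opts": ["False", "x"]})
    # -> {"cache": True, "opts": [False, "x"]}
    _parse_json('{"a": 1}')                    # -> {"a": 1}
    _parse_python_literal("{'a': 1}", "dict")  # -> {"a": 1}
    _parse_key_value_pairs("a=1,b=2")          # -> {"a": "1", "b": "2"}
    _parse_comma_separated_list("['a','b']")   # -> ["a", "b"]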
@@ -43,65 +102,34 @@ def parse_dict_or_list_param(
     Returns:
         dict | list | None: Parsed parameter or None if parsing fails
     """
-
-    def convert_string_booleans(obj):
-        if isinstance(obj, dict):
-            return {k: convert_string_booleans(v) for k, v in obj.items()}
-        elif isinstance(obj, list):
-            return [convert_string_booleans(item) for item in obj]
-        elif isinstance(obj, str):
-            if obj.lower() == "true":
-                return True
-            elif obj.lower() == "false":
-                return False
-        return obj
-
     if value is None:
         return None
 
-    try:
-        # Try parsing as JSON first
-        parsed = json.loads(value)
+    # Try parsing as JSON first
+    parsed = _parse_json(value)
+    if parsed is not None:
         return convert_string_booleans(parsed)
-    except json.JSONDecodeError:
-        try:
-            # Try parsing as Python literal
-            parsed = ast.literal_eval(value)
-
-            # Validate type
-            if param_type == "dict" and not isinstance(parsed, dict):
-                raise ValueError(f"Expected dict, got {type(parsed)}")
-            elif param_type == "list" and not isinstance(parsed, list):
-                raise ValueError(f"Expected list, got {type(parsed)}")
-
+
+    # Try parsing as Python literal
+    parsed = _parse_python_literal(value, param_type)
+    if parsed is not None:
+        return convert_string_booleans(parsed)
+
+    # For dicts, try parsing as comma-separated key=value pairs
+    if param_type == "dict":
+        parsed = _parse_key_value_pairs(value)
+        if parsed is not None:
             return convert_string_booleans(parsed)
-        except (SyntaxError, ValueError):
-            # For dicts, try parsing as comma-separated key=value pairs
-            if param_type == "dict" and "=" in value:
-                parsed = dict(
-                    pair.split("=", 1) for pair in value.split(",") if pair.strip()
-                )
-                return convert_string_booleans(parsed)
-
-            # For lists, try multiple parsing strategies
-            if param_type == "list":
-                # Remove surrounding square brackets and whitespace
-                value = value.strip()
-                if value.startswith("[") and value.endswith("]"):
-                    value = value[1:-1].strip()
-
-                # Parse list-like string with or without quotes
-                # This regex handles: a,b | 'a','b' | "a","b" | a, b | 'a', 'b'
-                list_items = re.findall(r"['\"]?(.*?)['\"]?(?=\s*,|\s*$)", value)
-
-                # Remove any empty strings and strip whitespace
-                parsed = [item.strip() for item in list_items if item.strip()]
-
-                return convert_string_booleans(parsed)
-
-            # If all parsing fails, log warning and return None
-            logger.warning(f"Could not parse {param_type} parameter: {value}")
-            return None
+
+    # For lists, try parsing as comma-separated values
+    if param_type == "list":
+        parsed = _parse_comma_separated_list(value)
+        if parsed:
+            return convert_string_booleans(parsed)
+
+    # If all parsing fails, log warning and return None
+    logger.warning(f"Could not parse {param_type} parameter: {value}")
+    return None
 
 
 def load_hook(
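The refactored function now walks a flat fallback chain: JSON, then Python literal, then key=value pairs (for dicts) or comma-separated values (for lists). Expected results for a few representative CLI strings (again my reading of the code, not captured output):

    parse_dict_or_list_param('{"retries": 3, "verbose": "true"}', "dict")
    # JSON parses first             -> {"retries": 3, "verbose": True}
    parse_dict_or_list_param("env=prod,debug=false", "dict")
    # falls through to key=value    -> {"env": "prod", "debug": False}
    parse_dict_or_list_param("a,b,c", "list")
    # comma-separated list fallback -> ["a", "b", "c"]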
@@ -132,17 +160,38 @@ def load_hook(
     elif len(path_segments) == 3:
         # If the function path is in the format 'package.[subpackage.]module_name.function_name'
         module_path, module_name, function_name = path_segments
-
-        logger.debug(
-            posixpath.join(
-                pm._fs.path, "hooks", pipeline_name, module_path.replace(".", "/")
-            )
-        )
-        sys.path.append(
-            posixpath.join(
-                pm._fs.path, "hooks", pipeline_name, module_path.replace(".", "/")
+    else:
+        raise ValueError(
+            f"Invalid function_path format: {function_path}. "
+            "Expected 'module_name.function_name' or 'package.module_name.function_name'"
         )
+
+    # Construct the full path to the module file
+    hooks_dir = posixpath.join(
+        pm._fs.path, "hooks", pipeline_name, module_path.replace(".", "/")
     )
-    hook_module = importlib.import_module(module_name)
+    module_file_path = os.path.join(hooks_dir, f"{module_name}.py")
+
+    logger.debug(f"Loading hook module from: {module_file_path}")
+
+    # Validate that the module file exists
+    if not os.path.exists(module_file_path):
+        raise FileNotFoundError(f"Hook module not found: {module_file_path}")
+
+    # Use importlib.util to safely load the module without modifying sys.path
+    spec = importlib.util.spec_from_file_location(module_name, module_file_path)
+    if spec is None or spec.loader is None:
+        raise ImportError(f"Could not load module spec for {module_name} from {module_file_path}")
+
+    hook_module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(hook_module)
+
+    # Get the function from the loaded module
+    if not hasattr(hook_module, function_name):
+        raise AttributeError(f"Function {function_name} not found in module {module_name}")
+
     hook_function = getattr(hook_module, function_name)
+    if not callable(hook_function):
+        raise TypeError(f"{function_name} is not callable in module {module_name}")
+
     return hook_function
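The rewritten load_hook replaces sys.path manipulation with an explicit importlib.util load from the hook file's path. Outside this codebase, the same stdlib pattern looks like this (a self-contained sketch; the function and module names are illustrative):

    import importlib.util

    def load_function_from_file(module_file_path: str, function_name: str):
        """Load a single function from a Python source file without touching sys.path."""
        spec = importlib.util.spec_from_file_location("user_hook_module", module_file_path)
        if spec is None or spec.loader is None:
            raise ImportError(f"Could not load module spec from {module_file_path}")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # executes the file in the new module's namespace
        func = getattr(module, function_name, None)
        if not callable(func):
            raise TypeError(f"{function_name} is not a callable in {module_file_path}")
        return func

    # Usage: hook = load_function_from_file("/project/hooks/my_pipeline/callbacks.py", "on_success")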