mcp-souschef 2.8.0__py3-none-any.whl → 3.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.2.0.dist-info}/METADATA +159 -384
- mcp_souschef-3.2.0.dist-info/RECORD +47 -0
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.2.0.dist-info}/WHEEL +1 -1
- souschef/__init__.py +31 -7
- souschef/assessment.py +1451 -105
- souschef/ci/common.py +126 -0
- souschef/ci/github_actions.py +3 -92
- souschef/ci/gitlab_ci.py +2 -52
- souschef/ci/jenkins_pipeline.py +2 -59
- souschef/cli.py +149 -16
- souschef/converters/playbook.py +378 -138
- souschef/converters/resource.py +12 -11
- souschef/converters/template.py +177 -0
- souschef/core/__init__.py +6 -1
- souschef/core/metrics.py +313 -0
- souschef/core/path_utils.py +233 -19
- souschef/core/validation.py +53 -0
- souschef/deployment.py +71 -12
- souschef/generators/__init__.py +13 -0
- souschef/generators/repo.py +695 -0
- souschef/parsers/attributes.py +1 -1
- souschef/parsers/habitat.py +1 -1
- souschef/parsers/inspec.py +25 -2
- souschef/parsers/metadata.py +5 -3
- souschef/parsers/recipe.py +1 -1
- souschef/parsers/resource.py +1 -1
- souschef/parsers/template.py +1 -1
- souschef/server.py +1039 -121
- souschef/ui/app.py +486 -374
- souschef/ui/pages/ai_settings.py +74 -8
- souschef/ui/pages/cookbook_analysis.py +3216 -373
- souschef/ui/pages/validation_reports.py +274 -0
- mcp_souschef-2.8.0.dist-info/RECORD +0 -42
- souschef/converters/cookbook_specific.py.backup +0 -109
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.2.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.2.0.dist-info}/licenses/LICENSE +0 -0
souschef/assessment.py CHANGED
```diff
@@ -6,18 +6,67 @@ generating migration plans, analyzing dependencies, and validating conversions.
 """
 
 import json
+import os
 import re
 from pathlib import Path
 from typing import Any
 
-from souschef.core import
+from souschef.core import (
+    METADATA_FILENAME,
+    _ensure_within_base_path,
+    _normalize_path,
+    _safe_join,
+)
 from souschef.core.errors import format_error_with_context
+from souschef.core.metrics import (
+    ComplexityLevel,
+    EffortMetrics,
+    categorize_complexity,
+    estimate_effort_for_complexity,
+)
+from souschef.core.path_utils import _validated_candidate, safe_glob
 from souschef.core.validation import (
     ValidationEngine,
     ValidationLevel,
     ValidationResult,
+    _format_validation_results_summary,
 )
 
+# Optional AI provider imports
+try:
+    import requests  # type: ignore[import-untyped]
+except ImportError:
+    requests = None
+
+try:
+    from ibm_watsonx_ai import APIClient  # type: ignore[import-not-found]
+except ImportError:
+    APIClient = None
+
+
+def _normalize_cookbook_root(cookbook_path: Path | str) -> Path:
+    """
+    Normalise cookbook paths.
+
+    Args:
+        cookbook_path: User-provided cookbook path.
+
+    Returns:
+        A resolved Path.
+
+    Raises:
+        ValueError: If the path cannot be normalised.
+
+    """
+    # Normalise the path (resolves symlinks, expands ~, etc.)
+    # Safety for accessing files within this cookbook is enforced per-operation
+    # using _ensure_within_base_path with the cookbook dir as the base
+    return _normalize_path(cookbook_path)
+
+
+# Optimised patterns to avoid catastrophic backtracking in resource parsing
+RESOURCE_BLOCK_PATTERN = re.compile(r"\w{1,100}\s+['\"]([^'\"\r\n]{0,200})['\"]\s+do")
+
 
 def assess_chef_migration_complexity(
     cookbook_paths: str,
```
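The new `RESOURCE_BLOCK_PATTERN` replaces open-ended quantifiers with bounded ones (`{1,100}`, `{0,200}`) and a negated character class, which keeps matching linear-time even when a quote is never closed. A standalone sanity check of the pattern (the recipe text here is illustrative, not taken from the package):

```python
import re

# Same pattern as in the diff: bounded repetition instead of \w+ / .*?
# avoids catastrophic backtracking on malformed input.
RESOURCE_BLOCK_PATTERN = re.compile(r"\w{1,100}\s+['\"]([^'\"\r\n]{0,200})['\"]\s+do")

recipe = """
package 'httpd' do
  action :install
end

service "httpd" do
  action [:enable, :start]
end
"""

print(RESOURCE_BLOCK_PATTERN.findall(recipe))  # ['httpd', 'httpd']
```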
```diff
@@ -127,8 +176,9 @@ def parse_chef_migration_assessment(
             "recommendations": recommendations,
             "roadmap": roadmap,
             "complexity": _get_overall_complexity_level(overall_metrics),
-            "estimated_hours":
-
+            "estimated_hours": EffortMetrics(
+                overall_metrics.get("estimated_effort_days", 0)
+            ).estimated_hours,
         }
 
     except Exception as e:
```
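The hand-rolled hour arithmetic is replaced by `EffortMetrics` from the new `souschef/core/metrics.py` (added in this release with +313 lines, but not shown in this diff). A minimal sketch of the interface the call sites rely on — the 8-hour day and 5-day week constants below are assumptions, not confirmed values from the package:

```python
from dataclasses import dataclass


@dataclass
class EffortMetrics:
    """Hypothetical stand-in for souschef.core.metrics.EffortMetrics."""

    estimated_days: float

    @property
    def estimated_hours(self) -> float:
        # Assumption: one person-day == 8 hours.
        return round(self.estimated_days * 8, 1)

    @property
    def estimated_weeks_range(self) -> str:
        # Assumption: 5-day weeks with a +/-20% planning buffer.
        weeks = self.estimated_days / 5
        return f"{weeks * 0.8:.1f}-{weeks * 1.2:.1f} weeks"


print(EffortMetrics(10).estimated_hours)        # 80.0
print(EffortMetrics(10).estimated_weeks_range)  # 1.6-2.4 weeks
```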
```diff
@@ -202,7 +252,6 @@ def _parse_and_assess_cookbooks(cookbook_paths: str) -> tuple[list, str | None]:
 
     cookbook_assessments = []
     for cookbook_path in valid_paths:
-        # deepcode ignore PT: path normalized via _normalize_path
         assessment = _assess_single_cookbook(cookbook_path)
         cookbook_assessments.append(assessment)
 
```
```diff
@@ -305,7 +354,8 @@ def analyse_cookbook_dependencies(
         dependency_depth: Analysis depth (direct, transitive, full)
 
     Returns:
-        Dependency analysis with migration order recommendations
+        Dependency analysis with migration order recommendations.
+
 
     """
     try:
```
```diff
@@ -317,15 +367,21 @@ def analyse_cookbook_dependencies(
                 f"Suggestion: Use one of {', '.join(valid_depths)}"
             )
 
-
-
+        # Validate and normalise user-provided path
+        # Containment is enforced at filesystem operation level
+        try:
+            normalized_input: Path = _normalize_path(cookbook_path)
+        except (ValueError, OSError) as e:
+            return f"Error: Invalid cookbook path '{cookbook_path}': {e}"
+
+        if not normalized_input.exists():
             return (
                 f"Error: Cookbook path not found: {cookbook_path}\n\n"
                 "Suggestion: Check that the path exists and points to a cookbook directory"
             )
 
-        # Analyze dependencies
-        dependency_analysis = _analyse_cookbook_dependencies_detailed(
+        # Analyze dependencies using normalized path
+        dependency_analysis = _analyse_cookbook_dependencies_detailed(normalized_input)
 
         # Determine migration order
         migration_order = _determine_migration_order(dependency_analysis)
```
```diff
@@ -334,7 +390,7 @@ def analyse_cookbook_dependencies(
         circular_deps = _identify_circular_dependencies(dependency_analysis)
 
         return f"""# Cookbook Dependency Analysis
-# Cookbook: {
+# Cookbook: {normalized_input.name}
 # Analysis Depth: {dependency_depth}
 
 ## Dependency Overview:
```
```diff
@@ -534,7 +590,9 @@ def _parse_cookbook_paths(cookbook_paths: str) -> list[Any]:
         List of valid Path objects (may be empty)
 
     """
-    paths = [
+    paths = [
+        _normalize_cookbook_root(path.strip()) for path in cookbook_paths.split(",")
+    ]
     valid_paths = [p for p in paths if p.exists()]
     return valid_paths
 
```
```diff
@@ -562,7 +620,6 @@ def _analyse_cookbook_metrics(
     }
 
     for cookbook_path in valid_paths:
-        # deepcode ignore PT: path normalized via _normalize_path
         assessment = _assess_single_cookbook(cookbook_path)
         cookbook_assessments.append(assessment)
 
```
```diff
@@ -632,62 +689,438 @@ def _format_assessment_report(
     """
 
 
-def _count_cookbook_artifacts(cookbook_path) -> dict[str, int]:
-    """Count
-
-
+def _count_cookbook_artifacts(cookbook_path: Path) -> dict[str, int]:  # noqa: C901
+    """Count comprehensive cookbook artifacts including all Chef components."""
+    # Note: cookbook_path is expected to be pre-validated
+    base = cookbook_path
+
+    # Helper function to safely glob within a directory
+    def _glob_safe(directory: Path, pattern: str) -> int:
+        """Count files matching a glob pattern within a directory."""
+        if not directory.exists() or not directory.is_dir():
+            return 0
+        try:
+            return len(list(directory.glob(pattern)))
+        except (OSError, ValueError):
+            return 0
+
+    # Helper function to check existence safely
+    def _exists_safe(path: Path) -> bool:
+        """Check if a path exists."""
+        try:
+            return path.exists()
+        except (OSError, ValueError):
+            return False
+
+    # All paths are safe-joined to the validated base
+    recipes_dir: Path = _safe_join(base, "recipes")
+    recipe_count: int = _glob_safe(recipes_dir, "*.rb")
+
+    templates_dir: Path = _safe_join(base, "templates")
+    template_count: int = _glob_safe(templates_dir, "**/*.erb")
+
+    files_dir: Path = _safe_join(base, "files")
+    file_count: int = _glob_safe(files_dir, "**/*")
+
+    attributes_dir: Path = _safe_join(base, "attributes")
+    attributes_count: int = _glob_safe(attributes_dir, "*.rb")
 
-
-
-
-
+    libraries_dir: Path = _safe_join(base, "libraries")
+    libraries_count: int = _glob_safe(libraries_dir, "*.rb")
+
+    definitions_dir: Path = _safe_join(base, "definitions")
+    definitions_count: int = _glob_safe(definitions_dir, "*.rb")
+
+    resources_dir: Path = _safe_join(base, "resources")
+    resources_count: int = _glob_safe(resources_dir, "*.rb")
+
+    providers_dir: Path = _safe_join(base, "providers")
+    providers_count: int = _glob_safe(providers_dir, "*.rb")
+
+    berksfile: Path = _safe_join(base, "Berksfile")
+    has_berksfile: bool = _exists_safe(berksfile)
+
+    chefignore: Path = _safe_join(base, "chefignore")
+    has_chefignore: bool = _exists_safe(chefignore)
+
+    thorfile: Path = _safe_join(base, "Thorfile")
+    has_thorfile: bool = _exists_safe(thorfile)
+
+    kitchen_yml: Path = _safe_join(base, ".kitchen.yml")
+    kitchen_yaml: Path = _safe_join(base, "kitchen.yml")
+    has_kitchen_yml: bool = _exists_safe(kitchen_yml) or _exists_safe(kitchen_yaml)
+
+    test_dir: Path = _safe_join(base, "test")
+    spec_dir: Path = _safe_join(base, "spec")
+    has_test_dir: bool = _exists_safe(test_dir) or _exists_safe(spec_dir)
+
+    libraries_dir = cookbook_path / "libraries"
+    libraries_count = (
+        len(list(libraries_dir.glob("*.rb"))) if libraries_dir.exists() else 0
+    )
+
+    definitions_dir = cookbook_path / "definitions"
+    definitions_count = (
+        len(list(definitions_dir.glob("*.rb"))) if definitions_dir.exists() else 0
     )
 
-
-
-
-        else 0
+    resources_dir = cookbook_path / "resources"
+    resources_count = (
+        len(list(resources_dir.glob("*.rb"))) if resources_dir.exists() else 0
     )
 
+    providers_dir = cookbook_path / "providers"
+    providers_count = (
+        len(list(providers_dir.glob("*.rb"))) if providers_dir.exists() else 0
+    )
+
+    # Configuration files
+    has_berksfile = (cookbook_path / "Berksfile").exists()
+    has_chefignore = (cookbook_path / "chefignore").exists()
+    has_thorfile = (cookbook_path / "Thorfile").exists()
+    has_kitchen_yml = (cookbook_path / ".kitchen.yml").exists() or (
+        cookbook_path / "kitchen.yml"
+    ).exists()
+    has_test_dir = (cookbook_path / "test").exists() or (
+        cookbook_path / "spec"
+    ).exists()
+
     return {
         "recipe_count": recipe_count,
-        "
-        "
+        "template_count": template_count,
+        "file_count": file_count,
+        "attributes_count": attributes_count,
+        "libraries_count": libraries_count,
+        "definitions_count": definitions_count,
+        "resources_count": resources_count,
+        "providers_count": providers_count,
+        "has_berksfile": int(has_berksfile),
+        "has_chefignore": int(has_chefignore),
+        "has_thorfile": int(has_thorfile),
+        "has_kitchen_yml": int(has_kitchen_yml),
+        "has_test_dir": int(has_test_dir),
     }
 
 
-def _analyse_recipe_complexity(cookbook_path) -> dict[str, int]:
-    """Analyse recipe files for resource counts, Ruby blocks, and custom resources."""
-
+def _analyse_recipe_complexity(cookbook_path: Path) -> dict[str, int]:
+    """Analyse recipe files and other cookbook components for resource counts, Ruby blocks, and custom resources."""
+    # Note: cookbook_path is expected to be pre-validated at function entry points
+    # Do not call _normalize_cookbook_root here as it's already a validated Path
+
     resource_count = 0
     custom_resources = 0
     ruby_blocks = 0
-
-
-
-
-
-
-
-
-
-
-
-            )
-            custom_resources += len(
-                re.findall(
-                    r"custom_resource|provides|use_inline_resources", content
-                )
-            )
-            resource_count += resources
+    erb_templates = 0
+    attribute_complexity = 0
+    library_complexity = 0
+    definition_count = 0
+
+    # Analyze different cookbook components
+    resource_count, ruby_blocks, custom_resources = _analyze_recipes(cookbook_path)
+    attribute_complexity = _analyze_attributes(cookbook_path)
+    erb_templates = _analyze_templates(cookbook_path)
+    library_complexity = _analyze_libraries(cookbook_path)
+    definition_count = _count_definitions(cookbook_path)
 
     return {
         "resource_count": resource_count,
         "custom_resources": custom_resources,
         "ruby_blocks": ruby_blocks,
+        "erb_templates": erb_templates,
+        "attribute_complexity": attribute_complexity,
+        "library_complexity": library_complexity,
+        "definition_count": definition_count,
     }
 
 
+def _analyze_recipes(cookbook_path: Path) -> tuple[int, int, int]:
+    """Analyze recipe files for resources, ruby blocks, and custom resources."""
+    resource_count = 0
+    ruby_blocks = 0
+    custom_resources = 0
+
+    # Note: cookbook_path is expected to be pre-validated
+    # Use it directly with _safe_join to access recipes directory
+    recipes_dir: Path = _safe_join(cookbook_path, "recipes")
+    try:
+        recipe_files: list[Path] = (
+            list(recipes_dir.glob("*.rb")) if recipes_dir.exists() else []
+        )
+    except (OSError, ValueError):
+        recipe_files = []
+
+    for recipe_file in recipe_files:
+        try:
+            # Validate each glob result
+            validated_file: Path = _validated_candidate(recipe_file, cookbook_path)
+        except ValueError:
+            continue
+        try:
+            content = validated_file.read_text(encoding="utf-8", errors="ignore")
+            resources = len(RESOURCE_BLOCK_PATTERN.findall(content))
+            ruby_blocks += len(
+                re.findall(r"ruby_block|execute|bash|script", content, re.IGNORECASE)
+            )
+            custom_resources += len(
+                re.findall(
+                    r"custom_resource|provides|use_inline_resources|lwrp_resource",
+                    content,
+                )
+            )
+            resource_count += resources
+        except Exception:
+            continue
+
+    return resource_count, ruby_blocks, custom_resources
+
+
+def _analyze_attributes(cookbook_path: Path) -> int:
+    """Analyse attribute files for complexity."""
+    attribute_complexity = 0
+
+    # Note: cookbook_path is expected to be pre-validated
+    attributes_dir: Path = _safe_join(cookbook_path, "attributes")
+    try:
+        attr_files: list[Path] = (
+            list(attributes_dir.glob("*.rb")) if attributes_dir.exists() else []
+        )
+    except (OSError, ValueError):
+        attr_files = []
+
+    for attr_file in attr_files:
+        try:
+            # Validate each glob result
+            validated_file: Path = _validated_candidate(attr_file, cookbook_path)
+        except ValueError:
+            continue
+        try:
+            content = validated_file.read_text(encoding="utf-8", errors="ignore")
+            assignments = len(
+                re.findall(
+                    r"^[ \t]{0,20}\w+[ \t]{0,10}(?:\[\w*\])?[ \t]{0,10}=",
+                    content,
+                    re.MULTILINE,
+                )
+            )
+            complex_expressions = len(
+                re.findall(r"(?:node|default|override)\[", content)
+            )
+            attribute_complexity += assignments + complex_expressions
+        except Exception:
+            continue
+
+    return attribute_complexity
+
+
+def _analyze_templates(cookbook_path: Path) -> int:
+    """Analyze template files for ERB complexity."""
+    erb_templates = 0
+
+    # Note: cookbook_path is expected to be pre-validated
+    templates_dir: Path = _safe_join(cookbook_path, "templates")
+    try:
+        template_files: list[Path] = (
+            list(templates_dir.glob("**/*.erb")) if templates_dir.exists() else []
+        )
+    except (OSError, ValueError):
+        template_files = []
+
+    for template_file in template_files:
+        try:
+            # Validate each glob result
+            validated_file: Path = _validated_candidate(template_file, cookbook_path)
+        except ValueError:
+            continue
+
+        try:
+            content = validated_file.read_text(encoding="utf-8", errors="ignore")
+            erb_expressions = len(re.findall(r"<%.*?%>", content))
+            erb_templates += erb_expressions
+        except Exception:
+            continue
+
+    return erb_templates
+
+
+def _analyze_libraries(cookbook_path: Path) -> int:
+    """Analyse library files for complexity."""
+    library_complexity = 0
+
+    # Note: cookbook_path is expected to be pre-validated
+    libraries_dir: Path = _safe_join(cookbook_path, "libraries")
+    try:
+        lib_files: list[Path] = (
+            safe_glob(libraries_dir, "*.rb", cookbook_path)
+            if libraries_dir.exists()
+            else []
+        )
+    except (OSError, ValueError):
+        lib_files = []
+
+    for lib_file in lib_files:
+        try:
+            # lib_file is already validated by safe_glob
+            content = lib_file.read_text(encoding="utf-8", errors="ignore")
+            classes = len(re.findall(r"class\s+\w+", content))
+            methods = len(re.findall(r"def\s+\w+", content))
+            library_complexity += classes * 2 + methods
+        except Exception:
+            continue
+
+    return library_complexity
+
+
+def _count_definitions(cookbook_path: Path) -> int:
+    """Count definition files."""
+    # Note: cookbook_path is expected to be pre-validated
+    definitions_dir: Path = _safe_join(cookbook_path, "definitions")
+    try:
+        def_files: list[Path] = (
+            safe_glob(definitions_dir, "*.rb", cookbook_path)
+            if definitions_dir.exists()
+            else []
+        )
+    except (OSError, ValueError):
+        def_files = []
+    return len(def_files)
+
+
+def _parse_berksfile(cookbook_path: Path) -> dict[str, Any]:
+    """Parse Berksfile for dependency information."""
+    base = _normalize_cookbook_root(cookbook_path)
+    berksfile_path = _safe_join(base, "Berksfile")
+
+    if not berksfile_path.exists():
+        return {"dependencies": [], "external_cookbooks": [], "complexity": 0}
+
+    try:
+        content = berksfile_path.read_text(encoding="utf-8", errors="ignore")
+
+        cookbook_deps = re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]', content)
+        external_deps = re.findall(
+            r'cookbook\s+[\'"]([^\'"]+)[\'"]\s*,\s*[\'"]([^\'"]+)[\'"]', content
+        )
+
+        complex_deps = len(re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]\s*,', content))
+        git_sources = len(re.findall(r"git:", content))
+        path_sources = len(re.findall(r"path:", content))
+
+        return {
+            "dependencies": cookbook_deps,
+            "external_cookbooks": [dep[0] for dep in external_deps],
+            "complexity": complex_deps + git_sources * 2 + path_sources * 2,
+            "has_git_sources": git_sources > 0,
+            "has_path_sources": path_sources > 0,
+        }
+    except Exception:
+        return {"dependencies": [], "external_cookbooks": [], "complexity": 0}
+
+
+def _parse_chefignore(cookbook_path) -> dict[str, Any]:
+    """Parse chefignore file for ignore patterns."""
+    base = _normalize_cookbook_root(cookbook_path)
+    chefignore_path = _ensure_within_base_path(_safe_join(base, "chefignore"), base)
+
+    if not chefignore_path.exists():
+        return {"patterns": [], "complexity": 0}
+
+    try:
+        content = chefignore_path.read_text(encoding="utf-8", errors="ignore")
+        lines = [
+            line.strip()
+            for line in content.split("\n")
+            if line.strip() and not line.startswith("#")
+        ]
+
+        wildcard_patterns = len([p for p in lines if "*" in p or "?" in p])
+        directory_patterns = len([p for p in lines if p.endswith("/") or "/" in p])
+
+        return {
+            "patterns": lines,
+            "pattern_count": len(lines),
+            "complexity": wildcard_patterns + directory_patterns,
+            "has_wildcards": wildcard_patterns > 0,
+        }
+    except Exception:
+        return {"patterns": [], "complexity": 0}
+
+
+def _parse_thorfile(cookbook_path) -> dict[str, Any]:
+    """Parse Thorfile for Thor tasks."""
+    base = _normalize_cookbook_root(cookbook_path)
+    thorfile_path = _ensure_within_base_path(_safe_join(base, "Thorfile"), base)
+
+    if not thorfile_path.exists():
+        return {"tasks": [], "complexity": 0}
+
+    try:
+        content = thorfile_path.read_text(encoding="utf-8", errors="ignore")
+
+        tasks = len(re.findall(r'desc\s+[\'"]([^\'"]+)[\'"]', content))
+        methods = len(re.findall(r"def\s+\w+", content))
+
+        return {
+            "tasks": tasks,
+            "methods": methods,
+            "complexity": tasks + methods,
+            "has_tasks": tasks > 0,
+        }
+    except Exception:
+        return {"tasks": [], "complexity": 0}
+
+
+def _parse_metadata_file(cookbook_path) -> dict[str, Any]:
+    """Parse metadata.rb for cookbook information."""
+    base = _normalize_cookbook_root(cookbook_path)
+    metadata_path = _ensure_within_base_path(_safe_join(base, "metadata.rb"), base)
+
+    if not metadata_path.exists():
+        return {
+            "name": "",
+            "version": "",
+            "dependencies": [],
+            "supports": [],
+            "complexity": 0,
+        }
+
+    try:
+        content = metadata_path.read_text(encoding="utf-8", errors="ignore")
+
+        name_match = re.search(r'name\s+[\'"]([^\'"]+)[\'"]', content)
+        version_match = re.search(r'version\s+[\'"]([^\'"]+)[\'"]', content)
+
+        # Parse dependencies
+        depends_matches = re.findall(r'depends\s+[\'"]([^\'"]+)[\'"]', content)
+
+        # Parse supported platforms
+        supports_matches = re.findall(r'supports\s+[\'"]([^\'"]+)[\'"]', content)
+
+        # Count complex metadata (recipes, attributes, etc.)
+        recipes = len(re.findall(r'recipe\s+[\'"]([^\'"]+)[\'"]', content))
+        attributes = len(re.findall(r'attribute\s+[\'"]([^\'"]+)[\'"]', content))
+
+        return {
+            "name": name_match.group(1) if name_match else "",
+            "version": version_match.group(1) if version_match else "",
+            "dependencies": depends_matches,
+            "supports": supports_matches,
+            "recipes": recipes,
+            "attributes": attributes,
+            "complexity": len(depends_matches)
+            + len(supports_matches)
+            + recipes
+            + attributes,
+        }
+    except Exception:
+        return {
+            "name": "",
+            "version": "",
+            "dependencies": [],
+            "supports": [],
+            "complexity": 0,
+        }
+
+
 def _calculate_complexity_score(metrics: dict[str, int]) -> int:
     """Calculate complexity score (0-100) based on metrics."""
     recipe_count = metrics["recipe_count"]
```
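Among the new helpers, `_analyze_templates` scores template complexity by counting embedded ERB tags with `<%.*?%>`; the non-greedy quantifier stops each match at the nearest closing delimiter, so adjacent tags are counted separately. A standalone check (the template text is illustrative):

```python
import re

template = "Listen <%= @port %>\n<% if @ssl_enabled %>\nSSLEngine on\n<% end %>"
# Non-greedy .*? ends each match at the first %>, giving one hit per tag.
print(len(re.findall(r"<%.*?%>", template)))  # 3
```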
```diff
@@ -698,8 +1131,8 @@ def _calculate_complexity_score(metrics: dict[str, int]) -> int:
         "resource_density": min(resource_count / max(recipe_count, 1) * 5, 25),
         "custom_resources": metrics["custom_resources"] * 10,
         "ruby_blocks": metrics["ruby_blocks"] * 5,
-        "templates": min(metrics["
-        "files": min(metrics["
+        "templates": min(metrics["template_count"] * 2, 15),
+        "files": min(metrics["file_count"] * 1, 10),
     }
 
     return int(sum(complexity_factors.values()))
```
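Plugging sample numbers into the factors visible in this hunk (the full `complexity_factors` dict may carry additional terms above the shown context, so this is a partial sum):

```python
# 4 recipes, 20 resources, 1 custom resource, 2 ruby blocks,
# 3 templates, 5 static files:
factors = {
    "resource_density": min(20 / max(4, 1) * 5, 25),  # capped at 25
    "custom_resources": 1 * 10,                       # 10
    "ruby_blocks": 2 * 5,                             # 10
    "templates": min(3 * 2, 15),                      # 6
    "files": min(5 * 1, 10),                          # 5
}
print(int(sum(factors.values())))  # 56
```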
```diff
@@ -736,21 +1169,22 @@ def _determine_migration_priority(complexity_score: int) -> str:
 
 def _assess_single_cookbook(cookbook_path: Path) -> dict:
     """Assess complexity of a single cookbook."""
-
-    cookbook = cookbook_path
+    cookbook = _normalize_cookbook_root(cookbook_path)
 
     # Collect metrics
     artifact_counts = _count_cookbook_artifacts(cookbook)
     recipe_complexity = _analyse_recipe_complexity(cookbook)
     metrics = {**artifact_counts, **recipe_complexity}
 
-    # Calculate complexity
+    # Calculate complexity score using existing function
     complexity_score = _calculate_complexity_score(metrics)
-
-    #
-
-
-
+
+    # Use centralized EffortMetrics for consistent calculations
+    effort_metrics = estimate_effort_for_complexity(
+        complexity_score=complexity_score,
+        resource_count=metrics["recipe_count"],
+    )
+    estimated_effort = effort_metrics.estimated_days
 
     # Build assessment
     return {
```
```diff
@@ -772,7 +1206,7 @@ def _format_overall_metrics(metrics: dict) -> str:
 • Total Resources: {metrics["total_resources"]}
 • Average Complexity: {metrics.get("avg_complexity", 0):.1f}/100
 • Estimated Total Effort: {metrics["estimated_effort_days"]:.1f} person-days
-• Estimated Duration: {
+• Estimated Duration: {EffortMetrics(metrics["estimated_effort_days"]).estimated_weeks_range}"""
 
 
 def _format_cookbook_assessments(assessments: list) -> str:
```
```diff
@@ -859,7 +1293,9 @@ def _generate_migration_recommendations_from_assessment(
 
     # Complexity-based recommendations
     avg_complexity = metrics.get("avg_complexity", 0)
-
+    complexity_level = categorize_complexity(avg_complexity)
+
+    if complexity_level == ComplexityLevel.HIGH:
         recommendations.append(
             "• Consider phased migration approach due to high complexity"
         )
```
```diff
@@ -867,9 +1303,12 @@ def _generate_migration_recommendations_from_assessment(
             "• Allocate additional time for custom resource conversion"
         )
         recommendations.append("• Plan for comprehensive testing and validation")
-
+    elif complexity_level == ComplexityLevel.LOW:
         recommendations.append("• Standard migration timeline should be sufficient")
         recommendations.append("• Consider big-bang approach for faster delivery")
+    else:
+        recommendations.append("• Balanced approach recommended for medium complexity")
+        recommendations.append("• Plan iterative validation checkpoints")
 
     # Effort-based recommendations
     total_effort = metrics["estimated_effort_days"]
```
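`categorize_complexity` and `ComplexityLevel` also come from the unshown `souschef/core/metrics.py`. Judging by the `> 70` / `> 30` cutoffs used in `assess_single_cookbook_with_ai` later in this diff, the helper plausibly looks like the following sketch — the thresholds are inferred, not confirmed:

```python
from enum import Enum


class ComplexityLevel(Enum):
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


def categorize_complexity(score: float) -> ComplexityLevel:
    # Inferred thresholds: scores above 70 are high, above 30 medium.
    if score > 70:
        return ComplexityLevel.HIGH
    if score > 30:
        return ComplexityLevel.MEDIUM
    return ComplexityLevel.LOW
```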
```diff
@@ -899,6 +1338,7 @@ def _create_migration_roadmap(assessments: list) -> str:
     # Sort cookbooks by complexity (low to high for easier wins first)
     sorted_cookbooks = sorted(assessments, key=lambda x: x["complexity_score"])
 
+    # Use complexity thresholds from metrics module for consistency
     phases = {
         "Phase 1 - Foundation (Weeks 1-2)": [
             "Set up Ansible/AWX environment",
```
```diff
@@ -917,17 +1357,19 @@ def _create_migration_roadmap(assessments: list) -> str:
         ],
     }
 
-    # Distribute cookbooks across phases
+    # Distribute cookbooks across phases using centralized complexity thresholds
     for cookbook in sorted_cookbooks:
-
+        complexity_level = categorize_complexity(cookbook["complexity_score"])
+
+        if complexity_level == ComplexityLevel.LOW:
             phases["Phase 2 - Low Complexity Migration (Weeks 3-5)"].append(
                 f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
             )
-        elif
+        elif complexity_level == ComplexityLevel.MEDIUM:
             phases["Phase 3 - Medium Complexity Migration (Weeks 6-9)"].append(
                 f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
             )
-        else:
+        else:  # HIGH
             phases["Phase 4 - High Complexity Migration (Weeks 10-12)"].append(
                 f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
             )
```
```diff
@@ -1025,38 +1467,92 @@ def _estimate_resource_requirements(metrics: dict, target_platform: str) -> str:
 • **Training:** 2-3 days Ansible/AWX training for team"""
 
 
-def _analyse_cookbook_dependencies_detailed(cookbook_path) -> dict:
-    """
-
-
-
+def _analyse_cookbook_dependencies_detailed(cookbook_path: Path | str) -> dict:
+    """
+    Analyse cookbook dependencies in detail.
+
+    Args:
+        cookbook_path: Path to the cookbook (may be string or Path).
+
+    Returns:
+        Dictionary with dependency information.
+
+    Raises:
+        ValueError: If the path is invalid.
+
+    """
+    # Normalize the input path
+    base_path: Path = _normalize_path(cookbook_path)
+
+    # Validate basic accessibility
+    if not base_path.exists():
+        msg = f"Cookbook path does not exist: {cookbook_path}"
+        raise ValueError(msg)
+    if not base_path.is_dir():
+        msg = f"Cookbook path is not a directory: {cookbook_path}"
+        raise ValueError(msg)
+
+    # Collect dependencies from metadata and Berksfile
+    direct_dependencies = _collect_metadata_dependencies(base_path)
+    external_dependencies = _collect_berks_dependencies(base_path)
+    community_cookbooks = _identify_community_cookbooks_from_list(
+        direct_dependencies + external_dependencies
+    )
+
+    return {
+        "cookbook_name": base_path.name,
+        "direct_dependencies": direct_dependencies,
         "transitive_dependencies": [],
-        "external_dependencies":
-        "community_cookbooks":
+        "external_dependencies": external_dependencies,
+        "community_cookbooks": community_cookbooks,
         "circular_dependencies": [],
     }
 
-    # Read metadata.rb for dependencies
-    metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
-    if metadata_file.exists():
-        with metadata_file.open("r", encoding="utf-8", errors="ignore") as f:
-            content = f.read()
 
-
+def _collect_metadata_dependencies(base_path: Path) -> list[str]:
+    """Collect dependency declarations from metadata.rb with containment checks."""
+    # Build metadata path safely within the cookbook
+    metadata_path: Path = _safe_join(base_path, METADATA_FILENAME)
 
-
-
+    if not metadata_path.is_file():
+        return []
+
+    try:
+        # Validate metadata_path is within base_path
+        _validated_candidate(metadata_path, base_path)
+    except ValueError:
+        # metadata.rb is outside cookbook root
+        return []
 
-
-
-    if berksfile.exists():
-        with berksfile.open("r", encoding="utf-8", errors="ignore") as f:
-            content = f.read()
+    with metadata_path.open(encoding="utf-8", errors="ignore") as f:
+        content = f.read()
 
-
-        analysis["external_dependencies"].extend(cookbook_matches)
+    return re.findall(r'depends\s+[\'"]([^\'"]+)[\'"]', content)
 
-
+
+def _collect_berks_dependencies(base_path: Path) -> list[str]:
+    """Collect dependency declarations from Berksfile with containment checks."""
+    # Build Berksfile path safely within the cookbook
+    berksfile_path: Path = _safe_join(base_path, "Berksfile")
+
+    if not berksfile_path.is_file():
+        return []
+
+    try:
+        # Validate berksfile_path is within base_path
+        _validated_candidate(berksfile_path, base_path)
+    except ValueError:
+        # Berksfile is outside cookbook root
+        return []
+
+    with berksfile_path.open(encoding="utf-8", errors="ignore") as f:
+        content = f.read()
+
+    return re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]', content)
+
+
+def _identify_community_cookbooks_from_list(dependencies: list[str]) -> list[str]:
+    """Return dependencies considered community cookbooks based on patterns."""
     community_cookbook_patterns = [
         "apache2",
         "nginx",
```
```diff
@@ -1073,12 +1569,11 @@ def _analyse_cookbook_dependencies_detailed(cookbook_path) -> dict:
         "users",
     ]
 
-
-
-
-
-
-    return analysis
+    return [
+        dep
+        for dep in dependencies
+        if any(pattern in dep.lower() for pattern in community_cookbook_patterns)
+    ]
 
 
 def _determine_migration_order(dependency_analysis: dict) -> list:
```
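The `_safe_join` / `_validated_candidate` pair used throughout comes from `souschef/core/path_utils.py` (expanded by +233 lines in this release, not shown here). The contract implied by the call sites — join inside a base directory, then reject anything that resolves outside it — can be sketched as follows; this is a hypothetical equivalent, not the package's implementation:

```python
from pathlib import Path


def validated_candidate_sketch(candidate: Path, base: Path) -> Path:
    """Hypothetical stand-in for _validated_candidate's containment check."""
    resolved = candidate.resolve()
    # relative_to raises ValueError when resolved escapes base, which is
    # exactly the exception the callers above catch and skip on.
    resolved.relative_to(base.resolve())
    return resolved
```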
@@ -1556,26 +2051,877 @@ def _format_validation_results_text(
|
|
|
1556
2051
|
return "\n".join(output_lines)
|
|
1557
2052
|
|
|
1558
2053
|
|
|
1559
|
-
def
|
|
1560
|
-
|
|
2054
|
+
def assess_single_cookbook_with_ai(
|
|
2055
|
+
cookbook_path: str,
|
|
2056
|
+
ai_provider: str = "anthropic",
|
|
2057
|
+
api_key: str = "",
|
|
2058
|
+
model: str = "claude-3-5-sonnet-20241022",
|
|
2059
|
+
temperature: float = 0.3,
|
|
2060
|
+
max_tokens: int = 2000,
|
|
2061
|
+
project_id: str = "",
|
|
2062
|
+
base_url: str = "",
|
|
2063
|
+
) -> dict[str, Any]:
|
|
2064
|
+
"""
|
|
2065
|
+
Assess a single Chef cookbook using AI analysis.
|
|
2066
|
+
|
|
2067
|
+
Args:
|
|
2068
|
+
cookbook_path: Path to the Chef cookbook directory
|
|
2069
|
+
ai_provider: AI provider (anthropic, openai, watson)
|
|
2070
|
+
api_key: API key for the AI provider
|
|
2071
|
+
model: AI model to use
|
|
2072
|
+
temperature: AI temperature setting
|
|
2073
|
+
max_tokens: Maximum tokens for AI response
|
|
2074
|
+
project_id: Project ID for IBM Watsonx (required for watson provider)
|
|
2075
|
+
base_url: Custom base URL for the AI provider
|
|
2076
|
+
|
|
2077
|
+
Returns:
|
|
2078
|
+
Dictionary containing assessment data with complexity, recommendations, etc.
|
|
2079
|
+
|
|
2080
|
+
"""
|
|
2081
|
+
try:
|
|
2082
|
+
cookbook_path_obj = _normalize_path(cookbook_path)
|
|
2083
|
+
if not cookbook_path_obj.exists(): # Read-only check on normalized path
|
|
2084
|
+
return {"error": f"Cookbook path not found: {cookbook_path}"}
|
|
2085
|
+
|
|
2086
|
+
# Check if AI is available
|
|
2087
|
+
ai_available = _is_ai_available(ai_provider, api_key)
|
|
2088
|
+
if not ai_available:
|
|
2089
|
+
# Fall back to rule-based analysis
|
|
2090
|
+
return parse_chef_migration_assessment(cookbook_path)
|
|
2091
|
+
|
|
2092
|
+
# Get AI-enhanced assessment
|
|
2093
|
+
assessment = _assess_single_cookbook_with_ai(
|
|
2094
|
+
cookbook_path_obj,
|
|
2095
|
+
ai_provider,
|
|
2096
|
+
api_key,
|
|
2097
|
+
model,
|
|
2098
|
+
temperature,
|
|
2099
|
+
max_tokens,
|
|
2100
|
+
project_id,
|
|
2101
|
+
base_url,
|
|
2102
|
+
)
|
|
2103
|
+
|
|
2104
|
+
# Convert to the format expected by the UI
|
|
2105
|
+
complexity_level = "Low"
|
|
2106
|
+
if assessment["complexity_score"] > 70:
|
|
2107
|
+
complexity_level = "High"
|
|
2108
|
+
elif assessment["complexity_score"] > 30:
|
|
2109
|
+
complexity_level = "Medium"
|
|
2110
|
+
|
|
2111
|
+
return {
|
|
2112
|
+
"complexity": complexity_level,
|
|
2113
|
+
"estimated_hours": EffortMetrics(
|
|
2114
|
+
assessment["estimated_effort_days"]
|
|
2115
|
+
).estimated_hours,
|
|
2116
|
+
"recommendations": assessment.get(
|
|
2117
|
+
"ai_insights", "AI-enhanced analysis completed"
|
|
2118
|
+
),
|
|
2119
|
+
}
|
|
2120
|
+
|
|
2121
|
+
except Exception as e:
|
|
2122
|
+
return {
|
|
2123
|
+
"error": format_error_with_context(
|
|
2124
|
+
e, "assessing single cookbook with AI", cookbook_path
|
|
2125
|
+
)
|
|
2126
|
+
}
|
|
2127
|
+
|
|
2128
|
+
|
|
2129
|
+
def assess_chef_migration_complexity_with_ai(
|
|
2130
|
+
cookbook_paths: str,
|
|
2131
|
+
migration_scope: str = "full",
|
|
2132
|
+
target_platform: str = "ansible_awx",
|
|
2133
|
+
ai_provider: str = "anthropic",
|
|
2134
|
+
api_key: str = "",
|
|
2135
|
+
model: str = "claude-3-5-sonnet-20241022",
|
|
2136
|
+
temperature: float = 0.3,
|
|
2137
|
+
max_tokens: int = 2000,
|
|
2138
|
+
project_id: str = "",
|
|
2139
|
+
base_url: str = "",
|
|
1561
2140
|
) -> str:
|
|
1562
2141
|
"""
|
|
1563
|
-
|
|
2142
|
+
Assess the complexity of migrating Chef cookbooks to Ansible using AI analysis.
|
|
2143
|
+
|
|
2144
|
+
This function uses AI to provide more intelligent analysis of cookbook complexity,
|
|
2145
|
+
migration challenges, and recommendations. Falls back to rule-based analysis
|
|
2146
|
+
if AI is not available.
|
|
1564
2147
|
|
|
1565
2148
|
Args:
|
|
1566
|
-
|
|
1567
|
-
|
|
2149
|
+
cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
|
|
2150
|
+
migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
|
|
2151
|
+
target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)
|
|
2152
|
+
ai_provider: AI provider (anthropic, openai, watson)
|
|
2153
|
+
api_key: API key for the AI provider
|
|
2154
|
+
model: AI model to use
|
|
2155
|
+
temperature: AI temperature setting
|
|
2156
|
+
max_tokens: Maximum tokens for AI response
|
|
2157
|
+
project_id: Project ID for IBM Watsonx (required for watson provider)
|
|
2158
|
+
base_url: Custom base URL for the AI provider
|
|
2159
|
+
|
|
2160
|
+
Returns:
|
|
2161
|
+
Comprehensive migration complexity assessment with AI-enhanced recommendations
|
|
2162
|
+
|
|
2163
|
+
"""
|
|
2164
|
+
try:
|
|
2165
|
+
# Validate and parse inputs
|
|
2166
|
+
error_msg = _validate_assessment_inputs(
|
|
2167
|
+
cookbook_paths, migration_scope, target_platform
|
|
2168
|
+
)
|
|
2169
|
+
if error_msg:
|
|
2170
|
+
return error_msg
|
|
2171
|
+
|
|
2172
|
+
# Check if AI is available
|
|
2173
|
+
ai_available = _is_ai_available(ai_provider, api_key)
|
|
2174
|
+
if not ai_available:
|
|
2175
|
+
# Fall back to rule-based analysis
|
|
2176
|
+
return assess_chef_migration_complexity(
|
|
2177
|
+
cookbook_paths, migration_scope, target_platform
|
|
2178
|
+
)
|
|
2179
|
+
|
|
2180
|
+
# Process cookbook analysis with AI
|
|
2181
|
+
return _process_cookbook_assessment_with_ai(
|
|
2182
|
+
cookbook_paths,
|
|
2183
|
+
migration_scope,
|
|
2184
|
+
target_platform,
|
|
2185
|
+
ai_provider,
|
|
2186
|
+
api_key,
|
|
2187
|
+
model,
|
|
2188
|
+
temperature,
|
|
2189
|
+
max_tokens,
|
|
2190
|
+
project_id,
|
|
2191
|
+
base_url,
|
|
2192
|
+
)
|
|
2193
|
+
|
|
2194
|
+
except Exception as e:
|
|
2195
|
+
return format_error_with_context(
|
|
2196
|
+
e, "assessing Chef migration complexity with AI", cookbook_paths
|
|
2197
|
+
)
|
|
2198
|
+
|
|
2199
|
+
|
|
2200
|
+
def _is_ai_available(ai_provider: str, api_key: str) -> bool:
|
|
2201
|
+
"""Check if AI analysis is available."""
|
|
2202
|
+
if not api_key:
|
|
2203
|
+
return False
|
|
2204
|
+
|
|
2205
|
+
if ai_provider == "anthropic" or ai_provider == "openai":
|
|
2206
|
+
return requests is not None
|
|
2207
|
+
elif ai_provider == "watson":
|
|
2208
|
+
return APIClient is not None
|
|
2209
|
+
else:
|
|
2210
|
+
return False
|
|
2211
|
+
|
|
2212
|
+
|
|
2213
|
+
def _process_cookbook_assessment_with_ai(
|
|
2214
|
+
cookbook_paths: str,
|
|
2215
|
+
migration_scope: str,
|
|
2216
|
+
target_platform: str,
|
|
2217
|
+
ai_provider: str,
|
|
2218
|
+
api_key: str,
|
|
2219
|
+
model: str,
|
|
2220
|
+
temperature: float,
|
|
2221
|
+
max_tokens: int,
|
|
2222
|
+
project_id: str = "",
|
|
2223
|
+
base_url: str = "",
|
|
2224
|
+
) -> str:
|
|
2225
|
+
"""Process the cookbook assessment workflow with AI analysis."""
|
|
2226
|
+
# Parse cookbook paths (may be empty if none exist)
|
|
2227
|
+
valid_paths = _parse_cookbook_paths(cookbook_paths)
|
|
2228
|
+
|
|
2229
|
+
# Analyze all cookbooks with AI enhancement
|
|
2230
|
+
cookbook_assessments, overall_metrics = _analyse_cookbook_metrics_with_ai(
|
|
2231
|
+
valid_paths,
|
|
2232
|
+
ai_provider,
|
|
2233
|
+
api_key,
|
|
2234
|
+
model,
|
|
2235
|
+
temperature,
|
|
2236
|
+
max_tokens,
|
|
2237
|
+
project_id,
|
|
2238
|
+
base_url,
|
|
2239
|
+
)
|
|
2240
|
+
|
|
2241
|
+
# Generate AI-enhanced recommendations and reports
|
|
2242
|
+
recommendations = _generate_ai_migration_recommendations(
|
|
2243
|
+
cookbook_assessments,
|
|
2244
|
+
overall_metrics,
|
|
2245
|
+
target_platform,
|
|
2246
|
+
ai_provider,
|
|
2247
|
+
api_key,
|
|
2248
|
+
model,
|
|
2249
|
+
temperature,
|
|
2250
|
+
max_tokens,
|
|
2251
|
+
project_id,
|
|
2252
|
+
base_url,
|
|
2253
|
+
)
|
|
2254
|
+
roadmap = _create_ai_migration_roadmap(
|
|
2255
|
+
cookbook_assessments,
|
|
2256
|
+
ai_provider,
|
|
2257
|
+
api_key,
|
|
2258
|
+
model,
|
|
2259
|
+
temperature,
|
|
2260
|
+
max_tokens,
|
|
2261
|
+
project_id,
|
|
2262
|
+
base_url,
|
|
2263
|
+
)
|
|
2264
|
+
|
|
2265
|
+
# Format final assessment report
|
|
2266
|
+
return _format_ai_assessment_report(
|
|
2267
|
+
migration_scope,
|
|
2268
|
+
target_platform,
|
|
2269
|
+
overall_metrics,
|
|
2270
|
+
cookbook_assessments,
|
|
2271
|
+
recommendations,
|
|
2272
|
+
roadmap,
|
|
2273
|
+
)
|
|
2274
|
+
|
|
2275
|
+
|
|
2276
|
+
def _analyse_cookbook_metrics_with_ai(
|
|
2277
|
+
valid_paths: list[Any],
|
|
2278
|
+
ai_provider: str,
|
|
2279
|
+
api_key: str,
|
|
2280
|
+
model: str,
|
|
2281
|
+
temperature: float,
|
|
2282
|
+
max_tokens: int,
|
|
2283
|
+
project_id: str = "",
|
|
2284
|
+
base_url: str = "",
|
|
2285
|
+
) -> tuple[list[Any], dict[str, int]]:
|
|
2286
|
+
"""
|
|
2287
|
+
Analyse metrics for all cookbooks with AI enhancement.
|
|
2288
|
+
|
|
2289
|
+
Args:
|
|
2290
|
+
valid_paths: List of valid cookbook paths
|
|
2291
|
+
ai_provider: AI provider name
|
|
2292
|
+
api_key: API key
|
|
2293
|
+
model: AI model
|
|
2294
|
+
temperature: AI temperature
|
|
2295
|
+
max_tokens: Max tokens
|
|
2296
|
+
project_id: Project ID for IBM Watsonx (required for watson provider)
|
|
2297
|
+
base_url: Custom base URL for the AI provider
|
|
2298
|
+
|
|
2299
|
+
Returns:
|
|
2300
|
+
Tuple of (cookbook_assessments, overall_metrics)
|
|
2301
|
+
|
|
2302
|
+
"""
|
|
2303
|
+
cookbook_assessments = []
|
|
2304
|
+
overall_metrics = {
|
|
2305
|
+
"total_cookbooks": 0,
|
|
2306
|
+
"total_recipes": 0,
|
|
2307
|
+
"total_resources": 0,
|
|
2308
|
+
"complexity_score": 0,
|
|
2309
|
+
"estimated_effort_days": 0,
|
|
2310
|
+
}
|
|
2311
|
+
|
|
2312
|
+
for cookbook_path in valid_paths:
|
|
2313
|
+
# deepcode ignore PT: path normalized via _normalize_path
|
|
2314
|
+
assessment = _assess_single_cookbook_with_ai(
|
|
2315
|
+
cookbook_path,
|
|
2316
|
+
ai_provider,
|
|
2317
|
+
api_key,
|
|
2318
|
+
model,
|
|
2319
|
+
temperature,
|
|
2320
|
+
max_tokens,
|
|
2321
|
+
project_id,
|
|
2322
|
+
base_url,
|
|
2323
|
+
)
|
|
2324
|
+
cookbook_assessments.append(assessment)
|
|
2325
|
+
|
|
2326
|
+
# Aggregate metrics
|
|
2327
|
+
overall_metrics["total_cookbooks"] += 1
|
|
2328
|
+
overall_metrics["total_recipes"] += assessment["metrics"]["recipe_count"]
|
|
2329
|
+
overall_metrics["total_resources"] += assessment["metrics"]["resource_count"]
|
|
2330
|
+
overall_metrics["complexity_score"] += assessment["complexity_score"]
|
|
2331
|
+
overall_metrics["estimated_effort_days"] += assessment["estimated_effort_days"]
|
|
2332
|
+
|
|
2333
|
+
# Calculate averages
|
|
2334
|
+
if cookbook_assessments:
|
|
2335
|
+
overall_metrics["avg_complexity"] = int(
|
|
2336
|
+
overall_metrics["complexity_score"] / len(cookbook_assessments)
|
|
2337
|
+
)
|
|
2338
|
+
|
|
2339
|
+
return cookbook_assessments, overall_metrics
|
|
2340
|
+
|
|
2341
|
+
|
|
2342
|
+
def _assess_single_cookbook_with_ai(
|
|
2343
|
+
cookbook_path: Path,
|
|
2344
|
+
ai_provider: str,
|
|
2345
|
+
api_key: str,
|
|
2346
|
+
model: str,
|
|
2347
|
+
temperature: float,
|
|
2348
|
+
max_tokens: int,
|
|
2349
|
+
project_id: str = "",
|
|
2350
|
+
base_url: str = "",
|
|
2351
|
+
) -> dict:
|
|
2352
|
+
"""Assess complexity of a single cookbook using AI analysis."""
|
|
2353
|
+
cookbook = _normalize_cookbook_root(cookbook_path)
|
|
2354
|
+
|
|
2355
|
+
# Collect basic metrics (same as rule-based)
|
|
2356
|
+
artifact_counts = _count_cookbook_artifacts(cookbook)
|
|
2357
|
+
recipe_complexity = _analyse_recipe_complexity(cookbook)
|
|
2358
|
+
metrics = {**artifact_counts, **recipe_complexity}
|
|
2359
|
+
|
|
2360
|
+
# Get AI analysis for this cookbook
|
|
2361
|
+
ai_analysis = _get_ai_cookbook_analysis(
|
|
2362
|
+
cookbook,
|
|
2363
|
+
metrics,
|
|
2364
|
+
ai_provider,
|
|
2365
|
+
api_key,
|
|
2366
|
+
model,
|
|
2367
|
+
temperature,
|
|
2368
|
+
max_tokens,
|
|
2369
|
+
project_id,
|
|
2370
|
+
base_url,
|
|
2371
|
+
)
|
|
2372
|
+
|
|
2373
|
+
# Use AI-provided complexity score if available, otherwise fall back to rule-based
|
|
2374
|
+
if ai_analysis and "complexity_score" in ai_analysis:
|
|
2375
|
+
complexity_score = ai_analysis["complexity_score"]
|
|
2376
|
+
else:
|
|
2377
|
+
complexity_score = _calculate_complexity_score(metrics)
|
|
2378
|
+
|
|
2379
|
+
# Use AI-provided effort estimate if available, otherwise fall back to rule-based
|
|
2380
|
+
if ai_analysis and "estimated_effort_days" in ai_analysis:
|
|
2381
|
+
estimated_effort = ai_analysis["estimated_effort_days"]
|
|
2382
|
+
else:
|
|
2383
|
+
base_effort = metrics["recipe_count"] * 0.125 # 0.125 days per recipe
|
|
2384
|
+
complexity_multiplier = 1 + (complexity_score / 100)
|
|
2385
|
+
estimated_effort = round(base_effort * complexity_multiplier, 1)
|
|
2386
|
+
|
|
2387
|
+
# Build assessment with AI insights
|
|
2388
|
+
assessment = {
|
|
2389
|
+
"cookbook_name": cookbook.name,
|
|
2390
|
+
"cookbook_path": str(cookbook),
|
|
2391
|
+
"metrics": metrics,
|
|
2392
|
+
"complexity_score": complexity_score,
|
|
2393
|
+
"estimated_effort_days": estimated_effort,
|
|
2394
|
+
"challenges": ai_analysis.get("challenges", [])
|
|
2395
|
+
if ai_analysis
|
|
2396
|
+
else _identify_migration_challenges(metrics, complexity_score),
|
|
2397
|
+
"migration_priority": ai_analysis.get(
|
|
2398
|
+
"migration_priority", _determine_migration_priority(complexity_score)
|
|
2399
|
+
)
|
|
2400
|
+
if ai_analysis
|
|
2401
|
+
else _determine_migration_priority(complexity_score),
|
|
2402
|
+
"ai_insights": ai_analysis.get("insights", "") if ai_analysis else "",
|
|
2403
|
+
"dependencies": [],
|
|
2404
|
+
}
|
|
2405
|
+
|
|
2406
|
+
return assessment
|
|
2407
|
+
|
|
2408
|
+
|
|
2409
|
+
def _get_ai_cookbook_analysis(
|
|
2410
|
+
cookbook_path: Path,
|
|
2411
|
+
metrics: dict,
|
|
2412
|
+
ai_provider: str,
|
|
2413
|
+
api_key: str,
|
|
2414
|
+
model: str,
|
|
2415
|
+
temperature: float,
|
|
2416
|
+
max_tokens: int,
|
|
2417
|
+
project_id: str = "",
|
|
2418
|
+
base_url: str = "",
|
|
2419
|
+
) -> dict | None:
|
|
2420
|
+
"""Get AI analysis for a single cookbook."""
|
|
2421
|
+
try:
|
|
2422
|
+
# Read key files for AI analysis
|
|
2423
|
+
recipe_content = _get_recipe_content_sample(cookbook_path)
|
|
2424
|
+
metadata_content = _get_metadata_content(cookbook_path)
|
|
2425
|
+
|
|
2426
|
+
# Prepare prompt for AI
|
|
2427
|
+
prompt = f"""Analyze this Chef cookbook for migration to Ansible. Provide a detailed assessment including:
|
|
2428
|
+
|
|
2429
|
+
1. Complexity score (0-100, where 100 is most complex)
|
|
2430
|
+
2. Estimated effort in days (realistic estimate for an experienced engineer)
|
|
2431
|
+
3. Key migration challenges and risks
|
|
2432
|
+
4. Migration priority (low/medium/high)
|
|
2433
|
+
5. Specific insights about this cookbook's conversion difficulty
|
|
2434
|
+
|
|
2435
|
+
Cookbook: {cookbook_path.name}
|
|
2436
|
+
Basic metrics: {json.dumps(metrics, indent=2)}
|
|
2437
|
+
|
|
2438
|
+
Metadata:
|
|
2439
|
+
{metadata_content}
|
|
2440
|
+
|
|
2441
|
+
Sample recipe content:
|
|
2442
|
+
{recipe_content}
|
|
2443
|
+
|
|
2444
|
+
Provide your analysis in JSON format with keys: complexity_score, estimated_effort_days, challenges (array), migration_priority, insights."""
|
|
2445
|
+
|
|
2446
|
+
# Call AI API
|
|
2447
|
+
ai_response = _call_ai_api(
|
|
2448
|
+
prompt,
|
|
2449
|
+
ai_provider,
|
|
2450
|
+
api_key,
|
|
2451
|
+
model,
|
|
2452
|
+
temperature,
|
|
2453
|
+
max_tokens,
|
|
2454
|
+
project_id,
|
|
2455
|
+
base_url,
|
|
2456
|
+
)
|
|
2457
|
+
|
|
2458
|
+
if ai_response:
|
|
2459
|
+
# Parse JSON response
|
|
2460
|
+
try:
|
|
2461
|
+
parsed = json.loads(ai_response.strip())
|
|
2462
|
+
return dict(parsed) # Cast to dict to satisfy type checker
|
|
2463
|
+
except json.JSONDecodeError:
|
|
2464
|
+
# Try to extract JSON from response
|
|
2465
|
+
json_match = re.search(r"\{.*\}", ai_response, re.DOTALL)
|
|
2466
|
+
if json_match:
|
|
2467
|
+
parsed = json.loads(json_match.group())
|
|
2468
|
+
return dict(parsed) # Cast to dict to satisfy type checker
|
|
2469
|
+
else:
|
|
2470
|
+
return None
|
|
2471
|
+
return None
|
|
2472
|
+
|
|
2473
|
+
except Exception:
|
|
2474
|
+
# If AI analysis fails, return None to fall back to rule-based
|
|
2475
|
+
return None
|
|
2476
|
+
|
|
2477
|
+
|
|
2478
|
+
def _get_recipe_content_sample(cookbook_path: Path) -> str:
|
|
2479
|
+
"""Get a sample of ALL recipe content for AI analysis."""
|
|
2480
|
+
# Inline guard directly adjacent to sink
|
|
2481
|
+
base = os.path.realpath(str(cookbook_path)) # noqa: PTH111
|
|
2482
|
+
recipes_dir_str = os.path.realpath(os.path.join(base, "recipes")) # noqa: PTH111, PTH118
|
|
2483
|
+
if os.path.commonpath([base, recipes_dir_str]) != base:
|
|
2484
|
+
raise RuntimeError("Path traversal")
|
|
2485
|
+
if not os.path.exists(recipes_dir_str): # noqa: PTH110
|
|
2486
|
+
return "No recipes directory found"
|
|
2487
|
+
|
|
2488
|
+
recipes_dir = Path(recipes_dir_str)
|
|
2489
|
+
recipe_files = list(recipes_dir.glob("*.rb"))
|
|
2490
|
+
if not recipe_files:
|
|
2491
|
+
return "No recipe files found"
|
|
2492
|
+
|
|
2493
|
+
# Read ALL recipe files, with reasonable size limits
|
|
2494
|
+
all_recipes_content = []
|
|
2495
|
+
total_chars = 0
|
|
2496
|
+
max_total_chars = 8000 # Increased limit to cover multiple recipes
|
|
2497
|
+
|
|
2498
|
+
for recipe_file in recipe_files:
|
|
2499
|
+
try:
|
|
2500
|
+
content = recipe_file.read_text(encoding="utf-8", errors="ignore")
|
|
2501
|
+
recipe_header = f"\n=== {recipe_file.name} ===\n"
|
|
2502
|
+
|
|
2503
|
+
# Add this recipe if we have room
|
|
2504
|
+
if total_chars + len(recipe_header) + len(content) < max_total_chars:
|
|
2505
|
+
all_recipes_content.append(recipe_header + content)
|
|
2506
|
+
total_chars += len(recipe_header) + len(content)
|
|
2507
|
+
else:
|
|
2508
|
+
# Add truncated version
|
|
2509
|
+
remaining = max_total_chars - total_chars - len(recipe_header)
|
|
2510
|
+
if remaining > 100:
|
|
2511
|
+
all_recipes_content.append(
|
|
2512
|
+
recipe_header + content[:remaining] + "..."
|
|
2513
|
+
)
|
|
2514
|
+
break
|
|
2515
|
+
except Exception:
|
|
2516
|
+
continue
|
|
2517
|
+
|
|
2518
|
+
if not all_recipes_content:
|
|
2519
|
+
return "Could not read recipe content"
|
|
2520
|
+
|
|
2521
|
+
return "\n".join(all_recipes_content)
|
|
2522
|
+
|
|
2523
|
+
|
|
+def _get_metadata_content(cookbook_path: Path) -> str:
+    """Get metadata content for AI analysis."""
+    # Inline guard directly adjacent to sink
+    base = os.path.realpath(str(cookbook_path))  # noqa: PTH111
+    metadata_file_str = os.path.realpath(os.path.join(base, METADATA_FILENAME))  # noqa: PTH111, PTH118
+    if os.path.commonpath([base, metadata_file_str]) != base:
+        raise RuntimeError("Path traversal")
+    if not os.path.exists(metadata_file_str):  # noqa: PTH110
+        return "No metadata.rb found"
+
+    try:
+        return Path(metadata_file_str).read_text(encoding="utf-8", errors="ignore")
+    except Exception:
+        return "Could not read metadata"
+
+
+def _call_ai_api(
+    prompt: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str | None:
+    """Call the AI API for analysis."""
+    try:
+        if ai_provider == "anthropic":
+            return _call_anthropic_api(prompt, api_key, model, temperature, max_tokens)
+        elif ai_provider == "openai":
+            return _call_openai_api(prompt, api_key, model, temperature, max_tokens)
+        elif ai_provider == "watson":
+            return _call_watson_api(
+                prompt, api_key, model, temperature, max_tokens, project_id, base_url
+            )
+        else:
+            return None
+    except Exception:
+        return None
+
+
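A dispatch sketch for `_call_ai_api`; the provider/model values and the `ANTHROPIC_API_KEY` variable are illustrative assumptions, and the helper deliberately returns `None` on any failure so callers can fall back to rule-based analysis:

```python
import os

from souschef.assessment import _call_ai_api

answer = _call_ai_api(
    prompt="Summarise the migration risk of this cookbook in one sentence.",
    ai_provider="anthropic",            # dispatches to anthropic, openai, or watson
    api_key=os.environ.get("ANTHROPIC_API_KEY", ""),
    model="claude-3-5-sonnet-latest",   # illustrative model name
    temperature=0.2,
    max_tokens=256,
)
print(answer or "AI unavailable; falling back to rule-based analysis")
```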
+def _call_anthropic_api(
+    prompt: str, api_key: str, model: str, temperature: float, max_tokens: int
+) -> str | None:
+    """Call Anthropic Claude API."""
+    if not requests:
+        return None
+
+    try:
+        response = requests.post(
+            "https://api.anthropic.com/v1/messages",
+            headers={
+                "x-api-key": api_key,
+                "anthropic-version": "2023-06-01",
+                "content-type": "application/json",
+            },
+            json={
+                "model": model,
+                "max_tokens": max_tokens,
+                "temperature": temperature,
+                "system": "You are an expert in Chef to Ansible migration analysis. Provide accurate, detailed assessments.",
+                "messages": [{"role": "user", "content": prompt}],
+            },
+            timeout=30,
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            return str(result["content"][0]["text"])
+        return None
+    except Exception:
+        return None
+
+
+def _call_openai_api(
+    prompt: str, api_key: str, model: str, temperature: float, max_tokens: int
+) -> str | None:
+    """Call OpenAI API."""
+    if not requests:
+        return None
+
+    try:
+        response = requests.post(
+            "https://api.openai.com/v1/chat/completions",
+            headers={
+                "Authorization": f"Bearer {api_key}",
+                "Content-Type": "application/json",
+            },
+            json={
+                "model": model,
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": "You are an expert in Chef to Ansible migration analysis. Provide accurate, detailed assessments.",
+                    },
+                    {"role": "user", "content": prompt},
+                ],
+                "temperature": temperature,
+                "max_tokens": max_tokens,
+            },
+            timeout=30,
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            return str(result["choices"][0]["message"]["content"])
+        return None
+    except Exception:
+        return None
+
+
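Because both HTTP callers swallow exceptions and return `None`, an offline unit test needs to stub the module-level `requests` reference rather than hit the network. A sketch using `unittest.mock` (the response shape mirrors the parsing above; the model name and key are arbitrary):

```python
from unittest.mock import MagicMock, patch

from souschef import assessment

# Fake a successful chat-completions response.
fake_response = MagicMock()
fake_response.status_code = 200
fake_response.json.return_value = {"choices": [{"message": {"content": "LGTM"}}]}

with patch.object(assessment, "requests") as mock_requests:
    mock_requests.post.return_value = fake_response
    out = assessment._call_openai_api("hi", "sk-test", "gpt-4o-mini", 0.0, 16)

assert out == "LGTM"
```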
+def _call_watson_api(
+    prompt: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str | None:
+    """Call IBM Watsonx API."""
+    if not APIClient:
+        return None
+
+    try:
+        # Initialize Watsonx API client
+        client = (
+            APIClient(api_key=api_key, url=base_url)
+            if base_url
+            else APIClient(api_key=api_key)
+        )
+
+        # Prepare request parameters
+        request_params = {
+            "prompt": prompt,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "model_id": model,
+            "project_id": project_id,
+        }
+
+        # Call Watsonx API for text generation
+        response = (
+            client.deployments.text_generation_stream(**request_params)
+            if hasattr(client, "deployments")
+            else None
+        )
+
+        if response:
+            # Collect streamed response
+            generated_text = ""
+            for chunk in response:
+                if hasattr(chunk, "results") and chunk.results:
+                    generated_text += str(chunk.results[0].generated_text)
+            return generated_text if generated_text else None
+
+        return None
+    except Exception:
+        return None
+
+
+def _generate_ai_migration_recommendations(
+    assessments: list,
+    metrics: dict,
+    target_platform: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str:
+    """Generate AI-enhanced migration recommendations."""
+    try:
+        # Prepare assessment summary for AI
+        assessment_summary = {
+            "total_cookbooks": metrics["total_cookbooks"],
+            "avg_complexity": metrics.get("avg_complexity", 0),
+            "total_effort_days": metrics["estimated_effort_days"],
+            "target_platform": target_platform,
+            "cookbook_highlights": [
+                {
+                    "name": a["cookbook_name"],
+                    "complexity": a["complexity_score"],
+                    "effort": a["estimated_effort_days"],
+                    "insights": a.get("ai_insights", ""),
+                }
+                for a in assessments[:5]  # Top 5 cookbooks
+            ],
+        }
+
+        prompt = f"""Based on this Chef to Ansible migration assessment, provide specific, actionable recommendations:
+
+Assessment Summary: {json.dumps(assessment_summary, indent=2)}
+
+Provide recommendations covering:
+1. Migration strategy (phased vs big-bang vs parallel)
+2. Team composition and skills needed
+3. Timeline considerations
+4. Risk mitigation approaches
+5. Platform-specific advice for {target_platform}
+6. Priority ordering for cookbook migration
+
+Format as a bulleted list of specific recommendations."""
+
+        ai_response = _call_ai_api(
+            prompt,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        if ai_response:
+            return ai_response
+        else:
+            # Fall back to rule-based recommendations
+            return _generate_migration_recommendations_from_assessment(
+                assessments, metrics, target_platform
+            )
+
+    except Exception:
+        # Fall back to rule-based recommendations
+        return _generate_migration_recommendations_from_assessment(
+            assessments, metrics, target_platform
+        )
+
+
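The prompt above embeds a compact JSON summary rather than raw cookbook source, keeping the request small and provider-agnostic. A sketch of the embedded payload with invented values:

```python
import json

# Invented data mirroring the summary fields collected above.
assessment_summary = {
    "total_cookbooks": 2,
    "avg_complexity": 48.5,
    "total_effort_days": 7,
    "target_platform": "ansible",
    "cookbook_highlights": [
        {"name": "nginx", "complexity": 42.0, "effort": 3, "insights": ""},
        {"name": "postgres", "complexity": 55.0, "effort": 4, "insights": ""},
    ],
}
print(json.dumps(assessment_summary, indent=2))
```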
+def _create_ai_migration_roadmap(
+    assessments: list,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str:
+    """Create AI-enhanced migration roadmap."""
+    try:
+        # Prepare cookbook complexity data for AI
+        cookbook_data = [
+            {
+                "name": a["cookbook_name"],
+                "complexity": a["complexity_score"],
+                "effort": a["estimated_effort_days"],
+                "priority": a["migration_priority"],
+                "insights": a.get("ai_insights", ""),
+            }
+            for a in assessments
+        ]
+
+        prompt = f"""Create a detailed migration roadmap for these Chef cookbooks. Consider complexity, dependencies, and migration priorities.
+
+Cookbook Data: {json.dumps(cookbook_data, indent=2)}
+
+Provide a phased migration plan with:
+1. Phase breakdown (Foundation, Core Migration, Advanced, Finalization)
+2. Week-by-week milestones
+3. Success criteria for each phase
+4. Risk mitigation strategies
+5. Team resource allocation recommendations
+
+Format as structured markdown with clear phases and timelines."""
+
+        ai_response = _call_ai_api(
+            prompt,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        if ai_response:
+            return ai_response
+        else:
+            # Fall back to rule-based roadmap
+            return _create_migration_roadmap(assessments)
+
+    except Exception:
+        # Fall back to rule-based roadmap
+        return _create_migration_roadmap(assessments)
+
+
+def _format_ai_assessment_report(
+    migration_scope: str,
+    target_platform: str,
+    overall_metrics: dict[str, int],
+    cookbook_assessments: list[dict],
+    recommendations: str,
+    roadmap: str,
+) -> str:
+    """
+    Format the AI-enhanced assessment report.
+
+    Args:
+        migration_scope: Scope of migration
+        target_platform: Target platform
+        overall_metrics: Overall metrics dictionary
+        cookbook_assessments: List of cookbook assessments
+        recommendations: AI-generated recommendations
+        roadmap: AI-generated roadmap
 
     Returns:
-        Formatted
+        Formatted AI-enhanced report string
 
     """
-
+    ai_indicator = "\n🤖 **AI-Enhanced Analysis**: This report includes AI-powered insights for more accurate complexity assessment and migration planning.\n"
+
+    return f"""# Chef to Ansible Migration Assessment (AI-Enhanced)
+# Scope: {migration_scope}
+# Target Platform: {target_platform}
+{ai_indicator}
+
+## Overall Migration Metrics:
+{_format_overall_metrics(overall_metrics)}
+
+## Cookbook Assessments:
+{_format_ai_cookbook_assessments(cookbook_assessments)}
 
-
-
-
-
+## Migration Complexity Analysis:
+{_format_ai_complexity_analysis(cookbook_assessments)}
+
+## AI-Generated Migration Recommendations:
+{recommendations}
 
-
+## AI-Generated Migration Roadmap:
+{roadmap}
+
+## Risk Assessment:
+{_assess_migration_risks(cookbook_assessments, target_platform)}
+
+## Resource Requirements:
+{_estimate_resource_requirements(overall_metrics, target_platform)}
 """
+
+
|
+
def _format_ai_cookbook_assessments(assessments: list) -> str:
|
|
2870
|
+
"""Format individual cookbook assessments with AI insights."""
|
|
2871
|
+
if not assessments:
|
|
2872
|
+
return "No cookbooks assessed."
|
|
2873
|
+
|
|
2874
|
+
def _get_priority_icon(priority: str) -> str:
|
|
2875
|
+
"""Get priority icon based on migration priority level."""
|
|
2876
|
+
if priority == "high":
|
|
2877
|
+
return "🔴"
|
|
2878
|
+
elif priority == "medium":
|
|
2879
|
+
return "🟡"
|
|
2880
|
+
else:
|
|
2881
|
+
return "🟢"
|
|
2882
|
+
|
|
2883
|
+
formatted = []
|
|
2884
|
+
for assessment in assessments:
|
|
2885
|
+
priority_icon = _get_priority_icon(assessment["migration_priority"])
|
|
2886
|
+
ai_insights = assessment.get("ai_insights", "")
|
|
2887
|
+
insights_section = (
|
|
2888
|
+
f"\n 🤖 **AI Insights**: {ai_insights}" if ai_insights else ""
|
|
2889
|
+
)
|
|
2890
|
+
|
|
2891
|
+
formatted.append(f"""### {assessment["cookbook_name"]} {priority_icon}
|
|
2892
|
+
• Complexity Score: {assessment["complexity_score"]:.1f}/100
|
|
2893
|
+
• Estimated Effort: {assessment["estimated_effort_days"]} days
|
|
2894
|
+
• Recipes: {assessment["metrics"]["recipe_count"]}
|
|
2895
|
+
• Resources: {assessment["metrics"]["resource_count"]}
|
|
2896
|
+
• Custom Resources: {assessment["metrics"]["custom_resources"]}
|
|
2897
|
+
• Challenges: {len(assessment["challenges"])}{insights_section}""")
|
|
2898
|
+
|
|
2899
|
+
return "\n\n".join(formatted)
|
|
2900
|
+
|
|
2901
|
+
|
|
2902
|
+
def _format_ai_complexity_analysis(assessments: list) -> str:
|
|
2903
|
+
"""Format AI-enhanced complexity analysis."""
|
|
2904
|
+
if not assessments:
|
|
2905
|
+
return "No complexity analysis available."
|
|
2906
|
+
|
|
2907
|
+
high_complexity = [a for a in assessments if a["complexity_score"] > 70]
|
|
2908
|
+
medium_complexity = [a for a in assessments if 30 <= a["complexity_score"] <= 70]
|
|
2909
|
+
low_complexity = [a for a in assessments if a["complexity_score"] < 30]
|
|
2910
|
+
|
|
2911
|
+
# Check for AI insights
|
|
2912
|
+
ai_insights_count = sum(1 for a in assessments if a.get("ai_insights"))
|
|
2913
|
+
|
|
2914
|
+
analysis = f"""• High Complexity (>70): {len(high_complexity)} cookbooks
|
|
2915
|
+
• Medium Complexity (30-70): {len(medium_complexity)} cookbooks
|
|
2916
|
+
• Low Complexity (<30): {len(low_complexity)} cookbooks
|
|
2917
|
+
• AI-Enhanced Assessments: {ai_insights_count}/{len(assessments)} cookbooks
|
|
2918
|
+
|
|
2919
|
+
**Top Migration Challenges:**
|
|
2920
|
+
{_identify_top_challenges(assessments)}
|
|
2921
|
+
|
|
2922
|
+
**AI Analysis Summary:**
|
|
2923
|
+
• {ai_insights_count} cookbooks received AI-powered complexity analysis
|
|
2924
|
+
• Enhanced accuracy for effort estimation and risk identification
|
|
2925
|
+
• Context-aware migration recommendations"""
|
|
2926
|
+
|
|
2927
|
+
return analysis
|
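Note the boundary convention in the buckets above: scores of exactly 30 and 70 both count as medium, so every score lands in exactly one bucket. A quick check with invented scores:

```python
scores = [12.0, 30.0, 44.0, 70.0, 83.0]
high = [s for s in scores if s > 70]
medium = [s for s in scores if 30 <= s <= 70]
low = [s for s in scores if s < 30]
assert len(high) + len(medium) + len(low) == len(scores)
print(len(high), len(medium), len(low))  # 1 3 1
```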