mcp-souschef 2.8.0__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
souschef/assessment.py CHANGED
@@ -12,12 +12,36 @@ from typing import Any
 
 from souschef.core import METADATA_FILENAME, _normalize_path, _safe_join
 from souschef.core.errors import format_error_with_context
+from souschef.core.metrics import (
+    ComplexityLevel,
+    EffortMetrics,
+    categorize_complexity,
+    estimate_effort_for_complexity,
+)
 from souschef.core.validation import (
     ValidationEngine,
     ValidationLevel,
     ValidationResult,
+    _format_validation_results_summary,
 )
 
+# Optional AI provider imports
+try:
+    import requests  # type: ignore[import-untyped]
+except ImportError:
+    requests = None
+
+try:
+    from ibm_watsonx_ai import (  # type: ignore[import-not-found]
+        APIClient,
+    )
+except ImportError:
+    APIClient = None
+
+
+# Optimised patterns to avoid catastrophic backtracking in resource parsing
+RESOURCE_BLOCK_PATTERN = re.compile(r"\w{1,100}\s+['\"]([^'\"\r\n]{0,200})['\"]\s+do")
+
 
 def assess_chef_migration_complexity(
     cookbook_paths: str,
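The new module-level pattern is the heart of the ReDoS fix: bounded quantifiers (`{1,100}`, `{0,200}`) and a character class that excludes quotes and newlines keep matching linear-time, unlike the unbounded inline pattern it replaces further down. A quick check of the pattern exactly as added:

```python
import re

# Pattern as added in this release; each position backtracks only a bounded amount.
RESOURCE_BLOCK_PATTERN = re.compile(r"\w{1,100}\s+['\"]([^'\"\r\n]{0,200})['\"]\s+do")

recipe = """
package 'nginx' do
  action :install
end

service 'nginx' do
  action [:enable, :start]
end
"""
print(RESOURCE_BLOCK_PATTERN.findall(recipe))  # ['nginx', 'nginx']
```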
@@ -127,8 +151,9 @@ def parse_chef_migration_assessment(
             "recommendations": recommendations,
             "roadmap": roadmap,
             "complexity": _get_overall_complexity_level(overall_metrics),
-            "estimated_hours": overall_metrics.get("estimated_effort_days", 0)
-            * 8,  # Convert days to hours
+            "estimated_hours": EffortMetrics(
+                overall_metrics.get("estimated_effort_days", 0)
+            ).estimated_hours,
         }
 
     except Exception as e:
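`EffortMetrics` lives in `souschef.core.metrics`, which this diff does not include. A hypothetical sketch of its behaviour, inferred only from the inline code it replaces here and in `_format_overall_metrics` below (8 hours per person-day; a duration window assuming 2-4 parallel engineers):

```python
# Hypothetical reconstruction for illustration; the real class is in
# souschef.core.metrics and may differ.
from dataclasses import dataclass

@dataclass
class EffortMetrics:
    estimated_days: float

    @property
    def estimated_hours(self) -> float:
        return self.estimated_days * 8  # day length implied by the removed code

    @property
    def estimated_weeks_range(self) -> str:
        low = max(1, int(self.estimated_days / 2))
        high = max(2, int(self.estimated_days))
        return f"{low}-{high} weeks (with 2-4 parallel engineers)"

print(EffortMetrics(12.5).estimated_hours)        # 100.0
print(EffortMetrics(12.5).estimated_weeks_range)  # 6-12 weeks (with 2-4 parallel engineers)
```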
@@ -632,60 +657,373 @@ def _format_assessment_report(
     """
 
 
-def _count_cookbook_artifacts(cookbook_path) -> dict[str, int]:
-    """Count basic cookbook artifacts (recipes, templates, files)."""
-    recipes_dir = _safe_join(cookbook_path, "recipes")
+def _count_cookbook_artifacts(cookbook_path: Path) -> dict[str, int]:
+    """Count comprehensive cookbook artifacts including all Chef components."""
+    # deepcode ignore PT: path normalized via _normalize_path in caller
+    cookbook_path = (
+        Path(cookbook_path) if not isinstance(cookbook_path, Path) else cookbook_path
+    )
+
+    # Basic directory counts
+    # cookbook_path already normalized by caller
+    recipes_dir = (
+        cookbook_path / "recipes"
+    )  # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
     recipe_count = len(list(recipes_dir.glob("*.rb"))) if recipes_dir.exists() else 0
 
-    templates_count = (
-        len(list(_safe_join(cookbook_path, "templates").glob("*")))
-        if _safe_join(cookbook_path, "templates").exists()
-        else 0
+    templates_dir = (
+        cookbook_path / "templates"
+    )  # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
+    template_count = (
+        len(list(templates_dir.glob("**/*.erb"))) if templates_dir.exists() else 0
+    )
+
+    files_dir = cookbook_path / "files"
+    file_count = len(list(files_dir.glob("**/*"))) if files_dir.exists() else 0
+
+    # Additional Chef components
+    attributes_dir = cookbook_path / "attributes"
+    attributes_count = (
+        len(list(attributes_dir.glob("*.rb"))) if attributes_dir.exists() else 0
     )
 
-    files_count = (
-        len(list(_safe_join(cookbook_path, "files").glob("*")))
-        if _safe_join(cookbook_path, "files").exists()
-        else 0
+    libraries_dir = cookbook_path / "libraries"
+    libraries_count = (
+        len(list(libraries_dir.glob("*.rb"))) if libraries_dir.exists() else 0
     )
 
+    definitions_dir = cookbook_path / "definitions"
+    definitions_count = (
+        len(list(definitions_dir.glob("*.rb"))) if definitions_dir.exists() else 0
+    )
+
+    resources_dir = cookbook_path / "resources"
+    resources_count = (
+        len(list(resources_dir.glob("*.rb"))) if resources_dir.exists() else 0
+    )
+
+    providers_dir = cookbook_path / "providers"
+    providers_count = (
+        len(list(providers_dir.glob("*.rb"))) if providers_dir.exists() else 0
+    )
+
+    # Configuration files
+    has_berksfile = (cookbook_path / "Berksfile").exists()
+    has_chefignore = (cookbook_path / "chefignore").exists()
+    has_thorfile = (cookbook_path / "Thorfile").exists()
+    has_kitchen_yml = (cookbook_path / ".kitchen.yml").exists() or (
+        cookbook_path / "kitchen.yml"
+    ).exists()
+    has_test_dir = (cookbook_path / "test").exists() or (
+        cookbook_path / "spec"
+    ).exists()
+
     return {
         "recipe_count": recipe_count,
-        "templates": templates_count,
-        "files": files_count,
+        "template_count": template_count,
+        "file_count": file_count,
+        "attributes_count": attributes_count,
+        "libraries_count": libraries_count,
+        "definitions_count": definitions_count,
+        "resources_count": resources_count,
+        "providers_count": providers_count,
+        "has_berksfile": int(has_berksfile),
+        "has_chefignore": int(has_chefignore),
+        "has_thorfile": int(has_thorfile),
+        "has_kitchen_yml": int(has_kitchen_yml),
+        "has_test_dir": int(has_test_dir),
     }
 
 
-def _analyse_recipe_complexity(cookbook_path) -> dict[str, int]:
-    """Analyse recipe files for resource counts, Ruby blocks, and custom resources."""
-    recipes_dir = _safe_join(cookbook_path, "recipes")
+def _analyse_recipe_complexity(cookbook_path: Path) -> dict[str, int]:
+    """Analyse recipe files and other cookbook components for resource counts, Ruby blocks, and custom resources."""
+    # deepcode ignore PT: path normalized via _normalize_path in caller
+    cookbook_path = (
+        Path(cookbook_path) if not isinstance(cookbook_path, Path) else cookbook_path
+    )
+
     resource_count = 0
     custom_resources = 0
     ruby_blocks = 0
+    erb_templates = 0
+    attribute_complexity = 0
+    library_complexity = 0
+    definition_count = 0
+
+    # Analyze different cookbook components
+    resource_count, ruby_blocks, custom_resources = _analyze_recipes(cookbook_path)
+    attribute_complexity = _analyze_attributes(cookbook_path)
+    erb_templates = _analyze_templates(cookbook_path)
+    library_complexity = _analyze_libraries(cookbook_path)
+    definition_count = _count_definitions(cookbook_path)
+
+    return {
+        "resource_count": resource_count,
+        "custom_resources": custom_resources,
+        "ruby_blocks": ruby_blocks,
+        "erb_templates": erb_templates,
+        "attribute_complexity": attribute_complexity,
+        "library_complexity": library_complexity,
+        "definition_count": definition_count,
+    }
+
+
+def _analyze_recipes(cookbook_path: Path) -> tuple[int, int, int]:
+    """Analyze recipe files for resources, ruby blocks, and custom resources."""
+    resource_count = 0
+    ruby_blocks = 0
+    custom_resources = 0
 
+    # cookbook_path already normalized by caller
+    recipes_dir = (
+        cookbook_path / "recipes"
+    )  # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
     if recipes_dir.exists():
         for recipe_file in recipes_dir.glob("*.rb"):
-            with recipe_file.open("r", encoding="utf-8", errors="ignore") as f:
-                content = f.read()
+            try:
+                content = recipe_file.read_text(encoding="utf-8", errors="ignore")
                 # Count Chef resources
-                resources = len(
-                    re.findall(r'\w{1,100}\s+[\'"]([^\'"]{0,200})[\'"]\s+do', content)
-                )
+                resources = len(RESOURCE_BLOCK_PATTERN.findall(content))
                 ruby_blocks += len(
-                    re.findall(r"ruby_block|execute|bash", content, re.IGNORECASE)
+                    re.findall(
+                        r"ruby_block|execute|bash|script", content, re.IGNORECASE
+                    )
                 )
                 custom_resources += len(
                     re.findall(
-                        r"custom_resource|provides|use_inline_resources", content
+                        r"custom_resource|provides|use_inline_resources|lwrp_resource",
+                        content,
                     )
                 )
                 resource_count += resources
+            except Exception:
+                continue
+
+    return resource_count, ruby_blocks, custom_resources
+
+
+def _analyze_attributes(cookbook_path: Path) -> int:
+    """Analyze attribute files for complexity."""
+    attribute_complexity = 0
+
+    attributes_dir = (
+        cookbook_path / "attributes"
+    )  # deepcode ignore PT: path normalized via _normalize_path
+    if attributes_dir.exists():
+        for attr_file in attributes_dir.glob("*.rb"):
+            try:
+                content = attr_file.read_text(encoding="utf-8", errors="ignore")
+                # Count attribute assignments and complex expressions
+                # Use simpler regex patterns to avoid ReDoS vulnerabilities
+                assignments = len(
+                    re.findall(r"^\s*\w+\s*(?:\[\w*\])?\s*=", content, re.MULTILINE)
+                )
+                complex_expressions = len(
+                    re.findall(r"(?:node|default|override)\[", content)
+                )
+                attribute_complexity += assignments + complex_expressions
+            except Exception:
+                continue
+
+    return attribute_complexity
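Both attribute patterns avoid nested quantifiers, trading some precision for ReDoS safety: bracketed accesses with quoted keys are counted by the second expression, not the first. Applied to a toy attributes file:

```python
import re

content = """app_name = "web"
default["nginx"]["port"] = 80
override["app"]["debug"] = true
"""
# Plain assignments (first pattern) vs. node/default/override accesses (second).
assignments = len(re.findall(r"^\s*\w+\s*(?:\[\w*\])?\s*=", content, re.MULTILINE))
complex_expressions = len(re.findall(r"(?:node|default|override)\[", content))
print(assignments, complex_expressions)  # 1 2 -> attribute_complexity of 3
```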
+
+
+def _analyze_templates(cookbook_path: Path) -> int:
+    """Analyze template files for ERB complexity."""
+    erb_templates = 0
+
+    # cookbook_path already normalized by caller
+    templates_dir = (
+        cookbook_path / "templates"
+    )  # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
+    if templates_dir.exists():
+        for template_file in templates_dir.glob("**/*.erb"):
+            try:
+                content = template_file.read_text(encoding="utf-8", errors="ignore")
+                # Count ERB expressions and complex logic
+                erb_expressions = len(re.findall(r"<%.*?%>", content))
+                erb_templates += erb_expressions
+            except Exception:
+                continue
+
+    return erb_templates
+
+
+def _analyze_libraries(cookbook_path: Path) -> int:
+    """Analyze library files for complexity."""
+    library_complexity = 0
+
+    # cookbook_path already normalized by caller
+    libraries_dir = (
+        cookbook_path / "libraries"
+    )  # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
+    if libraries_dir.exists():
+        for lib_file in libraries_dir.glob("*.rb"):
+            try:
+                content = lib_file.read_text(encoding="utf-8", errors="ignore")
+                # Count class definitions, methods, and complex Ruby constructs
+                classes = len(re.findall(r"class\s+\w+", content))
+                methods = len(re.findall(r"def\s+\w+", content))
+                library_complexity += classes * 2 + methods
+            except Exception:
+                continue
+
+    return library_complexity
+
+
+def _count_definitions(cookbook_path: Path) -> int:
+    """Count definition files."""
+    # cookbook_path already normalized by caller
+    definitions_dir = (
+        cookbook_path / "definitions"
+    )  # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
+    if definitions_dir.exists():
+        return len(list(definitions_dir.glob("*.rb")))
+    return 0
+
+
+def _parse_berksfile(cookbook_path: Path) -> dict[str, Any]:
+    """Parse Berksfile for dependency information."""
+    # deepcode ignore PT: path normalized via _normalize_path in caller
+    cookbook_path = (
+        Path(cookbook_path) if not isinstance(cookbook_path, Path) else cookbook_path
+    )
+    berksfile = cookbook_path / "Berksfile"
 
-    return {
-        "resource_count": resource_count,
-        "custom_resources": custom_resources,
-        "ruby_blocks": ruby_blocks,
-    }
+    if not berksfile.exists():
+        return {"dependencies": [], "external_cookbooks": [], "complexity": 0}
+
+    try:
+        content = berksfile.read_text(encoding="utf-8", errors="ignore")
+
+        # Parse cookbook dependencies
+        cookbook_deps = re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]', content)
+        external_deps = re.findall(
+            r'cookbook\s+[\'"]([^\'"]+)[\'"]\s*,\s*[\'"]([^\'"]+)[\'"]', content
+        )
+
+        # Count complex dependency specifications (with version constraints, git sources, etc.)
+        complex_deps = len(re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]\s*,', content))
+        git_sources = len(re.findall(r"git:", content))
+        path_sources = len(re.findall(r"path:", content))
+
+        return {
+            "dependencies": cookbook_deps,
+            "external_cookbooks": [dep[0] for dep in external_deps],
+            "complexity": complex_deps + git_sources * 2 + path_sources * 2,
+            "has_git_sources": git_sources > 0,
+            "has_path_sources": path_sources > 0,
+        }
+    except Exception:
+        return {"dependencies": [], "external_cookbooks": [], "complexity": 0}
+
+
+def _parse_chefignore(cookbook_path) -> dict[str, Any]:
+    """Parse chefignore file for ignore patterns."""
+    cookbook_path = Path(cookbook_path)
+    chefignore = cookbook_path / "chefignore"
+
+    if not chefignore.exists():
+        return {"patterns": [], "complexity": 0}
+
+    try:
+        content = chefignore.read_text(encoding="utf-8", errors="ignore")
+        lines = [
+            line.strip()
+            for line in content.split("\n")
+            if line.strip() and not line.startswith("#")
+        ]
+
+        # Count complex patterns (wildcards, directories, etc.)
+        wildcard_patterns = len([p for p in lines if "*" in p or "?" in p])
+        directory_patterns = len([p for p in lines if p.endswith("/") or "/" in p])
+
+        return {
+            "patterns": lines,
+            "pattern_count": len(lines),
+            "complexity": wildcard_patterns + directory_patterns,
+            "has_wildcards": wildcard_patterns > 0,
+        }
+    except Exception:
+        return {"patterns": [], "complexity": 0}
+
+
+def _parse_thorfile(cookbook_path) -> dict[str, Any]:
+    """Parse Thorfile for Thor tasks."""
+    cookbook_path = Path(cookbook_path)
+    thorfile = cookbook_path / "Thorfile"
+
+    if not thorfile.exists():
+        return {"tasks": [], "complexity": 0}
+
+    try:
+        content = thorfile.read_text(encoding="utf-8", errors="ignore")
+
+        # Count Thor tasks and methods
+        tasks = len(re.findall(r'desc\s+[\'"]([^\'"]+)[\'"]', content))
+        methods = len(re.findall(r"def\s+\w+", content))
+
+        return {
+            "tasks": tasks,
+            "methods": methods,
+            "complexity": tasks + methods,
+            "has_tasks": tasks > 0,
+        }
+    except Exception:
+        return {"tasks": [], "complexity": 0}
+
+
+def _parse_metadata_file(cookbook_path) -> dict[str, Any]:
+    """Parse metadata.rb for cookbook information."""
+    cookbook_path = Path(cookbook_path)
+    metadata_file = cookbook_path / "metadata.rb"
+
+    if not metadata_file.exists():
+        return {
+            "name": "",
+            "version": "",
+            "dependencies": [],
+            "supports": [],
+            "complexity": 0,
+        }
+
+    try:
+        content = metadata_file.read_text(encoding="utf-8", errors="ignore")
+
+        # Extract basic metadata
+        name_match = re.search(r'name\s+[\'"]([^\'"]+)[\'"]', content)
+        version_match = re.search(r'version\s+[\'"]([^\'"]+)[\'"]', content)
+
+        # Parse dependencies
+        depends_matches = re.findall(r'depends\s+[\'"]([^\'"]+)[\'"]', content)
+
+        # Parse supported platforms
+        supports_matches = re.findall(r'supports\s+[\'"]([^\'"]+)[\'"]', content)
+
+        # Count complex metadata (recipes, attributes, etc.)
+        recipes = len(re.findall(r'recipe\s+[\'"]([^\'"]+)[\'"]', content))
+        attributes = len(re.findall(r'attribute\s+[\'"]([^\'"]+)[\'"]', content))
+
+        return {
+            "name": name_match.group(1) if name_match else "",
+            "version": version_match.group(1) if version_match else "",
+            "dependencies": depends_matches,
+            "supports": supports_matches,
+            "recipes": recipes,
+            "attributes": attributes,
+            "complexity": len(depends_matches)
+            + len(supports_matches)
+            + recipes
+            + attributes,
+        }
+    except Exception:
+        return {
+            "name": "",
+            "version": "",
+            "dependencies": [],
+            "supports": [],
+            "complexity": 0,
+        }
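The metadata parser follows the same approach: `re.search` for the singleton directives, `re.findall` for the repeatable ones, with complexity as a simple sum. On a toy metadata.rb:

```python
import re

content = """name 'myapp'
version '1.2.3'
depends 'nginx'
depends 'postgresql', '>= 7.0'
supports 'ubuntu'
"""
name = re.search(r'name\s+[\'"]([^\'"]+)[\'"]', content)
depends = re.findall(r'depends\s+[\'"]([^\'"]+)[\'"]', content)
supports = re.findall(r'supports\s+[\'"]([^\'"]+)[\'"]', content)
print(name.group(1) if name else "", depends, supports)
# myapp ['nginx', 'postgresql'] ['ubuntu'] -> complexity = 2 + 1 + 0 + 0 = 3
```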
 
 
 def _calculate_complexity_score(metrics: dict[str, int]) -> int:
@@ -698,8 +1036,8 @@ def _calculate_complexity_score(metrics: dict[str, int]) -> int:
         "resource_density": min(resource_count / max(recipe_count, 1) * 5, 25),
         "custom_resources": metrics["custom_resources"] * 10,
         "ruby_blocks": metrics["ruby_blocks"] * 5,
-        "templates": min(metrics["templates"] * 2, 15),
-        "files": min(metrics["files"] * 1, 10),
+        "templates": min(metrics["template_count"] * 2, 15),
+        "files": min(metrics["file_count"] * 1, 10),
     }
 
     return int(sum(complexity_factors.values()))
@@ -744,13 +1082,15 @@ def _assess_single_cookbook(cookbook_path: Path) -> dict:
     recipe_complexity = _analyse_recipe_complexity(cookbook)
     metrics = {**artifact_counts, **recipe_complexity}
 
-    # Calculate complexity and effort
+    # Calculate complexity score using existing function
     complexity_score = _calculate_complexity_score(metrics)
-    # More realistic effort: 0.5-2 hours per recipe with AI assistance
-    # Base: 1 hour per recipe = 0.125 days (8-hour day)
-    base_effort = metrics["recipe_count"] * 0.125  # 0.125 days per recipe
-    complexity_multiplier = 1 + (complexity_score / 100)
-    estimated_effort = round(base_effort * complexity_multiplier, 1)
+
+    # Use centralized EffortMetrics for consistent calculations
+    effort_metrics = estimate_effort_for_complexity(
+        complexity_score=complexity_score,
+        resource_count=metrics["recipe_count"],
+    )
+    estimated_effort = effort_metrics.estimated_days
 
     # Build assessment
     return {
@@ -772,7 +1112,7 @@ def _format_overall_metrics(metrics: dict) -> str:
 • Total Resources: {metrics["total_resources"]}
 • Average Complexity: {metrics.get("avg_complexity", 0):.1f}/100
 • Estimated Total Effort: {metrics["estimated_effort_days"]:.1f} person-days
-• Estimated Duration: {max(1, int(metrics["estimated_effort_days"] / 2))}-{max(2, int(metrics["estimated_effort_days"]))} weeks (with 2-4 parallel engineers)"""
+• Estimated Duration: {EffortMetrics(metrics["estimated_effort_days"]).estimated_weeks_range}"""
 
 
 def _format_cookbook_assessments(assessments: list) -> str:
@@ -859,7 +1199,9 @@ def _generate_migration_recommendations_from_assessment(
 
     # Complexity-based recommendations
     avg_complexity = metrics.get("avg_complexity", 0)
-    if avg_complexity > 60:
+    complexity_level = categorize_complexity(avg_complexity)
+
+    if complexity_level == ComplexityLevel.HIGH:
         recommendations.append(
             "• Consider phased migration approach due to high complexity"
         )
@@ -867,9 +1209,12 @@
             "• Allocate additional time for custom resource conversion"
        )
        recommendations.append("• Plan for comprehensive testing and validation")
-    else:
+    elif complexity_level == ComplexityLevel.LOW:
        recommendations.append("• Standard migration timeline should be sufficient")
        recommendations.append("• Consider big-bang approach for faster delivery")
+    else:
+        recommendations.append("• Balanced approach recommended for medium complexity")
+        recommendations.append("• Plan iterative validation checkpoints")
 
     # Effort-based recommendations
     total_effort = metrics["estimated_effort_days"]
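`categorize_complexity` and `ComplexityLevel` come from `souschef.core.metrics`, which is not part of this diff. A hypothetical sketch consistent with the thresholds still visible in this file (`> 70` High and `> 30` Medium in `assess_single_cookbook_with_ai`; `< 30` / `< 70` in the roadmap code this release replaces):

```python
# Hypothetical reconstruction for illustration; the real enum and thresholds
# live in souschef.core.metrics and may differ.
from enum import Enum

class ComplexityLevel(Enum):
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"

def categorize_complexity(score: float) -> ComplexityLevel:
    if score < 30:
        return ComplexityLevel.LOW
    if score <= 70:
        return ComplexityLevel.MEDIUM
    return ComplexityLevel.HIGH

print(categorize_complexity(65))  # ComplexityLevel.MEDIUM
```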
@@ -899,6 +1244,7 @@ def _create_migration_roadmap(assessments: list) -> str:
     # Sort cookbooks by complexity (low to high for easier wins first)
     sorted_cookbooks = sorted(assessments, key=lambda x: x["complexity_score"])
 
+    # Use complexity thresholds from metrics module for consistency
     phases = {
         "Phase 1 - Foundation (Weeks 1-2)": [
             "Set up Ansible/AWX environment",
@@ -917,17 +1263,19 @@
         ],
     }
 
-    # Distribute cookbooks across phases
+    # Distribute cookbooks across phases using centralized complexity thresholds
     for cookbook in sorted_cookbooks:
-        if cookbook["complexity_score"] < 30:
+        complexity_level = categorize_complexity(cookbook["complexity_score"])
+
+        if complexity_level == ComplexityLevel.LOW:
             phases["Phase 2 - Low Complexity Migration (Weeks 3-5)"].append(
                 f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
             )
-        elif cookbook["complexity_score"] < 70:
+        elif complexity_level == ComplexityLevel.MEDIUM:
             phases["Phase 3 - Medium Complexity Migration (Weeks 6-9)"].append(
                 f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
             )
-        else:
+        else:  # HIGH
             phases["Phase 4 - High Complexity Migration (Weeks 10-12)"].append(
                 f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
             )
@@ -1556,26 +1904,869 @@ def _format_validation_results_text(
     return "\n".join(output_lines)
 
 
-def _format_validation_results_summary(
-    conversion_type: str, summary: dict[str, int]
+def assess_single_cookbook_with_ai(
+    cookbook_path: str,
+    ai_provider: str = "anthropic",
+    api_key: str = "",
+    model: str = "claude-3-5-sonnet-20241022",
+    temperature: float = 0.3,
+    max_tokens: int = 2000,
+    project_id: str = "",
+    base_url: str = "",
+) -> dict[str, Any]:
+    """
+    Assess a single Chef cookbook using AI analysis.
+
+    Args:
+        cookbook_path: Path to the Chef cookbook directory
+        ai_provider: AI provider (anthropic, openai, watson)
+        api_key: API key for the AI provider
+        model: AI model to use
+        temperature: AI temperature setting
+        max_tokens: Maximum tokens for AI response
+        project_id: Project ID for IBM Watsonx (required for watson provider)
+        base_url: Custom base URL for the AI provider
+
+    Returns:
+        Dictionary containing assessment data with complexity, recommendations, etc.
+
+    """
+    try:
+        cookbook_path_obj = _normalize_path(cookbook_path)
+        if not cookbook_path_obj.exists():
+            return {"error": f"Cookbook path not found: {cookbook_path}"}
+
+        # Check if AI is available
+        ai_available = _is_ai_available(ai_provider, api_key)
+        if not ai_available:
+            # Fall back to rule-based analysis
+            return parse_chef_migration_assessment(cookbook_path)
+
+        # Get AI-enhanced assessment
+        assessment = _assess_single_cookbook_with_ai(
+            cookbook_path_obj,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        # Convert to the format expected by the UI
+        complexity_level = "Low"
+        if assessment["complexity_score"] > 70:
+            complexity_level = "High"
+        elif assessment["complexity_score"] > 30:
+            complexity_level = "Medium"
+
+        return {
+            "complexity": complexity_level,
+            "estimated_hours": EffortMetrics(
+                assessment["estimated_effort_days"]
+            ).estimated_hours,
+            "recommendations": assessment.get(
+                "ai_insights", "AI-enhanced analysis completed"
+            ),
+        }
+
+    except Exception as e:
+        return {
+            "error": format_error_with_context(
+                e, "assessing single cookbook with AI", cookbook_path
+            )
+        }
+
+
+def assess_chef_migration_complexity_with_ai(
+    cookbook_paths: str,
+    migration_scope: str = "full",
+    target_platform: str = "ansible_awx",
+    ai_provider: str = "anthropic",
+    api_key: str = "",
+    model: str = "claude-3-5-sonnet-20241022",
+    temperature: float = 0.3,
+    max_tokens: int = 2000,
+    project_id: str = "",
+    base_url: str = "",
 ) -> str:
     """
-    Format validation results as summary.
+    Assess the complexity of migrating Chef cookbooks to Ansible using AI analysis.
+
+    This function uses AI to provide more intelligent analysis of cookbook complexity,
+    migration challenges, and recommendations. Falls back to rule-based analysis
+    if AI is not available.
 
     Args:
-        conversion_type: Type of conversion.
-        summary: Summary of validation results.
+        cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
+        migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
+        target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)
+        ai_provider: AI provider (anthropic, openai, watson)
+        api_key: API key for the AI provider
+        model: AI model to use
+        temperature: AI temperature setting
+        max_tokens: Maximum tokens for AI response
+        project_id: Project ID for IBM Watsonx (required for watson provider)
+        base_url: Custom base URL for the AI provider
+
+    Returns:
+        Comprehensive migration complexity assessment with AI-enhanced recommendations
+
+    """
+    try:
+        # Validate and parse inputs
+        error_msg = _validate_assessment_inputs(
+            cookbook_paths, migration_scope, target_platform
+        )
+        if error_msg:
+            return error_msg
+
+        # Check if AI is available
+        ai_available = _is_ai_available(ai_provider, api_key)
+        if not ai_available:
+            # Fall back to rule-based analysis
+            return assess_chef_migration_complexity(
+                cookbook_paths, migration_scope, target_platform
+            )
+
+        # Process cookbook analysis with AI
+        return _process_cookbook_assessment_with_ai(
+            cookbook_paths,
+            migration_scope,
+            target_platform,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+    except Exception as e:
+        return format_error_with_context(
+            e, "assessing Chef migration complexity with AI", cookbook_paths
+        )
+
+
+def _is_ai_available(ai_provider: str, api_key: str) -> bool:
+    """Check if AI analysis is available."""
+    if not api_key:
+        return False
+
+    if ai_provider == "anthropic" or ai_provider == "openai":
+        return requests is not None
+    elif ai_provider == "watson":
+        return APIClient is not None
+    else:
+        return False
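With no key, or with a provider whose optional dependency failed to import, every AI entry point above degrades to the rule-based path instead of raising. A usage sketch (hypothetical values):

```python
# No API key: always unavailable, so callers fall back to rule-based analysis.
assert _is_ai_available("anthropic", "") is False
# Unknown provider: unavailable regardless of key.
assert _is_ai_available("gemini", "sk-test") is False
# With a key, availability depends on the optional imports at the top of the
# module: requests for anthropic/openai, ibm_watsonx_ai's APIClient for watson.
```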
+
+
+def _process_cookbook_assessment_with_ai(
+    cookbook_paths: str,
+    migration_scope: str,
+    target_platform: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str = "",
+    base_url: str = "",
+) -> str:
+    """Process the cookbook assessment workflow with AI analysis."""
+    # Parse cookbook paths (may be empty if none exist)
+    valid_paths = _parse_cookbook_paths(cookbook_paths)
+
+    # Analyze all cookbooks with AI enhancement
+    cookbook_assessments, overall_metrics = _analyse_cookbook_metrics_with_ai(
+        valid_paths,
+        ai_provider,
+        api_key,
+        model,
+        temperature,
+        max_tokens,
+        project_id,
+        base_url,
+    )
+
+    # Generate AI-enhanced recommendations and reports
+    recommendations = _generate_ai_migration_recommendations(
+        cookbook_assessments,
+        overall_metrics,
+        target_platform,
+        ai_provider,
+        api_key,
+        model,
+        temperature,
+        max_tokens,
+        project_id,
+        base_url,
+    )
+    roadmap = _create_ai_migration_roadmap(
+        cookbook_assessments,
+        ai_provider,
+        api_key,
+        model,
+        temperature,
+        max_tokens,
+        project_id,
+        base_url,
+    )
+
+    # Format final assessment report
+    return _format_ai_assessment_report(
+        migration_scope,
+        target_platform,
+        overall_metrics,
+        cookbook_assessments,
+        recommendations,
+        roadmap,
+    )
+
+
+def _analyse_cookbook_metrics_with_ai(
+    valid_paths: list[Any],
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str = "",
+    base_url: str = "",
+) -> tuple[list[Any], dict[str, int]]:
+    """
+    Analyse metrics for all cookbooks with AI enhancement.
+
+    Args:
+        valid_paths: List of valid cookbook paths
+        ai_provider: AI provider name
+        api_key: API key
+        model: AI model
+        temperature: AI temperature
+        max_tokens: Max tokens
+        project_id: Project ID for IBM Watsonx (required for watson provider)
+        base_url: Custom base URL for the AI provider
+
+    Returns:
+        Tuple of (cookbook_assessments, overall_metrics)
+
+    """
+    cookbook_assessments = []
+    overall_metrics = {
+        "total_cookbooks": 0,
+        "total_recipes": 0,
+        "total_resources": 0,
+        "complexity_score": 0,
+        "estimated_effort_days": 0,
+    }
+
+    for cookbook_path in valid_paths:
+        # deepcode ignore PT: path normalized via _normalize_path
+        assessment = _assess_single_cookbook_with_ai(
+            cookbook_path,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+        cookbook_assessments.append(assessment)
+
+        # Aggregate metrics
+        overall_metrics["total_cookbooks"] += 1
+        overall_metrics["total_recipes"] += assessment["metrics"]["recipe_count"]
+        overall_metrics["total_resources"] += assessment["metrics"]["resource_count"]
+        overall_metrics["complexity_score"] += assessment["complexity_score"]
+        overall_metrics["estimated_effort_days"] += assessment["estimated_effort_days"]
+
+    # Calculate averages
+    if cookbook_assessments:
+        overall_metrics["avg_complexity"] = int(
+            overall_metrics["complexity_score"] / len(cookbook_assessments)
+        )
+
+    return cookbook_assessments, overall_metrics
+
+
+def _assess_single_cookbook_with_ai(
+    cookbook_path: Path,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str = "",
+    base_url: str = "",
+) -> dict:
+    """Assess complexity of a single cookbook using AI analysis."""
+    # cookbook_path is already normalized to a Path object
+    cookbook = cookbook_path
+
+    # Collect basic metrics (same as rule-based)
+    artifact_counts = _count_cookbook_artifacts(cookbook)
+    recipe_complexity = _analyse_recipe_complexity(cookbook)
+    metrics = {**artifact_counts, **recipe_complexity}
+
+    # Get AI analysis for this cookbook
+    ai_analysis = _get_ai_cookbook_analysis(
+        cookbook,
+        metrics,
+        ai_provider,
+        api_key,
+        model,
+        temperature,
+        max_tokens,
+        project_id,
+        base_url,
+    )
+
+    # Use AI-provided complexity score if available, otherwise fall back to rule-based
+    if ai_analysis and "complexity_score" in ai_analysis:
+        complexity_score = ai_analysis["complexity_score"]
+    else:
+        complexity_score = _calculate_complexity_score(metrics)
+
+    # Use AI-provided effort estimate if available, otherwise fall back to rule-based
+    if ai_analysis and "estimated_effort_days" in ai_analysis:
+        estimated_effort = ai_analysis["estimated_effort_days"]
+    else:
+        base_effort = metrics["recipe_count"] * 0.125  # 0.125 days per recipe
+        complexity_multiplier = 1 + (complexity_score / 100)
+        estimated_effort = round(base_effort * complexity_multiplier, 1)
+
+    # Build assessment with AI insights
+    assessment = {
+        "cookbook_name": cookbook.name,
+        "cookbook_path": str(cookbook),
+        "metrics": metrics,
+        "complexity_score": complexity_score,
+        "estimated_effort_days": estimated_effort,
+        "challenges": ai_analysis.get("challenges", [])
+        if ai_analysis
+        else _identify_migration_challenges(metrics, complexity_score),
+        "migration_priority": ai_analysis.get(
+            "migration_priority", _determine_migration_priority(complexity_score)
+        )
+        if ai_analysis
+        else _determine_migration_priority(complexity_score),
+        "ai_insights": ai_analysis.get("insights", "") if ai_analysis else "",
+        "dependencies": [],
+    }
+
+    return assessment
+
+
+def _get_ai_cookbook_analysis(
+    cookbook_path: Path,
+    metrics: dict,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str = "",
+    base_url: str = "",
+) -> dict | None:
+    """Get AI analysis for a single cookbook."""
+    try:
+        # Read key files for AI analysis
+        recipe_content = _get_recipe_content_sample(cookbook_path)
+        metadata_content = _get_metadata_content(cookbook_path)
+
+        # Prepare prompt for AI
+        prompt = f"""Analyze this Chef cookbook for migration to Ansible. Provide a detailed assessment including:
+
+1. Complexity score (0-100, where 100 is most complex)
+2. Estimated effort in days (realistic estimate for an experienced engineer)
+3. Key migration challenges and risks
+4. Migration priority (low/medium/high)
+5. Specific insights about this cookbook's conversion difficulty
+
+Cookbook: {cookbook_path.name}
+Basic metrics: {json.dumps(metrics, indent=2)}
+
+Metadata:
+{metadata_content}
+
+Sample recipe content:
+{recipe_content}
+
+Provide your analysis in JSON format with keys: complexity_score, estimated_effort_days, challenges (array), migration_priority, insights."""
+
+        # Call AI API
+        ai_response = _call_ai_api(
+            prompt,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        if ai_response:
+            # Parse JSON response
+            try:
+                parsed = json.loads(ai_response.strip())
+                return dict(parsed)  # Cast to dict to satisfy type checker
+            except json.JSONDecodeError:
+                # Try to extract JSON from response
+                json_match = re.search(r"\{.*\}", ai_response, re.DOTALL)
+                if json_match:
+                    parsed = json.loads(json_match.group())
+                    return dict(parsed)  # Cast to dict to satisfy type checker
+                else:
+                    return None
+        return None
+
+    except Exception:
+        # If AI analysis fails, return None to fall back to rule-based
+        return None
+
+
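The `json.JSONDecodeError` fallback above is the usual trick for models that wrap their JSON in prose: `re.DOTALL` lets the greedy brace match span newlines, grabbing from the first `{` to the last `}`. In isolation:

```python
import json
import re

ai_response = 'Here is my analysis:\n{\n  "complexity_score": 42\n}\nHope this helps!'
match = re.search(r"\{.*\}", ai_response, re.DOTALL)
if match:
    print(json.loads(match.group()))  # {'complexity_score': 42}
```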
+def _get_recipe_content_sample(cookbook_path: Path) -> str:
+    """Get a sample of ALL recipe content for AI analysis."""
+    recipes_dir = _safe_join(cookbook_path, "recipes")
+    if not recipes_dir.exists():
+        return "No recipes directory found"
+
+    recipe_files = list(recipes_dir.glob("*.rb"))
+    if not recipe_files:
+        return "No recipe files found"
+
+    # Read ALL recipe files, with reasonable size limits
+    all_recipes_content = []
+    total_chars = 0
+    max_total_chars = 8000  # Increased limit to cover multiple recipes
+
+    for recipe_file in recipe_files:
+        try:
+            content = recipe_file.read_text(encoding="utf-8", errors="ignore")
+            recipe_header = f"\n=== {recipe_file.name} ===\n"
+
+            # Add this recipe if we have room
+            if total_chars + len(recipe_header) + len(content) < max_total_chars:
+                all_recipes_content.append(recipe_header + content)
+                total_chars += len(recipe_header) + len(content)
+            else:
+                # Add truncated version
+                remaining = max_total_chars - total_chars - len(recipe_header)
+                if remaining > 100:
+                    all_recipes_content.append(
+                        recipe_header + content[:remaining] + "..."
+                    )
+                break
+        except Exception:
+            continue
+
+    if not all_recipes_content:
+        return "Could not read recipe content"
+
+    return "\n".join(all_recipes_content)
+
+
+def _get_metadata_content(cookbook_path: Path) -> str:
+    """Get metadata content for AI analysis."""
+    metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
+    if not metadata_file.exists():
+        return "No metadata.rb found"
+
+    try:
+        return metadata_file.read_text(encoding="utf-8", errors="ignore")
+    except Exception:
+        return "Could not read metadata"
+
+
+def _call_ai_api(
+    prompt: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str | None:
+    """Call the AI API for analysis."""
+    try:
+        if ai_provider == "anthropic":
+            return _call_anthropic_api(prompt, api_key, model, temperature, max_tokens)
+        elif ai_provider == "openai":
+            return _call_openai_api(prompt, api_key, model, temperature, max_tokens)
+        elif ai_provider == "watson":
+            return _call_watson_api(
+                prompt, api_key, model, temperature, max_tokens, project_id, base_url
+            )
+        else:
+            return None
+    except Exception:
+        return None
+
+
+def _call_anthropic_api(
+    prompt: str, api_key: str, model: str, temperature: float, max_tokens: int
+) -> str | None:
+    """Call Anthropic Claude API."""
+    if not requests:
+        return None
+
+    try:
+        response = requests.post(
+            "https://api.anthropic.com/v1/messages",
+            headers={
+                "x-api-key": api_key,
+                "anthropic-version": "2023-06-01",
+                "content-type": "application/json",
+            },
+            json={
+                "model": model,
+                "max_tokens": max_tokens,
+                "temperature": temperature,
+                "system": "You are an expert in Chef to Ansible migration analysis. Provide accurate, detailed assessments.",
+                "messages": [{"role": "user", "content": prompt}],
+            },
+            timeout=30,
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            return str(result["content"][0]["text"])
+        return None
+    except Exception:
+        return None
+
+
+def _call_openai_api(
+    prompt: str, api_key: str, model: str, temperature: float, max_tokens: int
+) -> str | None:
+    """Call OpenAI API."""
+    if not requests:
+        return None
+
+    try:
+        response = requests.post(
+            "https://api.openai.com/v1/chat/completions",
+            headers={
+                "Authorization": f"Bearer {api_key}",
+                "Content-Type": "application/json",
+            },
+            json={
+                "model": model,
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": "You are an expert in Chef to Ansible migration analysis. Provide accurate, detailed assessments.",
+                    },
+                    {"role": "user", "content": prompt},
+                ],
+                "temperature": temperature,
+                "max_tokens": max_tokens,
+            },
+            timeout=30,
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            return str(result["choices"][0]["message"]["content"])
+        return None
+    except Exception:
+        return None
+
+
+def _call_watson_api(
+    prompt: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str | None:
+    """Call IBM Watsonx API."""
+    if not APIClient:
+        return None
+
+    try:
+        # Initialize Watsonx API client
+        client = (
+            APIClient(api_key=api_key, url=base_url)
+            if base_url
+            else APIClient(api_key=api_key)
+        )
+
+        # Prepare request parameters
+        request_params = {
+            "prompt": prompt,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+            "model_id": model,
+            "project_id": project_id,
+        }
+
+        # Call Watsonx API for text generation
+        response = (
+            client.deployments.text_generation_stream(**request_params)
+            if hasattr(client, "deployments")
+            else None
+        )
+
+        if response:
+            # Collect streamed response
+            generated_text = ""
+            for chunk in response:
+                if hasattr(chunk, "results") and chunk.results:
+                    generated_text += str(chunk.results[0].generated_text)
+            return generated_text if generated_text else None
+
+        return None
+    except Exception:
+        return None
+
+
+def _generate_ai_migration_recommendations(
+    assessments: list,
+    metrics: dict,
+    target_platform: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str:
+    """Generate AI-enhanced migration recommendations."""
+    try:
+        # Prepare assessment summary for AI
+        assessment_summary = {
+            "total_cookbooks": metrics["total_cookbooks"],
+            "avg_complexity": metrics.get("avg_complexity", 0),
+            "total_effort_days": metrics["estimated_effort_days"],
+            "target_platform": target_platform,
+            "cookbook_highlights": [
+                {
+                    "name": a["cookbook_name"],
+                    "complexity": a["complexity_score"],
+                    "effort": a["estimated_effort_days"],
+                    "insights": a.get("ai_insights", ""),
+                }
+                for a in assessments[:5]  # Top 5 cookbooks
+            ],
+        }
+
+        prompt = f"""Based on this Chef to Ansible migration assessment, provide specific, actionable recommendations:
+
+Assessment Summary: {json.dumps(assessment_summary, indent=2)}
+
+Provide recommendations covering:
+1. Migration strategy (phased vs big-bang vs parallel)
+2. Team composition and skills needed
+3. Timeline considerations
+4. Risk mitigation approaches
+5. Platform-specific advice for {target_platform}
+6. Priority ordering for cookbook migration
+
+Format as a bulleted list of specific recommendations."""
+
+        ai_response = _call_ai_api(
+            prompt,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        if ai_response:
+            return ai_response
+        else:
+            # Fall back to rule-based recommendations
+            return _generate_migration_recommendations_from_assessment(
+                assessments, metrics, target_platform
+            )
+
+    except Exception:
+        # Fall back to rule-based recommendations
+        return _generate_migration_recommendations_from_assessment(
+            assessments, metrics, target_platform
+        )
+
+
+def _create_ai_migration_roadmap(
+    assessments: list,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str | None = None,
+    base_url: str | None = None,
+) -> str:
+    """Create AI-enhanced migration roadmap."""
+    try:
+        # Prepare cookbook complexity data for AI
+        cookbook_data = [
+            {
+                "name": a["cookbook_name"],
+                "complexity": a["complexity_score"],
+                "effort": a["estimated_effort_days"],
+                "priority": a["migration_priority"],
+                "insights": a.get("ai_insights", ""),
+            }
+            for a in assessments
+        ]
+
+        prompt = f"""Create a detailed migration roadmap for these Chef cookbooks. Consider complexity, dependencies, and migration priorities.
+
+Cookbook Data: {json.dumps(cookbook_data, indent=2)}
+
+Provide a phased migration plan with:
+1. Phase breakdown (Foundation, Core Migration, Advanced, Finalization)
+2. Week-by-week milestones
+3. Success criteria for each phase
+4. Risk mitigation strategies
+5. Team resource allocation recommendations
+
+Format as structured markdown with clear phases and timelines."""
+
+        ai_response = _call_ai_api(
+            prompt,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        if ai_response:
+            return ai_response
+        else:
+            # Fall back to rule-based roadmap
+            return _create_migration_roadmap(assessments)
+
+    except Exception:
+        # Fall back to rule-based roadmap
+        return _create_migration_roadmap(assessments)
+
+
+def _format_ai_assessment_report(
+    migration_scope: str,
+    target_platform: str,
+    overall_metrics: dict[str, int],
+    cookbook_assessments: list[dict],
+    recommendations: str,
+    roadmap: str,
+) -> str:
+    """
+    Format the AI-enhanced assessment report.
+
+    Args:
+        migration_scope: Scope of migration
+        target_platform: Target platform
+        overall_metrics: Overall metrics dictionary
+        cookbook_assessments: List of cookbook assessments
+        recommendations: AI-generated recommendations
+        roadmap: AI-generated roadmap
 
     Returns:
-        Formatted summary output.
+        Formatted AI-enhanced report string
 
     """
-    return f"""# Validation Summary
+    ai_indicator = "\n🤖 **AI-Enhanced Analysis**: This report includes AI-powered insights for more accurate complexity assessment and migration planning.\n"
+
+    return f"""# Chef to Ansible Migration Assessment (AI-Enhanced)
+# Scope: {migration_scope}
+# Target Platform: {target_platform}
+{ai_indicator}
+
+## Overall Migration Metrics:
+{_format_overall_metrics(overall_metrics)}
+
+## Cookbook Assessments:
+{_format_ai_cookbook_assessments(cookbook_assessments)}
+
+## Migration Complexity Analysis:
+{_format_ai_complexity_analysis(cookbook_assessments)}
+
+## AI-Generated Migration Recommendations:
+{recommendations}
+
+## AI-Generated Migration Roadmap:
+{roadmap}
 
-Conversion Type: {conversion_type}
-Errors: {summary["errors"]}
-• Warnings: {summary["warnings"]}
-• Info: {summary["info"]}
+## Risk Assessment:
+{_assess_migration_risks(cookbook_assessments, target_platform)}
 
-{"✅ No critical issues found!" if summary["errors"] == 0 else "❌ Critical issues found - review errors"}
+## Resource Requirements:
+{_estimate_resource_requirements(overall_metrics, target_platform)}
 """
+
+
+def _format_ai_cookbook_assessments(assessments: list) -> str:
+    """Format individual cookbook assessments with AI insights."""
+    if not assessments:
+        return "No cookbooks assessed."
+
+    def _get_priority_icon(priority: str) -> str:
+        """Get priority icon based on migration priority level."""
+        if priority == "high":
+            return "🔴"
+        elif priority == "medium":
+            return "🟡"
+        else:
+            return "🟢"
+
+    formatted = []
+    for assessment in assessments:
+        priority_icon = _get_priority_icon(assessment["migration_priority"])
+        ai_insights = assessment.get("ai_insights", "")
+        insights_section = (
+            f"\n  🤖 **AI Insights**: {ai_insights}" if ai_insights else ""
+        )
+
+        formatted.append(f"""### {assessment["cookbook_name"]} {priority_icon}
+• Complexity Score: {assessment["complexity_score"]:.1f}/100
+• Estimated Effort: {assessment["estimated_effort_days"]} days
+• Recipes: {assessment["metrics"]["recipe_count"]}
+• Resources: {assessment["metrics"]["resource_count"]}
+• Custom Resources: {assessment["metrics"]["custom_resources"]}
+• Challenges: {len(assessment["challenges"])}{insights_section}""")
+
+    return "\n\n".join(formatted)
+
+
+def _format_ai_complexity_analysis(assessments: list) -> str:
+    """Format AI-enhanced complexity analysis."""
+    if not assessments:
+        return "No complexity analysis available."
+
+    high_complexity = [a for a in assessments if a["complexity_score"] > 70]
+    medium_complexity = [a for a in assessments if 30 <= a["complexity_score"] <= 70]
+    low_complexity = [a for a in assessments if a["complexity_score"] < 30]
+
+    # Check for AI insights
+    ai_insights_count = sum(1 for a in assessments if a.get("ai_insights"))
+
+    analysis = f"""• High Complexity (>70): {len(high_complexity)} cookbooks
+• Medium Complexity (30-70): {len(medium_complexity)} cookbooks
+• Low Complexity (<30): {len(low_complexity)} cookbooks
+• AI-Enhanced Assessments: {ai_insights_count}/{len(assessments)} cookbooks
+
+**Top Migration Challenges:**
+{_identify_top_challenges(assessments)}
+
+**AI Analysis Summary:**
+• {ai_insights_count} cookbooks received AI-powered complexity analysis
+• Enhanced accuracy for effort estimation and risk identification
+• Context-aware migration recommendations"""
+
+    return analysis