mcp-souschef 2.1.2-py3-none-any.whl → 2.5.3-py3-none-any.whl

This diff shows the contents of publicly released package versions from one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
souschef/server.py CHANGED
@@ -21,12 +21,16 @@ from souschef.assessment import (
  from souschef.assessment import (
      generate_migration_report as _generate_migration_report,
  )
+ from souschef.assessment import (
+     parse_chef_migration_assessment as _parse_chef_migration_assessment,
+ )
  from souschef.assessment import (
      validate_conversion as _validate_conversion,
  )

  # Import extracted modules
  # Import private helper functions still used in server.py
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.converters.habitat import ( # noqa: F401
      _add_service_build,
      _add_service_dependencies,
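The `# noqa: F401` and `codeql[py/unused-import]` annotations suppress the same unused-import warnings from two different tools: these names are imported only so the test suite can keep reaching them through `souschef.server`. A minimal sketch of that consumer pattern (the test name is hypothetical):

```python
# Hypothetical test: private helpers stay importable from souschef.server
# because of the re-export blocks above, even though server.py never calls them.
from souschef.server import _parse_chef_migration_assessment


def test_assessment_helper_is_reexported():
    assert callable(_parse_chef_migration_assessment)
```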
@@ -48,6 +52,7 @@ from souschef.converters.habitat import (
  )

  # Re-exports of playbook internal functions for backward compatibility (tests)
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.converters.playbook import ( # noqa: F401
      _add_general_recommendations,
      _convert_chef_block_to_ansible,
@@ -82,9 +87,8 @@ from souschef.converters.playbook import (
  from souschef.converters.playbook import (
      generate_dynamic_inventory_script as _generate_dynamic_inventory_script,
  )
- from souschef.converters.playbook import (
-     generate_playbook_from_recipe as _generate_playbook_from_recipe,
- )
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.converters.resource import ( # noqa: F401
      _convert_chef_resource_to_ansible,
      _format_ansible_task,
@@ -97,6 +101,7 @@ from souschef.converters.resource import (

  # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
  # These imports are intentionally exposed for external test access
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.constants import ( # noqa: F401
      ACTION_TO_STATE,
      ANSIBLE_SERVICE_MODULE,
@@ -106,16 +111,21 @@ from souschef.core.constants import ( # noqa: F401
  )

  # Import core utilities
+ from souschef.core.errors import format_error_with_context
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.path_utils import _normalize_path, _safe_join # noqa: F401

  # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
  # These imports are intentionally exposed for external test access
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.ruby_utils import ( # noqa: F401
      _normalize_ruby_value,
  )

  # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
  # These imports are intentionally exposed for external test access
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.validation import ( # noqa: F401
      ValidationCategory,
      ValidationEngine,
@@ -127,6 +137,7 @@ from souschef.core.validation import ( # noqa: F401
  # Re-exports of deployment internal functions for backward compatibility (tests)
  # Public re-exports of deployment functions for test backward compatibility
  # Note: MCP tool wrappers exist for some of these, but tests import directly
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.deployment import ( # noqa: F401
      _analyze_cookbook_for_awx,
      _analyze_cookbooks_directory,
@@ -180,6 +191,8 @@ from souschef.deployment import (
  # Import filesystem operations
  from souschef.filesystem import list_directory as _list_directory
  from souschef.filesystem import read_file as _read_file
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.attributes import ( # noqa: F401
      _extract_attributes,
      _format_attributes,
@@ -190,6 +203,8 @@ from souschef.parsers.attributes import ( # noqa: F401

  # Import parser functions
  from souschef.parsers.attributes import parse_attributes as _parse_attributes
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.habitat import ( # noqa: F401
      _extract_plan_array,
      _extract_plan_exports,
@@ -202,13 +217,24 @@ from souschef.parsers.habitat import ( # noqa: F401
  from souschef.parsers.habitat import parse_habitat_plan as _parse_habitat_plan

  # Re-export InSpec internal functions for backward compatibility (tests)
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.inspec import ( # noqa: F401
      _convert_inspec_to_ansible_assert,
+     _convert_inspec_to_goss,
+     _convert_inspec_to_serverspec,
      _convert_inspec_to_testinfra,
      _extract_inspec_describe_blocks,
      _generate_inspec_from_resource,
      _parse_inspec_control,
  )
+ from souschef.parsers.inspec import (
+     convert_inspec_to_test as _convert_inspec_test,
+ )
+ from souschef.parsers.inspec import (
+     parse_inspec_profile as _parse_inspec,
+ )
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.metadata import ( # noqa: F401
      _extract_metadata,
      _format_cookbook_structure,
@@ -217,18 +243,27 @@ from souschef.parsers.metadata import ( # noqa: F401
  from souschef.parsers.metadata import (
      list_cookbook_structure as _list_cookbook_structure,
  )
+ from souschef.parsers.metadata import (
+     parse_cookbook_metadata as _parse_cookbook_metadata,
+ )
  from souschef.parsers.metadata import read_cookbook_metadata as _read_cookbook_metadata
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.recipe import ( # noqa: F401
      _extract_conditionals,
      _extract_resources,
      _format_resources,
  )
  from souschef.parsers.recipe import parse_recipe as _parse_recipe
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.resource import ( # noqa: F401
      _extract_resource_actions,
      _extract_resource_properties,
  )
  from souschef.parsers.resource import parse_custom_resource as _parse_custom_resource
+
+ # codeql[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.template import ( # noqa: F401
      _convert_erb_to_jinja2,
      _extract_code_block_variables,
@@ -330,6 +365,21 @@ def read_cookbook_metadata(path: str) -> str:
      return _read_cookbook_metadata(path)


+ @mcp.tool()
+ def parse_cookbook_metadata(path: str) -> dict[str, str | list[str]]:
+     """
+     Parse Chef cookbook metadata.rb file and return as dictionary.
+
+     Args:
+         path: Path to the metadata.rb file.
+
+     Returns:
+         Dictionary containing extracted metadata fields.
+
+     """
+     return _parse_cookbook_metadata(path)
+
+
  @mcp.tool()
  def parse_recipe(path: str) -> str:
      """
@@ -513,6 +563,7 @@ def _parse_controls_from_file(profile_path: Path) -> list[dict[str, Any]]:
          raise RuntimeError(f"Error reading file: {e}") from e


+ @mcp.tool()
  def parse_inspec_profile(path: str) -> str:
      """
      Parse an InSpec profile and extract controls.
@@ -524,93 +575,23 @@ def parse_inspec_profile(path: str) -> str:
          JSON string with parsed controls, or error message.

      """
-     try:
-         profile_path = _normalize_path(path)
-
-         if not profile_path.exists():
-             return f"Error: Path does not exist: {path}"
-
-         if profile_path.is_dir():
-             controls = _parse_controls_from_directory(profile_path)
-         elif profile_path.is_file():
-             controls = _parse_controls_from_file(profile_path)
-         else:
-             return f"Error: Invalid path type: {path}"
-
-         return json.dumps(
-             {
-                 "profile_path": str(profile_path),
-                 "controls_count": len(controls),
-                 "controls": controls,
-             },
-             indent=2,
-         )
-
-     except (FileNotFoundError, RuntimeError) as e:
-         return f"Error: {e}"
-     except Exception as e:
-         return f"An error occurred while parsing InSpec profile: {e}"
+     return _parse_inspec(path)


  @mcp.tool()
  def convert_inspec_to_test(inspec_path: str, output_format: str = "testinfra") -> str:
      """
-     Convert InSpec controls to Ansible test format.
+     Convert InSpec controls to test framework format.

      Args:
          inspec_path: Path to InSpec profile or control file.
-         output_format: Output format ('testinfra' or 'ansible_assert').
+         output_format: Output format ('testinfra', 'ansible_assert', 'serverspec', or 'goss').

      Returns:
          Converted test code or error message.

      """
-     try:
-         # First parse the InSpec profile
-         parse_result = parse_inspec_profile(inspec_path)
-
-         # Check if parsing failed
-         if parse_result.startswith(ERROR_PREFIX):
-             return parse_result
-
-         # Parse JSON result
-         profile_data = json.loads(parse_result)
-         controls = profile_data["controls"]
-
-         if not controls:
-             return "Error: No controls found in InSpec profile"
-
-         # Convert each control
-         converted_tests = []
-
-         if output_format == "testinfra":
-             converted_tests.append("import pytest")
-             converted_tests.append("")
-             converted_tests.append("")
-             for control in controls:
-                 test_code = _convert_inspec_to_testinfra(control)
-                 converted_tests.append(test_code)
-
-         elif output_format == "ansible_assert":
-             converted_tests.append("---")
-             converted_tests.append("# Validation tasks converted from InSpec")
-             converted_tests.append("")
-             for control in controls:
-                 assert_code = _convert_inspec_to_ansible_assert(control)
-                 converted_tests.append(assert_code)
-                 converted_tests.append("")
-
-         else:
-             error_msg = (
-                 f"Error: Unsupported format '{output_format}'. "
-                 "Use 'testinfra' or 'ansible_assert'"
-             )
-             return error_msg
-
-         return "\n".join(converted_tests)
-
-     except Exception as e:
-         return f"An error occurred while converting InSpec: {e}"
+     return _convert_inspec_test(inspec_path, output_format)


  def _extract_resources_from_parse_result(parse_result: str) -> list[dict[str, Any]]:
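With 2.5.3 the wrapper delegates to `souschef.parsers.inspec.convert_inspec_to_test`, which adds the two new output formats named in the docstring. A hedged sketch (the profile path is hypothetical):

```python
# 'serverspec' and 'goss' are new in this release; the first two existed before.
for fmt in ("testinfra", "ansible_assert", "serverspec", "goss"):
    output = convert_inspec_to_test("profiles/nginx_baseline", output_format=fmt)
    print(fmt, "->", output.splitlines()[0] if output else "(empty)")
```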
@@ -696,7 +677,9 @@ def generate_inspec_from_recipe(recipe_path: str) -> str:
          return "\n".join(controls)

      except Exception as e:
-         return f"An error occurred while generating InSpec controls: {e}"
+         return format_error_with_context(
+             e, "generating InSpec controls from recipe", recipe_path
+         )


  @mcp.tool()
@@ -724,11 +707,34 @@ def convert_chef_databag_to_vars(
      try:
          import yaml

+         # Validate inputs
+         if not databag_content or not databag_content.strip():
+             return (
+                 "Error: Databag content cannot be empty\n\n"
+                 "Suggestion: Provide valid JSON content from a Chef data bag"
+             )
+
+         if not databag_name or not databag_name.strip():
+             return (
+                 "Error: Databag name cannot be empty\n\n"
+                 "Suggestion: Provide a valid data bag name"
+             )
+
+         valid_scopes = ["group_vars", "host_vars", "playbook"]
+         if target_scope not in valid_scopes:
+             return (
+                 f"Error: Invalid target scope '{target_scope}'\n\n"
+                 f"Suggestion: Use one of {', '.join(valid_scopes)}"
+             )
+
          # Parse the data bag content
          try:
              parsed_databag = json.loads(databag_content)
          except json.JSONDecodeError as e:
-             return f"Error: Invalid JSON format in data bag: {e}"
+             return (
+                 f"Error: Invalid JSON format in data bag: {e}\n\n"
+                 "Suggestion: Ensure the databag content is valid JSON"
+             )

          # Convert to Ansible variables format
          ansible_vars = _convert_databag_to_ansible_vars(
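The new guard clauses mean malformed input now comes back immediately as an `Error:`/`Suggestion:` string rather than surfacing later as an exception. A sketch under the assumption that the parameter names used in the checks (`databag_content`, `databag_name`, `target_scope`) are usable as keywords; the full signature is not shown in this hunk:

```python
# Empty content is rejected up front (keyword names assumed from the checks).
result = convert_chef_databag_to_vars(databag_content="", databag_name="users")
assert result.startswith("Error: Databag content cannot be empty")

# target_scope must be one of group_vars, host_vars, or playbook.
result = convert_chef_databag_to_vars(
    databag_content='{"id": "admin"}', databag_name="users", target_scope="vars"
)
assert "Invalid target scope 'vars'" in result
```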
@@ -763,14 +769,95 @@ def convert_chef_databag_to_vars(
  {yaml_content.rstrip()}
  """
      except Exception as e:
-         return f"Error converting data bag to Ansible variables: {e}"
+         return format_error_with_context(
+             e, f"converting data bag '{databag_name}' to Ansible variables"
+         )


  @mcp.tool()
+ def _validate_databags_directory(
+     databags_directory: str,
+ ) -> tuple[Path | None, str | None]:
+     """
+     Validate databags directory input.
+
+     Args:
+         databags_directory: Path to the data bags directory.
+
+     Returns:
+         Tuple of (normalized_path, error_message).
+         If validation succeeds: (Path, None)
+         If validation fails: (None, error_message)
+
+     """
+     if not databags_directory or not databags_directory.strip():
+         return None, (
+             "Error: Databags directory path cannot be empty\n\n"
+             "Suggestion: Provide the path to your Chef data_bags directory"
+         )
+
+     databags_path = _normalize_path(databags_directory)
+     if not databags_path.exists():
+         return None, (
+             f"Error: Data bags directory not found: {databags_directory}\n\n"
+             "Suggestion: Check that the path is correct and the directory exists"
+         )
+
+     if not databags_path.is_dir():
+         return None, (
+             f"Error: Path is not a directory: {databags_directory}\n\n"
+             "Suggestion: Provide a path to the data_bags directory"
+         )
+
+     return databags_path, None
+
+
+ def _convert_databag_item(item_file, databag_name: str, output_directory: str) -> dict:
+     """Convert a single databag item file to Ansible format."""
+     item_name = item_file.stem
+
+     try:
+         with item_file.open() as f:
+             content = f.read()
+
+         # Detect if encrypted
+         is_encrypted = _detect_encrypted_databag(content)
+
+         # Convert to Ansible format
+         result = convert_chef_databag_to_vars(
+             content, databag_name, item_name, is_encrypted, output_directory
+         )
+
+         vault_suffix = "_vault" if is_encrypted else ""
+         target_file = f"{output_directory}/{databag_name}{vault_suffix}.yml"
+
+         return {
+             "databag": databag_name,
+             "item": item_name,
+             "encrypted": is_encrypted,
+             "target_file": target_file,
+             "content": result,
+         }
+
+     except Exception as e:
+         return {"databag": databag_name, "item": item_name, "error": str(e)}
+
+
+ def _process_databag_directory(databag_dir, output_directory: str) -> list[dict]:
+     """Process all items in a single databag directory."""
+     results = []
+     databag_name = databag_dir.name
+
+     for item_file in databag_dir.glob("*.json"):
+         result = _convert_databag_item(item_file, databag_name, output_directory)
+         results.append(result)
+
+     return results
+
+
  def generate_ansible_vault_from_databags(
      databags_directory: str,
      output_directory: str = "group_vars",
-     encryption_key_hint: str = "",
  ) -> str:
      """
      Generate Ansible Vault files from Chef data bags directory.
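The loop body of `generate_ansible_vault_from_databags` was extracted into the two helpers above; each `*.json` item file yields one result dict. A sketch of the shapes involved (paths and values illustrative):

```python
from pathlib import Path

# One dict per *.json item in the bag directory.
results = _process_databag_directory(Path("data_bags/users"), "group_vars")

for r in results:
    if "error" in r:  # failure record: databag, item, error
        print(f"{r['databag']}/{r['item']} failed: {r['error']}")
    else:  # success record adds encrypted, target_file, content
        print(f"{r['databag']}/{r['item']} -> {r['target_file']}"
              f" (encrypted={r['encrypted']})")
```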
@@ -778,16 +865,21 @@ def generate_ansible_vault_from_databags(
      Args:
          databags_directory: Path to Chef data_bags directory
          output_directory: Target directory for Ansible variables (group_vars/host_vars)
-         encryption_key_hint: Hint for identifying encrypted data bags

      Returns:
          Summary of converted data bags and instructions

      """
      try:
-         databags_path = _normalize_path(databags_directory)
-         if not databags_path.exists():
-             return f"Error: Data bags directory not found: {databags_directory}"
+         # Validate inputs
+         databags_path, error = _validate_databags_directory(databags_directory)
+         if error:
+             assert isinstance(error, str), "error must be string when present"
+             return error
+
+         assert databags_path is not None, (
+             "databags_path must be non-None after successful validation"
+         )

          conversion_results = []
@@ -796,40 +888,8 @@ def generate_ansible_vault_from_databags(
              if not databag_dir.is_dir():
                  continue

-             databag_name = databag_dir.name
-
-             # Process each item in the data bag
-             for item_file in databag_dir.glob("*.json"):
-                 item_name = item_file.stem
-
-                 try:
-                     with item_file.open() as f:
-                         content = f.read()
-
-                     # Detect if encrypted (Chef encrypted data bags have specific structure)
-                     is_encrypted = _detect_encrypted_databag(content)
-
-                     # Convert to Ansible format
-                     result = convert_chef_databag_to_vars(
-                         content, databag_name, item_name, is_encrypted, output_directory
-                     )
-
-                     vault_suffix = "_vault" if is_encrypted else ""
-                     target_file = f"{output_directory}/{databag_name}{vault_suffix}.yml"
-                     conversion_results.append(
-                         {
-                             "databag": databag_name,
-                             "item": item_name,
-                             "encrypted": is_encrypted,
-                             "target_file": target_file,
-                             "content": result,
-                         }
-                     )
-
-                 except Exception as e:
-                     conversion_results.append(
-                         {"databag": databag_name, "item": item_name, "error": str(e)}
-                     )
+             results = _process_databag_directory(databag_dir, output_directory)
+             conversion_results.extend(results)

          # Generate summary and file structure
          return _generate_databag_conversion_summary(
@@ -837,7 +897,9 @@ def generate_ansible_vault_from_databags(
          )

      except Exception as e:
-         return f"Error processing data bags directory: {e}"
+         return format_error_with_context(
+             e, "processing data bags directory", databags_directory
+         )


  @mcp.tool()
@@ -891,7 +953,7 @@ def analyze_chef_databag_usage(cookbook_path: str, databags_path: str = "") -> s
  4. Encrypt sensitive data with ansible-vault
  """
      except Exception as e:
-         return f"Data bag parsing failed: {e}"
+         return format_error_with_context(e, "analyzing data bag usage", cookbook_path)


  @mcp.tool()
@@ -936,7 +998,9 @@ def convert_chef_environment_to_inventory_group(
  # {environment_name}
  """
      except Exception as e:
-         return f"Environment conversion failed: {e}"
+         return format_error_with_context(
+             e, "converting Chef environment to inventory group", environment_name
+         )


  @mcp.tool()
@@ -994,7 +1058,9 @@ def generate_inventory_from_chef_environments(
          )

      except Exception as e:
-         return f"Error generating inventory from Chef environments: {e}"
+         return format_error_with_context(
+             e, "generating inventory from Chef environments", environments_directory
+         )


  @mcp.tool()
@@ -1053,7 +1119,9 @@ def analyze_chef_environment_usage(
      5. Test environment-specific deployments with new inventory structure
      """
      except Exception as e:
-         return f"Error analyzing Chef environment usage: {e}"
+         return format_error_with_context(
+             e, "analyzing Chef environment usage", cookbook_path
+         )


  def _parse_chef_environment_content(content: str) -> dict:
@@ -1094,6 +1162,183 @@ def _parse_chef_environment_content(content: str) -> dict:
      return env_data


+ def _convert_ruby_literal(value: str) -> Any:
+     """
+     Convert Ruby literal values to equivalent Python types.
+
+     This function handles the conversion of Ruby's basic literal values
+     to their Python equivalents during Chef environment parsing.
+
+     Args:
+         value: String representation of a Ruby literal value.
+
+     Returns:
+         The converted Python value:
+         - "true" -> True (bool)
+         - "false" -> False (bool)
+         - "nil" -> None
+         - Integer strings -> int (e.g., "42" -> 42)
+         - Float strings -> float (e.g., "3.14" -> 3.14, "1e10" -> 10000000000.0)
+         - Unrecognized values -> original string unchanged
+
+     Examples:
+         >>> _convert_ruby_literal("true")
+         True
+         >>> _convert_ruby_literal("42")
+         42
+         >>> _convert_ruby_literal("3.14")
+         3.14
+         >>> _convert_ruby_literal("nil")
+         None
+         >>> _convert_ruby_literal("some_string")
+         'some_string'
+
+     """
+     # Handle boolean and nil values
+     literal_map = {
+         "true": True,
+         "false": False,
+         "nil": None,
+     }
+
+     if value in literal_map:
+         return literal_map[value]
+
+     # Handle numeric values
+     try:
+         # Try integer first
+         if "." not in value and "e" not in value.lower():
+             return int(value)
+         else:
+             return float(value)
+     except ValueError:
+         pass
+
+     # Return as string if no conversion applies
+     return value
+
+
+ def _parse_quoted_key(content: str, i: int) -> tuple[str, int]:
+     """Parse a quoted key and return (key, new_index)."""
+     if content[i] not in "'\"":
+         raise ValueError("Expected quote at start of key")
+
+     quote = content[i]
+     i += 1
+     key_start = i
+     while i < len(content) and content[i] != quote:
+         i += 1
+     key = content[key_start:i]
+     i += 1  # skip closing quote
+     return key, i
+
+
+ def _parse_nested_hash(content: str, i: int) -> tuple[dict, int]:
+     """Parse a nested hash and return (parsed_dict, new_index)."""
+     if content[i] != "{":
+         raise ValueError("Expected opening brace for nested hash")
+
+     brace_count = 1
+     start = i
+     i += 1
+     while i < len(content) and brace_count > 0:
+         if content[i] == "{":
+             brace_count += 1
+         elif content[i] == "}":
+             brace_count -= 1
+         i += 1
+
+     nested_content = content[start + 1 : i - 1]  # exclude braces
+     return parse_ruby_hash(nested_content), i
+
+
+ def _parse_simple_value(content: str, i: int) -> tuple[str, int]:
+     """Parse a simple value and return (value, new_index)."""
+     value_start = i
+     while i < len(content) and content[i] not in ",}":
+         i += 1
+     value = content[value_start:i].strip()
+     # Remove quotes if present
+     if (value.startswith("'") and value.endswith("'")) or (
+         value.startswith('"') and value.endswith('"')
+     ):
+         value = value[1:-1]
+     else:
+         # Convert Ruby literals to Python types
+         value = _convert_ruby_literal(value)
+     return value, i
+
+
+ def _skip_to_next_item(content: str, i: int) -> int:
+     """Skip to the next item, handling delimiters."""
+     while i < len(content) and content[i] not in ",}":
+         i += 1
+     if i < len(content) and (content[i] == "," or content[i] == "}"):
+         i += 1
+     return i
+
+
+ def parse_ruby_hash(content: str) -> dict:
+     """Parse Ruby hash syntax recursively."""
+     result = {}
+
+     # Simple recursive parser for Ruby hash syntax
+     # This handles nested braces by counting them
+     i = 0
+     while i < len(content):
+         # Skip whitespace
+         i = _skip_whitespace(content, i)
+         if i >= len(content):
+             break
+
+         # Parse key-value pair
+         key, value, i = _parse_key_value_pair(content, i)
+         if key is not None:
+             result[key] = value
+
+         # Skip to next item
+         i = _skip_to_next_item(content, i)
+
+     return result
+
+
+ def _skip_whitespace(content: str, i: int) -> int:
+     """Skip whitespace characters and return new index."""
+     while i < len(content) and content[i].isspace():
+         i += 1
+     return i
+
+
+ def _parse_key_value_pair(content: str, i: int) -> tuple[str | None, Any, int]:
+     """Parse a single key => value pair and return (key, value, new_index)."""
+     # Look for key => value patterns
+     if content[i] in "'\"":
+         # Parse quoted key
+         key, i = _parse_quoted_key(content, i)
+
+         # Skip whitespace and =>
+         i = _skip_whitespace_and_arrows(content, i)
+
+         value: Any
+         if i < len(content) and content[i] == "{":
+             # Nested hash
+             value, i = _parse_nested_hash(content, i)
+         else:
+             # Simple value
+             value, i = _parse_simple_value(content, i)
+
+         return key, value, i
+
+     return None, None, i
+
+
+ def _skip_whitespace_and_arrows(content: str, i: int) -> int:
+     """Skip whitespace and => symbols."""
+     while i < len(content) and (content[i].isspace() or content[i] in "=>"):
+         i += 1
+     return i
+
+
  def _extract_attributes_block(content: str, block_type: str) -> dict:
      """Extract attribute blocks from Chef environment content."""
      # Find the block start
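The recursive-descent parser added above replaces the regex approach removed in the next hunk. A minimal check of its behaviour, derived from reading the code (not from the package's own tests):

```python
# Quoted values stay strings; bare literals go through _convert_ruby_literal,
# so Ruby's true/false/nil and numerics become Python types.
ruby = "'port' => '8080', 'ssl' => { 'enabled' => true, 'timeout' => 30 }"

assert parse_ruby_hash(ruby) == {
    "port": "8080",
    "ssl": {"enabled": True, "timeout": 30},
}
```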
@@ -1105,37 +1350,7 @@ def _extract_attributes_block(content: str, block_type: str) -> dict:

      block_content = match.group(1).strip()

-     # Simple parsing of Ruby hash-like structure
-     # Ruby attribute hashes use => syntax, which we convert to Python dict
-     # This is intentionally simple - complex Chef DSL needs full Ruby parser
-     attributes = {}
-
-     # Parse simple key-value pairs like 'port' => '8080'
-     key_value_pattern = (
-         r"['\"]([^'\"]{0,100})['\"][\s:]*=>[\s:]*['\"]([^'\"]{0,200})['\"]"
-     )
-     for match in re.finditer(key_value_pattern, block_content):
-         attr_key = match.group(1)
-         attr_value = match.group(2)
-         attributes[attr_key] = attr_value
-
-     # Parse nested structures (basic support)
-     nested_pattern = (
-         r"['\"](([^'\"\n]{0,100}))['\"](\\s|:)*=>(\\s|:)*\\{([^}]{0,500})\\}"
-     )
-     for match in re.finditer(nested_pattern, block_content):
-         key = match.group(1)
-         nested_content = match.group(5)
-         nested_attrs = {}
-
-         for nested_match in re.finditer(key_value_pattern, nested_content):
-             nested_key = nested_match.group(1)
-             nested_value = nested_match.group(2)
-             nested_attrs[nested_key] = nested_value
-
-         if nested_attrs:
-             attributes[key] = nested_attrs
-
+     attributes = parse_ruby_hash(block_content)
      return attributes

@@ -1196,18 +1411,27 @@ def _generate_inventory_group_from_environment(
      return yaml.dump(group_vars, default_flow_style=False, indent=2)


- def _generate_complete_inventory_from_environments(
-     environments: dict, results: list, output_format: str
- ) -> str:
-     """Generate complete Ansible inventory from multiple Chef environments."""
-     import yaml
+ def _build_conversion_summary(results: list) -> str:
+     """
+     Build summary of environment conversion results.
+
+     Args:
+         results: List of conversion result dicts
+
+     Returns:
+         Formatted summary string
+
+     """
+     total = len(results)
+     successful = len([r for r in results if r["status"] == "success"])
+     failed = len([r for r in results if r["status"] == "error"])

      summary = f"""# Chef Environments to Ansible Inventory Conversion

  ## Processing Summary:
- - Total environments processed: {len(results)}
- - Successfully converted: {len([r for r in results if r["status"] == "success"])}
- - Failed conversions: {len([r for r in results if r["status"] == "error"])}
+ - Total environments processed: {total}
+ - Successfully converted: {successful}
+ - Failed conversions: {failed}

  ## Environment Details:
  """
@@ -1223,35 +1447,71 @@ def _generate_complete_inventory_from_environments(
          else:
              summary += f"❌ {result['environment']}: {result['error']}\n"

-     if output_format in ["yaml", "both"]:
-         summary += "\n## YAML Inventory Structure:\n\n```yaml\n"
+     return summary

-         # Generate YAML inventory
-         inventory: dict[str, Any] = {"all": {"children": {}}}

-         for env_name, env_data in environments.items():
-             inventory["all"]["children"][env_name] = {
-                 "hosts": {},  # Hosts to be added manually
-                 "vars": _flatten_environment_vars(env_data),
-             }
+ def _generate_yaml_inventory(environments: dict) -> str:
+     """
+     Generate YAML format inventory from environments.

-         summary += yaml.dump(inventory, default_flow_style=False, indent=2)
-         summary += "```\n"
+     Args:
+         environments: Dict of environment name to data

-     if output_format in ["ini", "both"]:
-         summary += "\n## INI Inventory Structure:\n\n```ini\n"
-         summary += "[all:children]\n"
-         for env_name in environments:
-             summary += f"{env_name}\n"
+     Returns:
+         YAML inventory string
+
+     """
+     import yaml

-         summary += "\n"
-         for env_name in environments:
-             summary += f"[{env_name}]\n"
-             summary += "# Add your hosts here\n\n"
+     inventory: dict[str, Any] = {"all": {"children": {}}}

-         summary += "```\n"
+     for env_name, env_data in environments.items():
+         inventory["all"]["children"][env_name] = {
+             "hosts": {},  # Hosts to be added manually
+             "vars": _flatten_environment_vars(env_data),
+         }
+
+     yaml_output = yaml.dump(inventory, default_flow_style=False, indent=2)
+     return f"\n## YAML Inventory Structure:\n\n```yaml\n{yaml_output}```\n"
+
+
+ def _generate_ini_inventory(environments: dict) -> str:
+     """
+     Generate INI format inventory from environments.
+
+     Args:
+         environments: Dict of environment name to data
+
+     Returns:
+         INI inventory string
+
+     """
+     output = "\n## INI Inventory Structure:\n\n```ini\n"
+     output += "[all:children]\n"
+     for env_name in environments:
+         output += f"{env_name}\n"
+
+     output += "\n"
+     for env_name in environments:
+         output += f"[{env_name}]\n"
+         output += "# Add your hosts here\n\n"
+
+     output += "```\n"
+     return output
+
+
+ def _generate_next_steps_guide(environments: dict) -> str:
+     """
+     Generate next steps and file structure guide.
+
+     Args:
+         environments: Dict of environment name to data

-     summary += """
+     Returns:
+         Guide string
+
+     """
+     guide = """
  ## Next Steps:
  1. Create group_vars directory structure
  2. Add environment-specific variable files
@@ -1262,7 +1522,40 @@ def _generate_complete_inventory_from_environments(
  ## File Structure to Create:
  """
      for env_name in environments:
-         summary += f"- inventory/group_vars/{env_name}.yml\n"
+         guide += f"- inventory/group_vars/{env_name}.yml\n"
+
+     return guide
+
+
+ def _generate_complete_inventory_from_environments(
+     environments: dict, results: list, output_format: str
+ ) -> str:
+     """
+     Generate complete Ansible inventory from multiple Chef environments.
+
+     Orchestrates summary, YAML/INI generation, and guidance.
+
+     Args:
+         environments: Dict of environment name to data
+         results: List of conversion results
+         output_format: Output format ("yaml", "ini", or "both")
+
+     Returns:
+         Complete formatted inventory with summary and guidance
+
+     """
+     # Build conversion summary
+     summary = _build_conversion_summary(results)
+
+     # Generate requested inventory formats
+     if output_format in ["yaml", "both"]:
+         summary += _generate_yaml_inventory(environments)
+
+     if output_format in ["ini", "both"]:
+         summary += _generate_ini_inventory(environments)
+
+     # Add next steps guide
+     summary += _generate_next_steps_guide(environments)

      return summary

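`_generate_complete_inventory_from_environments` is now a thin orchestrator over the four helpers introduced in these hunks. A sketch of the composition (input dicts abbreviated; success records likely carry more fields than shown here, and `_flatten_environment_vars` is internal to the package):

```python
environments = {"production": {"default_attributes": {"port": "8080"}}}
results = [{"environment": "staging", "status": "error", "error": "parse failure"}]

report = _generate_complete_inventory_from_environments(
    environments, results, output_format="both"
)
# With "both", the report contains the processing summary, then the YAML
# section, the INI section, and the next-steps guide, in that order.
```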
@@ -1323,7 +1616,7 @@ def _find_environment_patterns_in_content(content: str, file_path: str) -> list:
      # Common Chef environment patterns
      environment_patterns = [
          (r"node\.chef_environment", "node.chef_environment"),
-         (r"node\[['\"]\environment['\"]\]", 'node["environment"]'),
+         (r"node\[['\"]environment['\"]\]", 'node["environment"]'),
          (r"environment\s+['\"]([^'\"\n]{0,100})['\"]", "environment declaration"),
          (
              r"if\s+node\.chef_environment\s*==\s*['\"]([^'\"\n]{0,100})['\"]",
@@ -1592,43 +1885,118 @@ def _detect_encrypted_databag(content: str) -> bool:
      return False


- def _generate_databag_conversion_summary(results: list, output_dir: str) -> str:
-     """Generate summary of data bag conversion results."""
-     total_bags = len(results)
-     successful = len([r for r in results if "error" not in r])
-     encrypted = len([r for r in results if r.get("encrypted", False)])
+ def _calculate_conversion_statistics(results: list) -> dict[str, int]:
+     """
+     Calculate statistics from conversion results.

-     summary = f"""# Data Bag Conversion Summary
+     Args:
+         results: List of conversion result dictionaries.

- ## Statistics:
- - Total data bags processed: {total_bags}
- - Successfully converted: {successful}
- - Failed conversions: {total_bags - successful}
- - Encrypted data bags: {encrypted}
+     Returns:
+         Dictionary with 'total', 'successful', and 'encrypted' counts.
+
+     """
+     return {
+         "total": len(results),
+         "successful": len([r for r in results if "error" not in r]),
+         "encrypted": len([r for r in results if r.get("encrypted", False)]),
+     }
+
+
+ def _build_statistics_section(stats: dict[str, int]) -> str:
+     """
+     Build the statistics section of the summary.
+
+     Args:
+         stats: Dictionary with conversion statistics.
+
+     Returns:
+         Formatted statistics section as markdown.
+
+     """
+     return f"""# Data Bag Conversion Summary

- ## Generated Files:
+ ## Statistics:
+ - Total data bags processed: {stats["total"]}
+ - Successfully converted: {stats["successful"]}
+ - Failed conversions: {stats["total"] - stats["successful"]}
+ - Encrypted data bags: {stats["encrypted"]}
  """
+
+
+ def _extract_generated_files(results: list) -> list[str]:
+     """
+     Extract unique generated file paths from results.
+
+     Args:
+         results: List of conversion result dictionaries.
+
+     Returns:
+         Sorted list of unique file paths.
+
+     """
      files_created = set()
      for result in results:
          if "error" not in result:
              target_file = result["target_file"]
              files_created.add(target_file)
+     return sorted(files_created)

-     for file in sorted(files_created):
-         summary += f"- {file}\n"

-     summary += "\n## Conversion Details:\n"
+ def _build_files_section(files: list[str]) -> str:
+     """
+     Build the generated files section.
+
+     Args:
+         files: List of generated file paths.
+
+     Returns:
+         Formatted files section as markdown.
+
+     """
+     section = "\n## Generated Files:\n"
+     for file in files:
+         section += f"- {file}\n"
+     return section
+
+
+ def _build_conversion_details_section(results: list) -> str:
+     """
+     Build the conversion details section.
+
+     Args:
+         results: List of conversion result dictionaries.
+
+     Returns:
+         Formatted conversion details section as markdown.
+
+     """
+     section = "\n## Conversion Details:\n"

      for result in results:
          if "error" in result:
-             summary += f"❌ {result['databag']}/{result['item']}: {result['error']}\n"
+             section += f"❌ {result['databag']}/{result['item']}: {result['error']}\n"
          else:
-             status = "🔒 Encrypted" if result["encrypted"] else "📄 Plain"
+             status = "🔒 Encrypted" if result.get("encrypted", False) else "📄 Plain"
              databag_item = f"{result['databag']}/{result['item']}"
              target = result["target_file"]
-             summary += f"✅ {databag_item} → {target} ({status})\n"
+             section += f"✅ {databag_item} → {target} ({status})\n"

-     summary += f"""
+     return section
+
+
+ def _build_next_steps_section(output_dir: str) -> str:
+     """
+     Build the next steps section.
+
+     Args:
+         output_dir: Output directory path.
+
+     Returns:
+         Formatted next steps section as markdown.
+
+     """
+     return f"""
  ## Next Steps:
  1. Review generated variable files in {output_dir}/
  2. Encrypt vault files: `ansible-vault encrypt {output_dir}/*_vault.yml`
@@ -1636,7 +2004,29 @@ def _generate_databag_conversion_summary(results: list, output_dir: str) -> str:
  4. Test variable access in playbooks
  5. Remove original Chef data bags after validation
  """
-     return summary
+
+
+ def _generate_databag_conversion_summary(results: list, output_dir: str) -> str:
+     """
+     Generate summary of data bag conversion results.
+
+     Args:
+         results: List of conversion result dictionaries.
+         output_dir: Output directory path.
+
+     Returns:
+         Complete formatted summary as markdown.
+
+     """
+     stats = _calculate_conversion_statistics(results)
+     files = _extract_generated_files(results)
+
+     return (
+         _build_statistics_section(stats)
+         + _build_files_section(files)
+         + _build_conversion_details_section(results)
+         + _build_next_steps_section(output_dir)
+     )


  def _extract_databag_usage_from_cookbook(cookbook_path) -> list:
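After this refactor, `_generate_databag_conversion_summary` is pure concatenation of the section builders. A sketch with a two-item result list:

```python
results = [
    {"databag": "users", "item": "admin", "encrypted": False,
     "target_file": "group_vars/users.yml", "content": "..."},
    {"databag": "secrets", "item": "api", "error": "invalid JSON"},
]

summary = _generate_databag_conversion_summary(results, "group_vars")
# Expected: totals of 2 processed / 1 converted / 1 failed / 0 encrypted,
# one generated-file bullet, one ✅ and one ❌ detail line, then next steps.
```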
@@ -1745,65 +2135,126 @@ def _analyze_databag_structure(databags_path) -> dict:
      return structure


- def _generate_databag_migration_recommendations(
-     usage_patterns: list, databag_structure: dict
- ) -> str:
-     """Generate migration recommendations based on usage analysis."""
-     recommendations = []
+ def _analyze_usage_patterns(usage_patterns: list) -> list[str]:
+     """
+     Analyze databag usage patterns and generate recommendations.

-     # Analyze usage patterns
-     if usage_patterns:
-         unique_databags = {
-             p.get("databag_name") for p in usage_patterns if p.get("databag_name")
-         }
+     Args:
+         usage_patterns: List of usage pattern dicts
+
+     Returns:
+         List of recommendation strings
+
+     """
+     recommendations: list[str] = []
+
+     if not usage_patterns:
+         return recommendations
+
+     unique_databags = {
+         p.get("databag_name") for p in usage_patterns if p.get("databag_name")
+     }
+     recommendations.append(
+         f"• Found {len(usage_patterns)} data bag references "
+         f"across {len(unique_databags)} different data bags"
+     )
+
+     # Check for encrypted usage
+     encrypted_usage = [p for p in usage_patterns if "encrypted" in p.get("type", "")]
+     if encrypted_usage:
          recommendations.append(
-             f"• Found {len(usage_patterns)} data bag references "
-             f"across {len(unique_databags)} different data bags"
+             f"• {len(encrypted_usage)} encrypted data bag references "
+             f"- convert to Ansible Vault"
          )

-         # Check for encrypted usage
-         encrypted_usage = [
-             p for p in usage_patterns if "encrypted" in p.get("type", "")
-         ]
-         if encrypted_usage:
-             recommendations.append(
-                 f"• {len(encrypted_usage)} encrypted data bag references "
-                 f"- convert to Ansible Vault"
-             )
+     # Check for complex patterns
+     search_patterns = [p for p in usage_patterns if "search" in p.get("type", "")]
+     if search_patterns:
+         recommendations.append(
+             f"• {len(search_patterns)} search patterns involving data bags "
+             f"- may need inventory integration"
+         )

-         # Check for complex patterns
-         search_patterns = [p for p in usage_patterns if "search" in p.get("type", "")]
-         if search_patterns:
-             recommendations.append(
-                 f"• {len(search_patterns)} search patterns involving data bags "
-                 f"- may need inventory integration"
-             )
+     return recommendations

-     # Analyze structure
-     if databag_structure:
-         total_bags = databag_structure.get("total_databags", 0)
-         encrypted_items = databag_structure.get("encrypted_items", 0)

-         if total_bags > 0:
-             recommendations.append(
-                 f"• Convert {total_bags} data bags to group_vars/host_vars structure"
-             )
+ def _analyze_databag_structure_recommendations(databag_structure: dict) -> list[str]:
+     """
+     Analyze databag structure and generate recommendations.
+
+     Args:
+         databag_structure: Dict with structure analysis
+
+     Returns:
+         List of recommendation strings
+
+     """
+     recommendations: list[str] = []
+
+     if not databag_structure:
+         return recommendations
+
+     total_bags = databag_structure.get("total_databags", 0)
+     encrypted_items = databag_structure.get("encrypted_items", 0)
+
+     if total_bags > 0:
+         recommendations.append(
+             f"• Convert {total_bags} data bags to group_vars/host_vars structure"
+         )
+
+     if encrypted_items > 0:
+         recommendations.append(
+             f"• {encrypted_items} encrypted items need Ansible Vault conversion"
+         )
+
+     return recommendations

-         if encrypted_items > 0:
-             recommendations.append(
-                 f"• {encrypted_items} encrypted items need Ansible Vault conversion"
-             )

-     # Variable scope recommendations
+ def _get_variable_scope_recommendations() -> list[str]:
+     """
+     Get standard variable scope recommendations.
+
+     Returns:
+         List of recommendation strings
+
+     """
+     return [
+         "• Use group_vars/ for environment-specific data (production, staging)",
+         "• Use host_vars/ for node-specific configurations",
+         "• Consider splitting large data bags into logical variable files",
+         "• Implement variable precedence hierarchy matching Chef environments",
+     ]
+
+
+ def _generate_databag_migration_recommendations(
+     usage_patterns: list, databag_structure: dict
+ ) -> str:
+     """
+     Generate migration recommendations based on usage analysis.
+
+     Combines usage pattern analysis, structure analysis, and best practices.
+
+     Args:
+         usage_patterns: List of databag usage patterns
+         databag_structure: Dict with databag structure info
+
+     Returns:
+         Formatted recommendations string
+
+     """
+     recommendations = []
+
+     # Analyze usage patterns
+     recommendations.extend(_analyze_usage_patterns(usage_patterns))
+
+     # Analyze structure
      recommendations.extend(
-         [
-             "• Use group_vars/ for environment-specific data (production, staging)",
-             "• Use host_vars/ for node-specific configurations",
-             "• Consider splitting large data bags into logical variable files",
-             "• Implement variable precedence hierarchy matching Chef environments",
-         ]
+         _analyze_databag_structure_recommendations(databag_structure)
      )

+     # Add variable scope best practices
+     recommendations.extend(_get_variable_scope_recommendations())
+
      return "\n".join(recommendations)


@@ -2073,7 +2524,11 @@ def generate_playbook_from_recipe(recipe_path: str) -> str:
          Generated Ansible playbook content.

      """
-     return _generate_playbook_from_recipe(recipe_path)
+     from souschef.converters.playbook import (
+         generate_playbook_from_recipe as _generate_playbook,
+     )
+
+     return _generate_playbook(recipe_path)


  def convert_chef_search_to_inventory(search_query: str) -> str:
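Since the module-level re-export of `generate_playbook_from_recipe` was dropped in an earlier hunk, the wrapper now imports the converter lazily at call time. Callers see no change:

```python
# Unchanged caller behaviour (recipe path illustrative).
playbook_yaml = generate_playbook_from_recipe("cookbooks/nginx/recipes/default.rb")
```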
@@ -2118,6 +2573,244 @@ def analyze_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
      return _analyze_chef_search_patterns(recipe_or_cookbook_path)


+ @mcp.tool()
+ def profile_cookbook_performance(cookbook_path: str) -> str:
+     """
+     Profile cookbook parsing performance and generate optimization report.
+
+     Analyzes the performance of parsing all cookbook components (recipes,
+     attributes, resources, templates) and provides recommendations for
+     optimization. Useful for large cookbooks or batch processing operations.
+
+     Args:
+         cookbook_path: Path to the Chef cookbook to profile.
+
+     Returns:
+         Formatted performance report with timing, memory usage, and recommendations.
+
+     """
+     from souschef.profiling import generate_cookbook_performance_report
+
+     try:
+         report = generate_cookbook_performance_report(cookbook_path)
+         return str(report)
+     except Exception as e:
+         return format_error_with_context(
+             e, "profiling cookbook performance", cookbook_path
+         )
+
+
+ @mcp.tool()
+ def profile_parsing_operation(
+     operation: str, file_path: str, detailed: bool = False
+ ) -> str:
+     """
+     Profile a single parsing operation with detailed performance metrics.
+
+     Measures execution time, memory usage, and optionally provides detailed
+     function call statistics for a specific parsing operation.
+
+     Args:
+         operation: Type of operation to profile ('recipe', 'attributes', 'resource', 'template').
+         file_path: Path to the file to parse.
+         detailed: If True, include detailed function call statistics.
+
+     Returns:
+         Performance metrics for the operation.
+
+     """
+     from souschef.profiling import detailed_profile_function, profile_function
+
+     operation_map = {
+         "recipe": parse_recipe,
+         "attributes": parse_attributes,
+         "resource": parse_custom_resource,
+         "template": parse_template,
+     }
+
+     if operation not in operation_map:
+         return (
+             f"Error: Invalid operation '{operation}'\n\n"
+             f"Supported operations: {', '.join(operation_map.keys())}"
+         )
+
+     func = operation_map[operation]
+
+     try:
+         if detailed:
+             _, profile_result = detailed_profile_function(func, file_path)
+             result = str(profile_result)
+             if profile_result.function_stats.get("top_functions"):
+                 result += "\n\nDetailed Function Statistics:\n"
+                 result += profile_result.function_stats["top_functions"]
+             return result
+         else:
+             _, profile_result = profile_function(func, file_path)
+             return str(profile_result)
+     except Exception as e:
+         return format_error_with_context(e, f"profiling {operation} parsing", file_path)
+
+
+ # CI/CD Pipeline Generation Tools
+
+
+ @mcp.tool()
+ def generate_jenkinsfile_from_chef(
+     cookbook_path: str,
+     pipeline_name: str = "chef-to-ansible-pipeline",
+     pipeline_type: str = "declarative",
+     enable_parallel: str = "yes",
+ ) -> str:
+     """
+     Generate Jenkins pipeline from Chef cookbook CI/CD patterns.
+
+     Analyzes Chef testing tools (Test Kitchen, ChefSpec, InSpec, Foodcritic)
+     and generates equivalent Jenkins pipeline stages (Declarative or Scripted).
+
+     Args:
+         cookbook_path: Path to Chef cookbook directory.
+         pipeline_name: Name for the Jenkins pipeline.
+         pipeline_type: Pipeline type - 'declarative' (recommended) or 'scripted'.
+         enable_parallel: Enable parallel test execution - 'yes' or 'no'.
+
+     Returns:
+         Jenkinsfile content (Groovy DSL) for Jenkins pipeline.
+
+     """
+     from souschef.ci.jenkins_pipeline import generate_jenkinsfile_from_chef_ci
+
+     try:
+         # Convert string to boolean
+         enable_parallel_bool = enable_parallel.lower() in ("yes", "true", "1")
+
+         result = generate_jenkinsfile_from_chef_ci(
+             cookbook_path=cookbook_path,
+             pipeline_name=pipeline_name,
+             pipeline_type=pipeline_type,
+             enable_parallel=enable_parallel_bool,
+         )
+         return result
+     except FileNotFoundError as e:
+         return format_error_with_context(e, "generating Jenkinsfile", cookbook_path)
+     except Exception as e:
+         return format_error_with_context(e, "generating Jenkinsfile", cookbook_path)
+
+
+ @mcp.tool()
+ def generate_gitlab_ci_from_chef(
+     cookbook_path: str,
+     project_name: str = "chef-to-ansible",
+     enable_cache: str = "yes",
+     enable_artifacts: str = "yes",
+ ) -> str:
+     """
+     Generate GitLab CI configuration from Chef cookbook CI/CD patterns.
+
+     Analyzes Chef testing tools and generates equivalent GitLab CI stages
+     with caching, artifacts, and parallel execution support.
+
+     Args:
+         cookbook_path: Path to Chef cookbook directory.
+         project_name: GitLab project name.
+         enable_cache: Enable caching for dependencies - 'yes' or 'no'.
+         enable_artifacts: Enable artifacts for test results - 'yes' or 'no'.
+
+     Returns:
+         .gitlab-ci.yml content (YAML) for GitLab CI/CD.
+
+     """
+     from souschef.ci.gitlab_ci import generate_gitlab_ci_from_chef_ci
+
+     try:
+         enable_cache_bool = enable_cache.lower() in ("yes", "true", "1")
+         enable_artifacts_bool = enable_artifacts.lower() in ("yes", "true", "1")
+         result = generate_gitlab_ci_from_chef_ci(
+             cookbook_path=cookbook_path,
+             project_name=project_name,
+             enable_cache=enable_cache_bool,
+             enable_artifacts=enable_artifacts_bool,
+         )
+         return result
+     except FileNotFoundError as e:
+         return format_error_with_context(
+             e,
+             "generating .gitlab-ci.yml",
+             cookbook_path,
+         )
+     except Exception as e:
+         return format_error_with_context(e, "generating .gitlab-ci.yml", cookbook_path)
+
+
+ @mcp.tool()
+ def generate_github_workflow_from_chef(
+     cookbook_path: str,
+     workflow_name: str = "Chef Cookbook CI",
+     enable_cache: str = "yes",
+     enable_artifacts: str = "yes",
+ ) -> str:
+     """
+     Generate GitHub Actions workflow from Chef cookbook CI/CD patterns.
+
+     Analyzes Chef testing tools and generates equivalent GitHub Actions workflow
+     with caching, artifacts, and matrix strategy support.
+
+     Args:
+         cookbook_path: Path to Chef cookbook directory.
+         workflow_name: GitHub Actions workflow name.
+         enable_cache: Enable caching for dependencies - 'yes' or 'no'.
+         enable_artifacts: Enable artifacts for test results - 'yes' or 'no'.
+
+     Returns:
+         GitHub Actions workflow YAML content (.github/workflows/*.yml).
+
+     """
+     from souschef.ci.github_actions import generate_github_workflow_from_chef_ci
+
+     try:
+         enable_cache_bool = enable_cache.lower() in ("yes", "true", "1")
+         enable_artifacts_bool = enable_artifacts.lower() in ("yes", "true", "1")
+         result = generate_github_workflow_from_chef_ci(
+             cookbook_path=cookbook_path,
+             workflow_name=workflow_name,
+             enable_cache=enable_cache_bool,
+             enable_artifacts=enable_artifacts_bool,
+         )
+         return result
+     except FileNotFoundError as e:
+         return format_error_with_context(
+             e,
+             "generating GitHub Actions workflow",
+             cookbook_path,
+         )
+     except Exception as e:
+         return format_error_with_context(
+             e, "generating GitHub Actions workflow", cookbook_path
+         )
+
+
+ @mcp.tool()
+ def parse_chef_migration_assessment(
+     cookbook_paths: str,
+     migration_scope: str = "full",
+     target_platform: str = "ansible_awx",
+ ) -> dict[str, Any]:
+     """
+     Parse Chef cookbook migration assessment and return as dictionary.
+
+     Args:
+         cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
+         migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
+         target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)
+
+     Returns:
+         Dictionary containing assessment data with complexity, recommendations, etc.
+
+     """
+     return _parse_chef_migration_assessment(
+         cookbook_paths, migration_scope, target_platform
+     )
+
+
  # AWX/AAP deployment wrappers for backward compatibility
  def main() -> None:
      """