mcp-souschef 2.1.2-py3-none-any.whl → 2.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
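
The dominant change in this release is centralized error reporting: the bare f"Error: {e}" returns scattered through souschef/server.py are replaced by calls to format_error_with_context, newly imported from souschef.core.errors. That module is not part of this diff, so the sketch below is only a plausible reconstruction, inferred from call sites that pass the exception, a phrase naming the failed operation, and usually the offending path:

# Hypothetical sketch of souschef.core.errors.format_error_with_context.
# Not the packaged implementation: only call sites appear in this diff, e.g.
#   format_error_with_context(e, "parsing InSpec profile", path)
#   format_error_with_context(e, "processing data bags directory", databags_directory)
def format_error_with_context(
    error: Exception,
    operation: str,
    subject: str | None = None,
) -> str:
    """Build a user-facing error message with operation context."""
    location = f" ({subject})" if subject else ""
    return f"Error while {operation}{location}: {type(error).__name__}: {error}"

The release also threads input validation (empty-argument checks with "Suggestion:" hints) through the MCP tools, splits several long functions into single-purpose helpers, removes the encryption_key_hint parameter from generate_ansible_vault_from_databags, and adds two profiling tools, profile_cookbook_performance and profile_parsing_operation, as the hunks below show.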
souschef/server.py CHANGED
@@ -27,6 +27,7 @@ from souschef.assessment import (

  # Import extracted modules
  # Import private helper functions still used in server.py
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.converters.habitat import ( # noqa: F401
  _add_service_build,
  _add_service_dependencies,
@@ -48,6 +49,7 @@ from souschef.converters.habitat import (
  )

  # Re-exports of playbook internal functions for backward compatibility (tests)
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.converters.playbook import ( # noqa: F401
  _add_general_recommendations,
  _convert_chef_block_to_ansible,
@@ -85,6 +87,8 @@ from souschef.converters.playbook import (
  from souschef.converters.playbook import (
  generate_playbook_from_recipe as _generate_playbook_from_recipe,
  )
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.converters.resource import ( # noqa: F401
  _convert_chef_resource_to_ansible,
  _format_ansible_task,
@@ -97,6 +101,7 @@ from souschef.converters.resource import (

  # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
  # These imports are intentionally exposed for external test access
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.constants import ( # noqa: F401
  ACTION_TO_STATE,
  ANSIBLE_SERVICE_MODULE,
@@ -106,16 +111,21 @@ from souschef.core.constants import ( # noqa: F401
  )

  # Import core utilities
+ from souschef.core.errors import format_error_with_context
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.path_utils import _normalize_path, _safe_join # noqa: F401

  # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
  # These imports are intentionally exposed for external test access
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.ruby_utils import ( # noqa: F401
  _normalize_ruby_value,
  )

  # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
  # These imports are intentionally exposed for external test access
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.core.validation import ( # noqa: F401
  ValidationCategory,
  ValidationEngine,
@@ -127,6 +137,7 @@ from souschef.core.validation import ( # noqa: F401
  # Re-exports of deployment internal functions for backward compatibility (tests)
  # Public re-exports of deployment functions for test backward compatibility
  # Note: MCP tool wrappers exist for some of these, but tests import directly
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.deployment import ( # noqa: F401
  _analyze_cookbook_for_awx,
  _analyze_cookbooks_directory,
@@ -180,6 +191,8 @@ from souschef.deployment import (
  # Import filesystem operations
  from souschef.filesystem import list_directory as _list_directory
  from souschef.filesystem import read_file as _read_file
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.attributes import ( # noqa: F401
  _extract_attributes,
  _format_attributes,
@@ -190,6 +203,8 @@ from souschef.parsers.attributes import ( # noqa: F401

  # Import parser functions
  from souschef.parsers.attributes import parse_attributes as _parse_attributes
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.habitat import ( # noqa: F401
  _extract_plan_array,
  _extract_plan_exports,
@@ -202,6 +217,7 @@ from souschef.parsers.habitat import ( # noqa: F401
  from souschef.parsers.habitat import parse_habitat_plan as _parse_habitat_plan

  # Re-export InSpec internal functions for backward compatibility (tests)
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.inspec import ( # noqa: F401
  _convert_inspec_to_ansible_assert,
  _convert_inspec_to_testinfra,
@@ -209,6 +225,8 @@ from souschef.parsers.inspec import ( # noqa: F401
  _generate_inspec_from_resource,
  _parse_inspec_control,
  )
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.metadata import ( # noqa: F401
  _extract_metadata,
  _format_cookbook_structure,
@@ -218,17 +236,23 @@ from souschef.parsers.metadata import (
  list_cookbook_structure as _list_cookbook_structure,
  )
  from souschef.parsers.metadata import read_cookbook_metadata as _read_cookbook_metadata
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.recipe import ( # noqa: F401
  _extract_conditionals,
  _extract_resources,
  _format_resources,
  )
  from souschef.parsers.recipe import parse_recipe as _parse_recipe
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.resource import ( # noqa: F401
  _extract_resource_actions,
  _extract_resource_properties,
  )
  from souschef.parsers.resource import parse_custom_resource as _parse_custom_resource
+
+ # lgtm[py/unused-import]: Backward compatibility exports for test suite
  from souschef.parsers.template import ( # noqa: F401
  _convert_erb_to_jinja2,
  _extract_code_block_variables,
@@ -525,17 +549,30 @@ def parse_inspec_profile(path: str) -> str:

  """
  try:
+ # Validate input
+ if not path or not path.strip():
+ return (
+ "Error: Path cannot be empty\n\n"
+ "Suggestion: Provide a path to an InSpec profile directory or control file"
+ )
+
  profile_path = _normalize_path(path)

  if not profile_path.exists():
- return f"Error: Path does not exist: {path}"
+ return (
+ f"Error: Path does not exist: {path}\n\n"
+ "Suggestion: Check that the path is correct and the InSpec profile exists"
+ )

  if profile_path.is_dir():
  controls = _parse_controls_from_directory(profile_path)
  elif profile_path.is_file():
  controls = _parse_controls_from_file(profile_path)
  else:
- return f"Error: Invalid path type: {path}"
+ return (
+ f"Error: Invalid path type: {path}\n\n"
+ "Suggestion: Provide a directory or file path, not a special file type"
+ )

  return json.dumps(
  {
@@ -547,9 +584,9 @@ def parse_inspec_profile(path: str) -> str:
  )

  except (FileNotFoundError, RuntimeError) as e:
- return f"Error: {e}"
+ return format_error_with_context(e, "parsing InSpec profile", path)
  except Exception as e:
- return f"An error occurred while parsing InSpec profile: {e}"
+ return format_error_with_context(e, "parsing InSpec profile", path)


  @mcp.tool()
@@ -610,7 +647,9 @@ def convert_inspec_to_test(inspec_path: str, output_format: str = "testinfra") -
  return "\n".join(converted_tests)

  except Exception as e:
- return f"An error occurred while converting InSpec: {e}"
+ return format_error_with_context(
+ e, f"converting InSpec to {output_format}", inspec_path
+ )


  def _extract_resources_from_parse_result(parse_result: str) -> list[dict[str, Any]]:
@@ -696,7 +735,9 @@ def generate_inspec_from_recipe(recipe_path: str) -> str:
  return "\n".join(controls)

  except Exception as e:
- return f"An error occurred while generating InSpec controls: {e}"
+ return format_error_with_context(
+ e, "generating InSpec controls from recipe", recipe_path
+ )


  @mcp.tool()
@@ -724,11 +765,34 @@ def convert_chef_databag_to_vars(
  try:
  import yaml

+ # Validate inputs
+ if not databag_content or not databag_content.strip():
+ return (
+ "Error: Databag content cannot be empty\n\n"
+ "Suggestion: Provide valid JSON content from a Chef data bag"
+ )
+
+ if not databag_name or not databag_name.strip():
+ return (
+ "Error: Databag name cannot be empty\n\n"
+ "Suggestion: Provide a valid data bag name"
+ )
+
+ valid_scopes = ["group_vars", "host_vars", "playbook"]
+ if target_scope not in valid_scopes:
+ return (
+ f"Error: Invalid target scope '{target_scope}'\n\n"
+ f"Suggestion: Use one of {', '.join(valid_scopes)}"
+ )
+
  # Parse the data bag content
  try:
  parsed_databag = json.loads(databag_content)
  except json.JSONDecodeError as e:
- return f"Error: Invalid JSON format in data bag: {e}"
+ return (
+ f"Error: Invalid JSON format in data bag: {e}\n\n"
+ "Suggestion: Ensure the databag content is valid JSON"
+ )

  # Convert to Ansible variables format
  ansible_vars = _convert_databag_to_ansible_vars(
@@ -763,14 +827,95 @@
  {yaml_content.rstrip()}
  """
  except Exception as e:
- return f"Error converting data bag to Ansible variables: {e}"
+ return format_error_with_context(
+ e, f"converting data bag '{databag_name}' to Ansible variables"
+ )


  @mcp.tool()
+ def _validate_databags_directory(
+ databags_directory: str,
+ ) -> tuple[Path | None, str | None]:
+ """
+ Validate databags directory input.
+
+ Args:
+ databags_directory: Path to the data bags directory.
+
+ Returns:
+ Tuple of (normalized_path, error_message).
+ If validation succeeds: (Path, None)
+ If validation fails: (None, error_message)
+
+ """
+ if not databags_directory or not databags_directory.strip():
+ return None, (
+ "Error: Databags directory path cannot be empty\n\n"
+ "Suggestion: Provide the path to your Chef data_bags directory"
+ )
+
+ databags_path = _normalize_path(databags_directory)
+ if not databags_path.exists():
+ return None, (
+ f"Error: Data bags directory not found: {databags_directory}\n\n"
+ "Suggestion: Check that the path is correct and the directory exists"
+ )
+
+ if not databags_path.is_dir():
+ return None, (
+ f"Error: Path is not a directory: {databags_directory}\n\n"
+ "Suggestion: Provide a path to the data_bags directory"
+ )
+
+ return databags_path, None
+
+
+ def _convert_databag_item(item_file, databag_name: str, output_directory: str) -> dict:
+ """Convert a single databag item file to Ansible format."""
+ item_name = item_file.stem
+
+ try:
+ with item_file.open() as f:
+ content = f.read()
+
+ # Detect if encrypted
+ is_encrypted = _detect_encrypted_databag(content)
+
+ # Convert to Ansible format
+ result = convert_chef_databag_to_vars(
+ content, databag_name, item_name, is_encrypted, output_directory
+ )
+
+ vault_suffix = "_vault" if is_encrypted else ""
+ target_file = f"{output_directory}/{databag_name}{vault_suffix}.yml"
+
+ return {
+ "databag": databag_name,
+ "item": item_name,
+ "encrypted": is_encrypted,
+ "target_file": target_file,
+ "content": result,
+ }
+
+ except Exception as e:
+ return {"databag": databag_name, "item": item_name, "error": str(e)}
+
+
+ def _process_databag_directory(databag_dir, output_directory: str) -> list[dict]:
+ """Process all items in a single databag directory."""
+ results = []
+ databag_name = databag_dir.name
+
+ for item_file in databag_dir.glob("*.json"):
+ result = _convert_databag_item(item_file, databag_name, output_directory)
+ results.append(result)
+
+ return results
+
+
  def generate_ansible_vault_from_databags(
  databags_directory: str,
  output_directory: str = "group_vars",
- encryption_key_hint: str = "",
  ) -> str:
  """
  Generate Ansible Vault files from Chef data bags directory.
@@ -778,16 +923,21 @@ def generate_ansible_vault_from_databags(
  Args:
  databags_directory: Path to Chef data_bags directory
  output_directory: Target directory for Ansible variables (group_vars/host_vars)
- encryption_key_hint: Hint for identifying encrypted data bags

  Returns:
  Summary of converted data bags and instructions

  """
  try:
- databags_path = _normalize_path(databags_directory)
- if not databags_path.exists():
- return f"Error: Data bags directory not found: {databags_directory}"
+ # Validate inputs
+ databags_path, error = _validate_databags_directory(databags_directory)
+ if error:
+ assert isinstance(error, str), "error must be string when present"
+ return error
+
+ assert databags_path is not None, (
+ "databags_path must be non-None after successful validation"
+ )

  conversion_results = []

@@ -796,40 +946,8 @@
  if not databag_dir.is_dir():
  continue

- databag_name = databag_dir.name
-
- # Process each item in the data bag
- for item_file in databag_dir.glob("*.json"):
- item_name = item_file.stem
-
- try:
- with item_file.open() as f:
- content = f.read()
-
- # Detect if encrypted (Chef encrypted data bags have specific structure)
- is_encrypted = _detect_encrypted_databag(content)
-
- # Convert to Ansible format
- result = convert_chef_databag_to_vars(
- content, databag_name, item_name, is_encrypted, output_directory
- )
-
- vault_suffix = "_vault" if is_encrypted else ""
- target_file = f"{output_directory}/{databag_name}{vault_suffix}.yml"
- conversion_results.append(
- {
- "databag": databag_name,
- "item": item_name,
- "encrypted": is_encrypted,
- "target_file": target_file,
- "content": result,
- }
- )
-
- except Exception as e:
- conversion_results.append(
- {"databag": databag_name, "item": item_name, "error": str(e)}
- )
+ results = _process_databag_directory(databag_dir, output_directory)
+ conversion_results.extend(results)

  # Generate summary and file structure
  return _generate_databag_conversion_summary(
@@ -837,7 +955,9 @@
  )

  except Exception as e:
- return f"Error processing data bags directory: {e}"
+ return format_error_with_context(
+ e, "processing data bags directory", databags_directory
+ )


  @mcp.tool()
@@ -891,7 +1011,7 @@ def analyze_chef_databag_usage(cookbook_path: str, databags_path: str = "") -> s
  4. Encrypt sensitive data with ansible-vault
  """
  except Exception as e:
- return f"Data bag parsing failed: {e}"
+ return format_error_with_context(e, "analyzing data bag usage", cookbook_path)


  @mcp.tool()
@@ -936,7 +1056,9 @@ def convert_chef_environment_to_inventory_group(
  # {environment_name}
  """
  except Exception as e:
- return f"Environment conversion failed: {e}"
+ return format_error_with_context(
+ e, "converting Chef environment to inventory group", environment_name
+ )


  @mcp.tool()
@@ -994,7 +1116,9 @@ def generate_inventory_from_chef_environments(
  )

  except Exception as e:
- return f"Error generating inventory from Chef environments: {e}"
+ return format_error_with_context(
+ e, "generating inventory from Chef environments", environments_directory
+ )


  @mcp.tool()
@@ -1053,7 +1177,9 @@ def analyze_chef_environment_usage(
  5. Test environment-specific deployments with new inventory structure
  """
  except Exception as e:
- return f"Error analyzing Chef environment usage: {e}"
+ return format_error_with_context(
+ e, "analyzing Chef environment usage", cookbook_path
+ )


  def _parse_chef_environment_content(content: str) -> dict:
@@ -1196,18 +1322,27 @@ def _generate_inventory_group_from_environment(
  return yaml.dump(group_vars, default_flow_style=False, indent=2)


- def _generate_complete_inventory_from_environments(
- environments: dict, results: list, output_format: str
- ) -> str:
- """Generate complete Ansible inventory from multiple Chef environments."""
- import yaml
+ def _build_conversion_summary(results: list) -> str:
+ """
+ Build summary of environment conversion results.
+
+ Args:
+ results: List of conversion result dicts
+
+ Returns:
+ Formatted summary string
+
+ """
+ total = len(results)
+ successful = len([r for r in results if r["status"] == "success"])
+ failed = len([r for r in results if r["status"] == "error"])

  summary = f"""# Chef Environments to Ansible Inventory Conversion

  ## Processing Summary:
- - Total environments processed: {len(results)}
- - Successfully converted: {len([r for r in results if r["status"] == "success"])}
- - Failed conversions: {len([r for r in results if r["status"] == "error"])}
+ - Total environments processed: {total}
+ - Successfully converted: {successful}
+ - Failed conversions: {failed}

  ## Environment Details:
  """
@@ -1223,35 +1358,71 @@ def _generate_complete_inventory_from_environments(
  else:
  summary += f"❌ {result['environment']}: {result['error']}\n"

- if output_format in ["yaml", "both"]:
- summary += "\n## YAML Inventory Structure:\n\n```yaml\n"
+ return summary

- # Generate YAML inventory
- inventory: dict[str, Any] = {"all": {"children": {}}}

- for env_name, env_data in environments.items():
- inventory["all"]["children"][env_name] = {
- "hosts": {}, # Hosts to be added manually
- "vars": _flatten_environment_vars(env_data),
- }
+ def _generate_yaml_inventory(environments: dict) -> str:
+ """
+ Generate YAML format inventory from environments.

- summary += yaml.dump(inventory, default_flow_style=False, indent=2)
- summary += "```\n"
+ Args:
+ environments: Dict of environment name to data

- if output_format in ["ini", "both"]:
- summary += "\n## INI Inventory Structure:\n\n```ini\n"
- summary += "[all:children]\n"
- for env_name in environments:
- summary += f"{env_name}\n"
+ Returns:
+ YAML inventory string
+
+ """
+ import yaml
+
+ inventory: dict[str, Any] = {"all": {"children": {}}}

- summary += "\n"
- for env_name in environments:
- summary += f"[{env_name}]\n"
- summary += "# Add your hosts here\n\n"
+ for env_name, env_data in environments.items():
+ inventory["all"]["children"][env_name] = {
+ "hosts": {}, # Hosts to be added manually
+ "vars": _flatten_environment_vars(env_data),
+ }

- summary += "```\n"
+ yaml_output = yaml.dump(inventory, default_flow_style=False, indent=2)
+ return f"\n## YAML Inventory Structure:\n\n```yaml\n{yaml_output}```\n"

- summary += """
+
+ def _generate_ini_inventory(environments: dict) -> str:
+ """
+ Generate INI format inventory from environments.
+
+ Args:
+ environments: Dict of environment name to data
+
+ Returns:
+ INI inventory string
+
+ """
+ output = "\n## INI Inventory Structure:\n\n```ini\n"
+ output += "[all:children]\n"
+ for env_name in environments:
+ output += f"{env_name}\n"
+
+ output += "\n"
+ for env_name in environments:
+ output += f"[{env_name}]\n"
+ output += "# Add your hosts here\n\n"
+
+ output += "```\n"
+ return output
+
+
+ def _generate_next_steps_guide(environments: dict) -> str:
+ """
+ Generate next steps and file structure guide.
+
+ Args:
+ environments: Dict of environment name to data
+
+ Returns:
+ Guide string
+
+ """
+ guide = """
  ## Next Steps:
  1. Create group_vars directory structure
  2. Add environment-specific variable files
@@ -1262,7 +1433,40 @@ def _generate_complete_inventory_from_environments(
  ## File Structure to Create:
  """
  for env_name in environments:
- summary += f"- inventory/group_vars/{env_name}.yml\n"
+ guide += f"- inventory/group_vars/{env_name}.yml\n"
+
+ return guide
+
+
+ def _generate_complete_inventory_from_environments(
+ environments: dict, results: list, output_format: str
+ ) -> str:
+ """
+ Generate complete Ansible inventory from multiple Chef environments.
+
+ Orchestrates summary, YAML/INI generation, and guidance.
+
+ Args:
+ environments: Dict of environment name to data
+ results: List of conversion results
+ output_format: Output format ("yaml", "ini", or "both")
+
+ Returns:
+ Complete formatted inventory with summary and guidance
+
+ """
+ # Build conversion summary
+ summary = _build_conversion_summary(results)
+
+ # Generate requested inventory formats
+ if output_format in ["yaml", "both"]:
+ summary += _generate_yaml_inventory(environments)
+
+ if output_format in ["ini", "both"]:
+ summary += _generate_ini_inventory(environments)
+
+ # Add next steps guide
+ summary += _generate_next_steps_guide(environments)

  return summary

@@ -1592,43 +1796,118 @@ def _detect_encrypted_databag(content: str) -> bool:
  return False


- def _generate_databag_conversion_summary(results: list, output_dir: str) -> str:
- """Generate summary of data bag conversion results."""
- total_bags = len(results)
- successful = len([r for r in results if "error" not in r])
- encrypted = len([r for r in results if r.get("encrypted", False)])
+ def _calculate_conversion_statistics(results: list) -> dict[str, int]:
+ """
+ Calculate statistics from conversion results.

- summary = f"""# Data Bag Conversion Summary
+ Args:
+ results: List of conversion result dictionaries.

- ## Statistics:
- - Total data bags processed: {total_bags}
- - Successfully converted: {successful}
- - Failed conversions: {total_bags - successful}
- - Encrypted data bags: {encrypted}
+ Returns:
+ Dictionary with 'total', 'successful', and 'encrypted' counts.

- ## Generated Files:
+ """
+ return {
+ "total": len(results),
+ "successful": len([r for r in results if "error" not in r]),
+ "encrypted": len([r for r in results if r.get("encrypted", False)]),
+ }
+
+
+ def _build_statistics_section(stats: dict[str, int]) -> str:
+ """
+ Build the statistics section of the summary.
+
+ Args:
+ stats: Dictionary with conversion statistics.
+
+ Returns:
+ Formatted statistics section as markdown.
+
+ """
+ return f"""# Data Bag Conversion Summary
+
+ ## Statistics:
+ - Total data bags processed: {stats["total"]}
+ - Successfully converted: {stats["successful"]}
+ - Failed conversions: {stats["total"] - stats["successful"]}
+ - Encrypted data bags: {stats["encrypted"]}
  """
+
+
+ def _extract_generated_files(results: list) -> list[str]:
+ """
+ Extract unique generated file paths from results.
+
+ Args:
+ results: List of conversion result dictionaries.
+
+ Returns:
+ Sorted list of unique file paths.
+
+ """
  files_created = set()
  for result in results:
  if "error" not in result:
  target_file = result["target_file"]
  files_created.add(target_file)
+ return sorted(files_created)
+
+
+ def _build_files_section(files: list[str]) -> str:
+ """
+ Build the generated files section.

- for file in sorted(files_created):
- summary += f"- {file}\n"
+ Args:
+ files: List of generated file paths.

- summary += "\n## Conversion Details:\n"
+ Returns:
+ Formatted files section as markdown.
+
+ """
+ section = "\n## Generated Files:\n"
+ for file in files:
+ section += f"- {file}\n"
+ return section
+
+
+ def _build_conversion_details_section(results: list) -> str:
+ """
+ Build the conversion details section.
+
+ Args:
+ results: List of conversion result dictionaries.
+
+ Returns:
+ Formatted conversion details section as markdown.
+
+ """
+ section = "\n## Conversion Details:\n"

  for result in results:
  if "error" in result:
- summary += f"❌ {result['databag']}/{result['item']}: {result['error']}\n"
+ section += f"❌ {result['databag']}/{result['item']}: {result['error']}\n"
  else:
  status = "🔒 Encrypted" if result["encrypted"] else "📄 Plain"
  databag_item = f"{result['databag']}/{result['item']}"
  target = result["target_file"]
- summary += f"✅ {databag_item} → {target} ({status})\n"
+ section += f"✅ {databag_item} → {target} ({status})\n"

- summary += f"""
+ return section
+
+
+ def _build_next_steps_section(output_dir: str) -> str:
+ """
+ Build the next steps section.
+
+ Args:
+ output_dir: Output directory path.
+
+ Returns:
+ Formatted next steps section as markdown.
+
+ """
+ return f"""
  ## Next Steps:
  1. Review generated variable files in {output_dir}/
  2. Encrypt vault files: `ansible-vault encrypt {output_dir}/*_vault.yml`
@@ -1636,7 +1915,29 @@ def _generate_databag_conversion_summary(results: list, output_dir: str) -> str:
  4. Test variable access in playbooks
  5. Remove original Chef data bags after validation
  """
- return summary
+
+
+ def _generate_databag_conversion_summary(results: list, output_dir: str) -> str:
+ """
+ Generate summary of data bag conversion results.
+
+ Args:
+ results: List of conversion result dictionaries.
+ output_dir: Output directory path.
+
+ Returns:
+ Complete formatted summary as markdown.
+
+ """
+ stats = _calculate_conversion_statistics(results)
+ files = _extract_generated_files(results)
+
+ return (
+ _build_statistics_section(stats)
+ + _build_files_section(files)
+ + _build_conversion_details_section(results)
+ + _build_next_steps_section(output_dir)
+ )


  def _extract_databag_usage_from_cookbook(cookbook_path) -> list:
@@ -1745,65 +2046,126 @@ def _analyze_databag_structure(databags_path) -> dict:
  return structure


- def _generate_databag_migration_recommendations(
- usage_patterns: list, databag_structure: dict
- ) -> str:
- """Generate migration recommendations based on usage analysis."""
- recommendations = []
+ def _analyze_usage_patterns(usage_patterns: list) -> list[str]:
+ """
+ Analyze databag usage patterns and generate recommendations.

- # Analyze usage patterns
- if usage_patterns:
- unique_databags = {
- p.get("databag_name") for p in usage_patterns if p.get("databag_name")
- }
+ Args:
+ usage_patterns: List of usage pattern dicts
+
+ Returns:
+ List of recommendation strings
+
+ """
+ recommendations: list[str] = []
+
+ if not usage_patterns:
+ return recommendations
+
+ unique_databags = {
+ p.get("databag_name") for p in usage_patterns if p.get("databag_name")
+ }
+ recommendations.append(
+ f"• Found {len(usage_patterns)} data bag references "
+ f"across {len(unique_databags)} different data bags"
+ )
+
+ # Check for encrypted usage
+ encrypted_usage = [p for p in usage_patterns if "encrypted" in p.get("type", "")]
+ if encrypted_usage:
  recommendations.append(
- f"• Found {len(usage_patterns)} data bag references "
- f"across {len(unique_databags)} different data bags"
+ f"• {len(encrypted_usage)} encrypted data bag references "
+ f"- convert to Ansible Vault"
  )

- # Check for encrypted usage
- encrypted_usage = [
- p for p in usage_patterns if "encrypted" in p.get("type", "")
- ]
- if encrypted_usage:
- recommendations.append(
- f"• {len(encrypted_usage)} encrypted data bag references "
- f"- convert to Ansible Vault"
- )
+ # Check for complex patterns
+ search_patterns = [p for p in usage_patterns if "search" in p.get("type", "")]
+ if search_patterns:
+ recommendations.append(
+ f"• {len(search_patterns)} search patterns involving data bags "
+ f"- may need inventory integration"
+ )

- # Check for complex patterns
- search_patterns = [p for p in usage_patterns if "search" in p.get("type", "")]
- if search_patterns:
- recommendations.append(
- f"• {len(search_patterns)} search patterns involving data bags "
- f"- may need inventory integration"
- )
+ return recommendations

- # Analyze structure
- if databag_structure:
- total_bags = databag_structure.get("total_databags", 0)
- encrypted_items = databag_structure.get("encrypted_items", 0)

- if total_bags > 0:
- recommendations.append(
- f"• Convert {total_bags} data bags to group_vars/host_vars structure"
- )
+ def _analyze_databag_structure_recommendations(databag_structure: dict) -> list[str]:
+ """
+ Analyze databag structure and generate recommendations.

- if encrypted_items > 0:
- recommendations.append(
- f"• {encrypted_items} encrypted items need Ansible Vault conversion"
- )
+ Args:
+ databag_structure: Dict with structure analysis
+
+ Returns:
+ List of recommendation strings
+
+ """
+ recommendations: list[str] = []

- # Variable scope recommendations
+ if not databag_structure:
+ return recommendations
+
+ total_bags = databag_structure.get("total_databags", 0)
+ encrypted_items = databag_structure.get("encrypted_items", 0)
+
+ if total_bags > 0:
+ recommendations.append(
+ f"• Convert {total_bags} data bags to group_vars/host_vars structure"
+ )
+
+ if encrypted_items > 0:
+ recommendations.append(
+ f"• {encrypted_items} encrypted items need Ansible Vault conversion"
+ )
+
+ return recommendations
+
+
+ def _get_variable_scope_recommendations() -> list[str]:
+ """
+ Get standard variable scope recommendations.
+
+ Returns:
+ List of recommendation strings
+
+ """
+ return [
+ "• Use group_vars/ for environment-specific data (production, staging)",
+ "• Use host_vars/ for node-specific configurations",
+ "• Consider splitting large data bags into logical variable files",
+ "• Implement variable precedence hierarchy matching Chef environments",
+ ]
+
+
+ def _generate_databag_migration_recommendations(
+ usage_patterns: list, databag_structure: dict
+ ) -> str:
+ """
+ Generate migration recommendations based on usage analysis.
+
+ Combines usage pattern analysis, structure analysis, and best practices.
+
+ Args:
+ usage_patterns: List of databag usage patterns
+ databag_structure: Dict with databag structure info
+
+ Returns:
+ Formatted recommendations string
+
+ """
+ recommendations = []
+
+ # Analyze usage patterns
+ recommendations.extend(_analyze_usage_patterns(usage_patterns))
+
+ # Analyze structure
  recommendations.extend(
- [
- "• Use group_vars/ for environment-specific data (production, staging)",
- "• Use host_vars/ for node-specific configurations",
- "• Consider splitting large data bags into logical variable files",
- "• Implement variable precedence hierarchy matching Chef environments",
- ]
+ _analyze_databag_structure_recommendations(databag_structure)
  )

+ # Add variable scope best practices
+ recommendations.extend(_get_variable_scope_recommendations())
+
  return "\n".join(recommendations)


@@ -2118,6 +2480,84 @@ def analyze_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
  return _analyze_chef_search_patterns(recipe_or_cookbook_path)


+ @mcp.tool()
+ def profile_cookbook_performance(cookbook_path: str) -> str:
+ """
+ Profile cookbook parsing performance and generate optimization report.
+
+ Analyzes the performance of parsing all cookbook components (recipes,
+ attributes, resources, templates) and provides recommendations for
+ optimization. Useful for large cookbooks or batch processing operations.
+
+ Args:
+ cookbook_path: Path to the Chef cookbook to profile.
+
+ Returns:
+ Formatted performance report with timing, memory usage, and recommendations.
+
+ """
+ from souschef.profiling import generate_cookbook_performance_report
+
+ try:
+ report = generate_cookbook_performance_report(cookbook_path)
+ return str(report)
+ except Exception as e:
+ return format_error_with_context(
+ e, "profiling cookbook performance", cookbook_path
+ )
+
+
+ @mcp.tool()
+ def profile_parsing_operation(
+ operation: str, file_path: str, detailed: bool = False
+ ) -> str:
+ """
+ Profile a single parsing operation with detailed performance metrics.
+
+ Measures execution time, memory usage, and optionally provides detailed
+ function call statistics for a specific parsing operation.
+
+ Args:
+ operation: Type of operation to profile ('recipe', 'attributes', 'resource', 'template').
+ file_path: Path to the file to parse.
+ detailed: If True, include detailed function call statistics.
+
+ Returns:
+ Performance metrics for the operation.
+
+ """
+ from souschef.profiling import detailed_profile_function, profile_function
+
+ operation_map = {
+ "recipe": parse_recipe,
+ "attributes": parse_attributes,
+ "resource": parse_custom_resource,
+ "template": parse_template,
+ }
+
+ if operation not in operation_map:
+ return (
+ f"Error: Invalid operation '{operation}'\n\n"
+ f"Supported operations: {', '.join(operation_map.keys())}"
+ )
+
+ func = operation_map[operation]
+
+ try:
+ if detailed:
+ _, profile_result = detailed_profile_function(func, file_path)
+ result = str(profile_result)
+ if profile_result.function_stats.get("top_functions"):
+ result += "\n\nDetailed Function Statistics:\n"
+ result += profile_result.function_stats["top_functions"]
+ return result
+ else:
+ _, profile_result = profile_function(func, file_path)
+ return str(profile_result)
+ except Exception as e:
+ return format_error_with_context(e, f"profiling {operation} parsing", file_path)
+
+
  # AWX/AAP deployment wrappers for backward compatibility
  def main() -> None:
  """