mcp-souschef 2.2.0-py3-none-any.whl → 2.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.2.0.dist-info → mcp_souschef-2.8.0.dist-info}/METADATA +226 -38
- mcp_souschef-2.8.0.dist-info/RECORD +42 -0
- mcp_souschef-2.8.0.dist-info/entry_points.txt +4 -0
- souschef/__init__.py +10 -2
- souschef/assessment.py +113 -30
- souschef/ci/__init__.py +11 -0
- souschef/ci/github_actions.py +379 -0
- souschef/ci/gitlab_ci.py +299 -0
- souschef/ci/jenkins_pipeline.py +343 -0
- souschef/cli.py +605 -5
- souschef/converters/__init__.py +2 -2
- souschef/converters/cookbook_specific.py +125 -0
- souschef/converters/cookbook_specific.py.backup +109 -0
- souschef/converters/playbook.py +853 -15
- souschef/converters/resource.py +103 -1
- souschef/core/constants.py +13 -0
- souschef/core/path_utils.py +12 -9
- souschef/core/validation.py +35 -2
- souschef/deployment.py +29 -27
- souschef/filesystem/operations.py +0 -7
- souschef/parsers/__init__.py +6 -1
- souschef/parsers/attributes.py +397 -32
- souschef/parsers/inspec.py +343 -18
- souschef/parsers/metadata.py +30 -0
- souschef/parsers/recipe.py +48 -10
- souschef/server.py +429 -178
- souschef/ui/__init__.py +8 -0
- souschef/ui/app.py +2998 -0
- souschef/ui/health_check.py +36 -0
- souschef/ui/pages/ai_settings.py +497 -0
- souschef/ui/pages/cookbook_analysis.py +1360 -0
- mcp_souschef-2.2.0.dist-info/RECORD +0 -31
- mcp_souschef-2.2.0.dist-info/entry_points.txt +0 -4
- {mcp_souschef-2.2.0.dist-info → mcp_souschef-2.8.0.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.2.0.dist-info → mcp_souschef-2.8.0.dist-info}/licenses/LICENSE +0 -0
souschef/server.py CHANGED
@@ -10,7 +10,7 @@ from mcp.server.fastmcp import FastMCP

 # Import assessment functions with aliases to avoid name conflicts
 from souschef.assessment import (
-    analyze_cookbook_dependencies as _analyze_cookbook_dependencies,
+    analyse_cookbook_dependencies as _analyse_cookbook_dependencies,
 )
 from souschef.assessment import (
     assess_chef_migration_complexity as _assess_chef_migration_complexity,
@@ -21,13 +21,16 @@ from souschef.assessment import (
 from souschef.assessment import (
     generate_migration_report as _generate_migration_report,
 )
+from souschef.assessment import (
+    parse_chef_migration_assessment as _parse_chef_migration_assessment,
+)
 from souschef.assessment import (
     validate_conversion as _validate_conversion,
 )

 # Import extracted modules
 # Import private helper functions still used in server.py
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.converters.habitat import ( # noqa: F401
     _add_service_build,
     _add_service_dependencies,
@@ -49,7 +52,7 @@ from souschef.converters.habitat import (
 )

 # Re-exports of playbook internal functions for backward compatibility (tests)
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.converters.playbook import ( # noqa: F401
     _add_general_recommendations,
     _convert_chef_block_to_ansible,
@@ -76,7 +79,7 @@ from souschef.converters.playbook import ( # noqa: F401

 # Import playbook converter functions
 from souschef.converters.playbook import (
-    analyze_chef_search_patterns as _analyze_chef_search_patterns,
+    analyse_chef_search_patterns as _analyse_chef_search_patterns,
 )
 from souschef.converters.playbook import (
     convert_chef_search_to_inventory as _convert_chef_search_to_inventory,
@@ -84,11 +87,8 @@ from souschef.converters.playbook import (
 from souschef.converters.playbook import (
     generate_dynamic_inventory_script as _generate_dynamic_inventory_script,
 )
-from souschef.converters.playbook import (
-    generate_playbook_from_recipe as _generate_playbook_from_recipe,
-)

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.converters.resource import ( # noqa: F401
     _convert_chef_resource_to_ansible,
     _format_ansible_task,
@@ -101,7 +101,7 @@ from souschef.converters.resource import (

 # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
 # These imports are intentionally exposed for external test access
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.core.constants import ( # noqa: F401
     ACTION_TO_STATE,
     ANSIBLE_SERVICE_MODULE,
@@ -113,19 +113,19 @@ from souschef.core.constants import ( # noqa: F401
 # Import core utilities
 from souschef.core.errors import format_error_with_context

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.core.path_utils import _normalize_path, _safe_join # noqa: F401

 # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
 # These imports are intentionally exposed for external test access
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.core.ruby_utils import ( # noqa: F401
     _normalize_ruby_value,
 )

 # Re-exports for backward compatibility (used by tests) - DO NOT REMOVE
 # These imports are intentionally exposed for external test access
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.core.validation import ( # noqa: F401
     ValidationCategory,
     ValidationEngine,
@@ -137,10 +137,10 @@ from souschef.core.validation import ( # noqa: F401
 # Re-exports of deployment internal functions for backward compatibility (tests)
 # Public re-exports of deployment functions for test backward compatibility
 # Note: MCP tool wrappers exist for some of these, but tests import directly
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.deployment import ( # noqa: F401
-    _analyze_cookbook_for_awx,
-    _analyze_cookbooks_directory,
+    _analyse_cookbook_for_awx,
+    _analyse_cookbooks_directory,
     _detect_deployment_patterns_in_recipe,
     _extract_cookbook_attributes,
     _extract_cookbook_dependencies,
@@ -151,7 +151,7 @@ from souschef.deployment import ( # noqa: F401
     _generate_survey_fields_from_attributes,
     _parse_chef_runlist,
     _recommend_ansible_strategies,
-    analyze_chef_application_patterns,
+    analyse_chef_application_patterns,
     convert_chef_deployment_to_ansible_strategy,
     generate_awx_inventory_source_from_chef,
     generate_awx_job_template_from_cookbook,
@@ -163,9 +163,6 @@ from souschef.deployment import ( # noqa: F401

 # Re-exports for backward compatibility (used by tests)
 # These are imported and re-exported intentionally
-from souschef.deployment import (
-    analyze_chef_application_patterns as _analyze_chef_application_patterns,
-)
 from souschef.deployment import (
     convert_chef_deployment_to_ansible_strategy as _convert_chef_deployment_to_ansible_strategy,
 )
@@ -192,7 +189,7 @@ from souschef.deployment import (
 from souschef.filesystem import list_directory as _list_directory
 from souschef.filesystem import read_file as _read_file

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.attributes import ( # noqa: F401
     _extract_attributes,
     _format_attributes,
@@ -204,7 +201,7 @@ from souschef.parsers.attributes import ( # noqa: F401
 # Import parser functions
 from souschef.parsers.attributes import parse_attributes as _parse_attributes

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.habitat import ( # noqa: F401
     _extract_plan_array,
     _extract_plan_exports,
@@ -217,16 +214,24 @@ from souschef.parsers.habitat import ( # noqa: F401
 from souschef.parsers.habitat import parse_habitat_plan as _parse_habitat_plan

 # Re-export InSpec internal functions for backward compatibility (tests)
-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.inspec import ( # noqa: F401
     _convert_inspec_to_ansible_assert,
+    _convert_inspec_to_goss,
+    _convert_inspec_to_serverspec,
     _convert_inspec_to_testinfra,
     _extract_inspec_describe_blocks,
     _generate_inspec_from_resource,
     _parse_inspec_control,
 )
+from souschef.parsers.inspec import (
+    convert_inspec_to_test as _convert_inspec_test,
+)
+from souschef.parsers.inspec import (
+    parse_inspec_profile as _parse_inspec,
+)

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.metadata import ( # noqa: F401
     _extract_metadata,
     _format_cookbook_structure,
@@ -235,9 +240,12 @@ from souschef.parsers.metadata import ( # noqa: F401
 from souschef.parsers.metadata import (
     list_cookbook_structure as _list_cookbook_structure,
 )
+from souschef.parsers.metadata import (
+    parse_cookbook_metadata as _parse_cookbook_metadata,
+)
 from souschef.parsers.metadata import read_cookbook_metadata as _read_cookbook_metadata

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.recipe import ( # noqa: F401
     _extract_conditionals,
     _extract_resources,
@@ -245,14 +253,14 @@ from souschef.parsers.recipe import ( # noqa: F401
 )
 from souschef.parsers.recipe import parse_recipe as _parse_recipe

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.resource import ( # noqa: F401
     _extract_resource_actions,
     _extract_resource_properties,
 )
 from souschef.parsers.resource import parse_custom_resource as _parse_custom_resource

-#
+# codeql[py/unused-import]: Backward compatibility exports for test suite
 from souschef.parsers.template import ( # noqa: F401
     _convert_erb_to_jinja2,
     _extract_code_block_variables,
@@ -354,6 +362,21 @@ def read_cookbook_metadata(path: str) -> str:
     return _read_cookbook_metadata(path)


+@mcp.tool()
+def parse_cookbook_metadata(path: str) -> dict[str, str | list[str]]:
+    """
+    Parse Chef cookbook metadata.rb file and return as dictionary.
+
+    Args:
+        path: Path to the metadata.rb file.
+
+    Returns:
+        Dictionary containing extracted metadata fields.
+
+    """
+    return _parse_cookbook_metadata(path)
+
+
 @mcp.tool()
 def parse_recipe(path: str) -> str:
     """
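Note: a minimal usage sketch of the new metadata tool (the cookbook path is hypothetical, and the exact keys returned depend on what souschef.parsers.metadata extracts):

```python
# Hypothetical example - parse_cookbook_metadata is the parser function the
# MCP tool above wraps; the path and printed keys are illustrative only.
from souschef.parsers.metadata import parse_cookbook_metadata

meta = parse_cookbook_metadata("cookbooks/nginx/metadata.rb")
print(meta.get("name"), meta.get("version"), meta.get("depends"))
```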
@@ -537,6 +560,7 @@ def _parse_controls_from_file(profile_path: Path) -> list[dict[str, Any]]:
         raise RuntimeError(f"Error reading file: {e}") from e


+@mcp.tool()
 def parse_inspec_profile(path: str) -> str:
     """
     Parse an InSpec profile and extract controls.
@@ -548,108 +572,23 @@ def parse_inspec_profile(path: str) -> str:
         JSON string with parsed controls, or error message.

     """
-    try:
-        # Validate input
-        if not path or not path.strip():
-            return (
-                "Error: Path cannot be empty\n\n"
-                "Suggestion: Provide a path to an InSpec profile directory or control file"
-            )
-
-        profile_path = _normalize_path(path)
-
-        if not profile_path.exists():
-            return (
-                f"Error: Path does not exist: {path}\n\n"
-                "Suggestion: Check that the path is correct and the InSpec profile exists"
-            )
-
-        if profile_path.is_dir():
-            controls = _parse_controls_from_directory(profile_path)
-        elif profile_path.is_file():
-            controls = _parse_controls_from_file(profile_path)
-        else:
-            return (
-                f"Error: Invalid path type: {path}\n\n"
-                "Suggestion: Provide a directory or file path, not a special file type"
-            )
-
-        return json.dumps(
-            {
-                "profile_path": str(profile_path),
-                "controls_count": len(controls),
-                "controls": controls,
-            },
-            indent=2,
-        )
-
-    except (FileNotFoundError, RuntimeError) as e:
-        return format_error_with_context(e, "parsing InSpec profile", path)
-    except Exception as e:
-        return format_error_with_context(e, "parsing InSpec profile", path)
+    return _parse_inspec(path)


 @mcp.tool()
 def convert_inspec_to_test(inspec_path: str, output_format: str = "testinfra") -> str:
     """
-    Convert InSpec controls to
+    Convert InSpec controls to test framework format.

     Args:
         inspec_path: Path to InSpec profile or control file.
-        output_format: Output format ('testinfra' or '
+        output_format: Output format ('testinfra', 'ansible_assert', 'serverspec', or 'goss').

     Returns:
         Converted test code or error message.

     """
-    try:
-        # First parse the InSpec profile
-        parse_result = parse_inspec_profile(inspec_path)
-
-        # Check if parsing failed
-        if parse_result.startswith(ERROR_PREFIX):
-            return parse_result
-
-        # Parse JSON result
-        profile_data = json.loads(parse_result)
-        controls = profile_data["controls"]
-
-        if not controls:
-            return "Error: No controls found in InSpec profile"
-
-        # Convert each control
-        converted_tests = []
-
-        if output_format == "testinfra":
-            converted_tests.append("import pytest")
-            converted_tests.append("")
-            converted_tests.append("")
-            for control in controls:
-                test_code = _convert_inspec_to_testinfra(control)
-                converted_tests.append(test_code)
-
-        elif output_format == "ansible_assert":
-            converted_tests.append("---")
-            converted_tests.append("# Validation tasks converted from InSpec")
-            converted_tests.append("")
-            for control in controls:
-                assert_code = _convert_inspec_to_ansible_assert(control)
-                converted_tests.append(assert_code)
-            converted_tests.append("")
-
-        else:
-            error_msg = (
-                f"Error: Unsupported format '{output_format}'. "
-                "Use 'testinfra' or 'ansible_assert'"
-            )
-            return error_msg
-
-        return "\n".join(converted_tests)
-
-    except Exception as e:
-        return format_error_with_context(
-            e, f"converting InSpec to {output_format}", inspec_path
-        )
+    return _convert_inspec_test(inspec_path, output_format)


 def _extract_resources_from_parse_result(parse_result: str) -> list[dict[str, Any]]:
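Note: with this refactor both tools are thin wrappers over souschef.parsers.inspec. A minimal sketch (the profile path is hypothetical; 'serverspec' and 'goss' are the formats newly imported above):

```python
# Hypothetical example - convert_inspec_to_test is confirmed by the import
# block earlier in this diff; the profile path is illustrative only.
from souschef.parsers.inspec import convert_inspec_to_test

goss_spec = convert_inspec_to_test("cookbooks/nginx/test/integration/default", "goss")
print(goss_spec)
```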
@@ -961,9 +900,9 @@ def generate_ansible_vault_from_databags(


 @mcp.tool()
-def analyze_chef_databag_usage(cookbook_path: str, databags_path: str = "") -> str:
+def analyse_chef_databag_usage(cookbook_path: str, databags_path: str = "") -> str:
     """
-    Analyze Chef cookbook for data bag usage and provide migration recommendations.
+    Analyse Chef cookbook for data bag usage and provide migration recommendations.

     Args:
         cookbook_path: Path to Chef cookbook
@@ -986,7 +925,7 @@ def analyze_chef_databag_usage(cookbook_path: str, databags_path: str = "") -> str:
     if databags_path:
         databags = _normalize_path(databags_path)
         if databags.exists():
-            databag_structure = _analyze_databag_structure(databags)
+            databag_structure = _analyse_databag_structure(databags)

     # Generate recommendations
     recommendations = _generate_databag_migration_recommendations(
@@ -1122,11 +1061,11 @@ def generate_inventory_from_chef_environments(


 @mcp.tool()
-def analyze_chef_environment_usage(
+def analyse_chef_environment_usage(
     cookbook_path: str, environments_path: str = ""
 ) -> str:
     """
-    Analyze Chef cookbook for environment usage.
+    Analyse Chef cookbook for environment usage.

     Provides migration recommendations.

@@ -1151,7 +1090,7 @@ def analyze_chef_environment_usage(
     if environments_path:
         environments = _normalize_path(environments_path)
         if environments.exists():
-            environment_structure = _analyze_environments_structure(environments)
+            environment_structure = _analyse_environments_structure(environments)

     # Generate recommendations
     recommendations = _generate_environment_migration_recommendations(
@@ -1220,6 +1159,183 @@ def _parse_chef_environment_content(content: str) -> dict:
     return env_data


+def _convert_ruby_literal(value: str) -> Any:
+    """
+    Convert Ruby literal values to equivalent Python types.
+
+    This function handles the conversion of Ruby's basic literal values
+    to their Python equivalents during Chef environment parsing.
+
+    Args:
+        value: String representation of a Ruby literal value.
+
+    Returns:
+        The converted Python value:
+        - "true" -> True (bool)
+        - "false" -> False (bool)
+        - "nil" -> None
+        - Integer strings -> int (e.g., "42" -> 42)
+        - Float strings -> float (e.g., "3.14" -> 3.14, "1e10" -> 10000000000.0)
+        - Unrecognized values -> original string unchanged
+
+    Examples:
+        >>> _convert_ruby_literal("true")
+        True
+        >>> _convert_ruby_literal("42")
+        42
+        >>> _convert_ruby_literal("3.14")
+        3.14
+        >>> _convert_ruby_literal("nil")
+        None
+        >>> _convert_ruby_literal("some_string")
+        'some_string'
+
+    """
+    # Handle boolean and nil values
+    literal_map = {
+        "true": True,
+        "false": False,
+        "nil": None,
+    }
+
+    if value in literal_map:
+        return literal_map[value]
+
+    # Handle numeric values
+    try:
+        # Try integer first
+        if "." not in value and "e" not in value.lower():
+            return int(value)
+        else:
+            return float(value)
+    except ValueError:
+        pass
+
+    # Return as string if no conversion applies
+    return value
+
+
+def _parse_quoted_key(content: str, i: int) -> tuple[str, int]:
+    """Parse a quoted key and return (key, new_index)."""
+    if content[i] not in "'\"":
+        raise ValueError("Expected quote at start of key")
+
+    quote = content[i]
+    i += 1
+    key_start = i
+    while i < len(content) and content[i] != quote:
+        i += 1
+    key = content[key_start:i]
+    i += 1  # skip closing quote
+    return key, i
+
+
+def _parse_nested_hash(content: str, i: int) -> tuple[dict, int]:
+    """Parse a nested hash and return (parsed_dict, new_index)."""
+    if content[i] != "{":
+        raise ValueError("Expected opening brace for nested hash")
+
+    brace_count = 1
+    start = i
+    i += 1
+    while i < len(content) and brace_count > 0:
+        if content[i] == "{":
+            brace_count += 1
+        elif content[i] == "}":
+            brace_count -= 1
+        i += 1
+
+    nested_content = content[start + 1 : i - 1]  # exclude braces
+    return parse_ruby_hash(nested_content), i
+
+
+def _parse_simple_value(content: str, i: int) -> tuple[str, int]:
+    """Parse a simple value and return (value, new_index)."""
+    value_start = i
+    while i < len(content) and content[i] not in ",}":
+        i += 1
+    value = content[value_start:i].strip()
+    # Remove quotes if present
+    if (value.startswith("'") and value.endswith("'")) or (
+        value.startswith('"') and value.endswith('"')
+    ):
+        value = value[1:-1]
+    else:
+        # Convert Ruby literals to Python types
+        value = _convert_ruby_literal(value)
+    return value, i
+
+
+def _skip_to_next_item(content: str, i: int) -> int:
+    """Skip to the next item, handling delimiters."""
+    while i < len(content) and content[i] not in ",}":
+        i += 1
+    if i < len(content) and (content[i] == "," or content[i] == "}"):
+        i += 1
+    return i
+
+
+def parse_ruby_hash(content: str) -> dict:
+    """Parse Ruby hash syntax recursively."""
+    result = {}
+
+    # Simple recursive parser for Ruby hash syntax
+    # This handles nested braces by counting them
+    i = 0
+    while i < len(content):
+        # Skip whitespace
+        i = _skip_whitespace(content, i)
+        if i >= len(content):
+            break
+
+        # Parse key-value pair
+        key, value, i = _parse_key_value_pair(content, i)
+        if key is not None:
+            result[key] = value
+
+        # Skip to next item
+        i = _skip_to_next_item(content, i)
+
+    return result
+
+
+def _skip_whitespace(content: str, i: int) -> int:
+    """Skip whitespace characters and return new index."""
+    while i < len(content) and content[i].isspace():
+        i += 1
+    return i
+
+
+def _parse_key_value_pair(content: str, i: int) -> tuple[str | None, Any, int]:
+    """Parse a single key => value pair and return (key, value, new_index)."""
+    # Look for key => value patterns
+    if content[i] in "'\"":
+        # Parse quoted key
+        key, i = _parse_quoted_key(content, i)
+
+        # Skip whitespace and =>
+        i = _skip_whitespace_and_arrows(content, i)
+
+        value: Any
+        if i < len(content) and content[i] == "{":
+            # Nested hash
+            value, i = _parse_nested_hash(content, i)
+        else:
+            # Simple value
+            value, i = _parse_simple_value(content, i)
+
+        return key, value, i
+
+    return None, None, i
+
+
+def _skip_whitespace_and_arrows(content: str, i: int) -> int:
+    """Skip whitespace and => symbols."""
+    while i < len(content) and (content[i].isspace() or content[i] in "=>"):
+        i += 1
+    return i
+
+
 def _extract_attributes_block(content: str, block_type: str) -> dict:
     """Extract attribute blocks from Chef environment content."""
     # Find the block start
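Note: a short worked example of the Ruby-hash parser added above (the input string is hypothetical; the expected result follows from _parse_simple_value and _convert_ruby_literal):

```python
# parse_ruby_hash is defined in souschef/server.py by this change.
# Quoted values stay strings; bare literals like true/30 become Python types.
from souschef.server import parse_ruby_hash

sample = "'port' => '8080', 'ssl' => { 'enabled' => true, 'timeout' => 30 }"
print(parse_ruby_hash(sample))
# {'port': '8080', 'ssl': {'enabled': True, 'timeout': 30}}
```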
@@ -1231,37 +1347,7 @@ def _extract_attributes_block(content: str, block_type: str) -> dict:

     block_content = match.group(1).strip()

-
-    # Ruby attribute hashes use => syntax, which we convert to Python dict
-    # This is intentionally simple - complex Chef DSL needs full Ruby parser
-    attributes = {}
-
-    # Parse simple key-value pairs like 'port' => '8080'
-    key_value_pattern = (
-        r"['\"]([^'\"]{0,100})['\"][\s:]*=>[\s:]*['\"]([^'\"]{0,200})['\"]"
-    )
-    for match in re.finditer(key_value_pattern, block_content):
-        attr_key = match.group(1)
-        attr_value = match.group(2)
-        attributes[attr_key] = attr_value
-
-    # Parse nested structures (basic support)
-    nested_pattern = (
-        r"['\"](([^'\"\n]{0,100}))['\"](\\s|:)*=>(\\s|:)*\\{([^}]{0,500})\\}"
-    )
-    for match in re.finditer(nested_pattern, block_content):
-        key = match.group(1)
-        nested_content = match.group(5)
-        nested_attrs = {}
-
-        for nested_match in re.finditer(key_value_pattern, nested_content):
-            nested_key = nested_match.group(1)
-            nested_value = nested_match.group(2)
-            nested_attrs[nested_key] = nested_value
-
-        if nested_attrs:
-            attributes[key] = nested_attrs
-
+    attributes = parse_ruby_hash(block_content)
     return attributes


@@ -1527,7 +1613,7 @@ def _find_environment_patterns_in_content(content: str, file_path: str) -> list:
     # Common Chef environment patterns
     environment_patterns = [
         (r"node\.chef_environment", "node.chef_environment"),
-        (r"node\[['\"]
+        (r"node\[['\"]environment['\"]\]", 'node["environment"]'),
         (r"environment\s+['\"]([^'\"\n]{0,100})['\"]", "environment declaration"),
         (
             r"if\s+node\.chef_environment\s*==\s*['\"]([^'\"\n]{0,100})['\"]",
@@ -1557,8 +1643,8 @@ def _find_environment_patterns_in_content(content: str, file_path: str) -> list:
     return patterns


-def _analyze_environments_structure(environments_path) -> dict:
-    """Analyze the structure of Chef environments directory."""
+def _analyse_environments_structure(environments_path) -> dict:
+    """Analyse the structure of Chef environments directory."""
     structure: dict[str, Any] = {"total_environments": 0, "environments": {}}

     for env_file in environments_path.glob("*.rb"):
@@ -1590,8 +1676,8 @@ def _analyze_environments_structure(environments_path) -> dict:
     return structure


-def _analyze_usage_pattern_recommendations(usage_patterns: list) -> list[str]:
-    """Analyze usage patterns and generate recommendations."""
+def _analyse_usage_pattern_recommendations(usage_patterns: list) -> list[str]:
+    """Analyse usage patterns and generate recommendations."""
     if not usage_patterns:
         return []

@@ -1622,8 +1708,8 @@ def _analyze_usage_pattern_recommendations(usage_patterns: list) -> list[str]:
     return recommendations


-def _analyze_structure_recommendations(env_structure: dict) -> list[str]:
-    """Analyze environment structure and generate recommendations."""
+def _analyse_structure_recommendations(env_structure: dict) -> list[str]:
+    """Analyse environment structure and generate recommendations."""
     if not env_structure:
         return []

@@ -1671,8 +1757,8 @@ def _generate_environment_migration_recommendations(
 ) -> str:
     """Generate migration recommendations based on environment usage analysis."""
     recommendations = []
-    recommendations.extend(_analyze_usage_pattern_recommendations(usage_patterns))
-    recommendations.extend(_analyze_structure_recommendations(env_structure))
+    recommendations.extend(_analyse_usage_pattern_recommendations(usage_patterns))
+    recommendations.extend(_analyse_structure_recommendations(env_structure))
     recommendations.extend(_get_general_migration_recommendations())

     return "\n".join(recommendations)
@@ -1888,7 +1974,7 @@ def _build_conversion_details_section(results: list) -> str:
         if "error" in result:
             section += f"❌ {result['databag']}/{result['item']}: {result['error']}\n"
         else:
-            status = "🔒 Encrypted" if result
+            status = "🔒 Encrypted" if result.get("encrypted", False) else "📄 Plain"
             databag_item = f"{result['databag']}/{result['item']}"
             target = result["target_file"]
             section += f"✅ {databag_item} → {target} ({status})\n"
@@ -2001,8 +2087,8 @@ def _find_databag_patterns_in_content(content: str, file_path: str) -> list:
     return patterns


-def _analyze_databag_structure(databags_path) -> dict:
-    """Analyze the structure of Chef data bags directory."""
+def _analyse_databag_structure(databags_path) -> dict:
+    """Analyse the structure of Chef data bags directory."""
     structure: dict[str, Any] = {
         "total_databags": 0,
         "total_items": 0,
@@ -2046,9 +2132,9 @@ def _analyze_databag_structure(databags_path) -> dict:
     return structure


-def _analyze_usage_patterns(usage_patterns: list) -> list[str]:
+def _analyse_usage_patterns(usage_patterns: list) -> list[str]:
     """
-    Analyze databag usage patterns and generate recommendations.
+    Analyse databag usage patterns and generate recommendations.

     Args:
         usage_patterns: List of usage pattern dicts
@@ -2089,9 +2175,9 @@ def _analyze_usage_patterns(usage_patterns: list) -> list[str]:
     return recommendations


-def _analyze_databag_structure_recommendations(databag_structure: dict) -> list[str]:
+def _analyse_databag_structure_recommendations(databag_structure: dict) -> list[str]:
     """
-    Analyze databag structure and generate recommendations.
+    Analyse databag structure and generate recommendations.

     Args:
         databag_structure: Dict with structure analysis
@@ -2156,11 +2242,11 @@ def _generate_databag_migration_recommendations(
     recommendations = []

     # Analyze usage patterns
-    recommendations.extend(_analyze_usage_patterns(usage_patterns))
+    recommendations.extend(_analyse_usage_patterns(usage_patterns))

     # Analyze structure
     recommendations.extend(
-        _analyze_databag_structure_recommendations(databag_structure)
+        _analyse_databag_structure_recommendations(databag_structure)
     )

     # Add variable scope best practices
@@ -2240,7 +2326,7 @@ mcp.tool()(_generate_awx_inventory_source_from_chef)
 mcp.tool()(_convert_chef_deployment_to_ansible_strategy)
 mcp.tool()(_generate_blue_green_deployment_playbook)
 mcp.tool()(_generate_canary_deployment_strategy)
-mcp.tool()(
+mcp.tool()(analyse_chef_application_patterns)


 # ============================================================================
@@ -2300,9 +2386,9 @@ def generate_migration_plan(


 @mcp.tool()
-def analyze_cookbook_dependencies(cookbook_paths: str) -> str:
+def analyse_cookbook_dependencies(cookbook_paths: str) -> str:
     """
-    Analyze dependencies between Chef cookbooks.
+    Analyse dependencies between Chef cookbooks.

     Maps cookbook dependencies, identifies circular dependencies, and
     recommends migration order.
@@ -2314,7 +2400,7 @@ def analyze_cookbook_dependencies(cookbook_paths: str) -> str:
         Dependency analysis report in markdown format.

     """
-    return _analyze_cookbook_dependencies(cookbook_paths)
+    return _analyse_cookbook_dependencies(cookbook_paths)


 @mcp.tool()
@@ -2435,7 +2521,11 @@ def generate_playbook_from_recipe(recipe_path: str) -> str:
         Generated Ansible playbook content.

     """
-    return _generate_playbook_from_recipe(recipe_path)
+    from souschef.converters.playbook import (
+        generate_playbook_from_recipe as _generate_playbook,
+    )
+
+    return _generate_playbook(recipe_path)


 def convert_chef_search_to_inventory(search_query: str) -> str:
@@ -2466,9 +2556,10 @@ def generate_dynamic_inventory_script(search_queries: str) -> str:
     return _generate_dynamic_inventory_script(search_queries)


-def analyze_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
+@mcp.tool()
+def analyse_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
     """
-    Analyze Chef search patterns in recipe or cookbook.
+    Analyse Chef search patterns in recipe or cookbook.

     Args:
         recipe_or_cookbook_path: Path to recipe or cookbook.
@@ -2477,7 +2568,7 @@ def analyze_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
     Analysis of search patterns found.

     """
-    return _analyze_chef_search_patterns(recipe_or_cookbook_path)
+    return _analyse_chef_search_patterns(recipe_or_cookbook_path)


 @mcp.tool()
@@ -2558,6 +2649,166 @@ def profile_parsing_operation(
         return format_error_with_context(e, f"profiling {operation} parsing", file_path)


+# CI/CD Pipeline Generation Tools
+
+
+@mcp.tool()
+def generate_jenkinsfile_from_chef(
+    cookbook_path: str,
+    pipeline_name: str = "chef-to-ansible-pipeline",
+    pipeline_type: str = "declarative",
+    enable_parallel: str = "yes",
+) -> str:
+    """
+    Generate Jenkins pipeline from Chef cookbook CI/CD patterns.
+
+    Analyzes Chef testing tools (Test Kitchen, ChefSpec, InSpec, Foodcritic)
+    and generates equivalent Jenkins pipeline stages (Declarative or Scripted).
+
+    Args:
+        cookbook_path: Path to Chef cookbook directory.
+        pipeline_name: Name for the Jenkins pipeline.
+        pipeline_type: Pipeline type - 'declarative' (recommended) or 'scripted'.
+        enable_parallel: Enable parallel test execution - 'yes' or 'no'.
+
+    Returns:
+        Jenkinsfile content (Groovy DSL) for Jenkins pipeline.
+
+    """
+    from souschef.ci.jenkins_pipeline import generate_jenkinsfile_from_chef_ci
+
+    try:
+        # Convert string to boolean
+        enable_parallel_bool = enable_parallel.lower() in ("yes", "true", "1")
+
+        result = generate_jenkinsfile_from_chef_ci(
+            cookbook_path=cookbook_path,
+            pipeline_name=pipeline_name,
+            pipeline_type=pipeline_type,
+            enable_parallel=enable_parallel_bool,
+        )
+        return result
+    except FileNotFoundError as e:
+        return format_error_with_context(e, "generating Jenkinsfile", cookbook_path)
+    except Exception as e:
+        return format_error_with_context(e, "generating Jenkinsfile", cookbook_path)
+
+
+@mcp.tool()
+def generate_gitlab_ci_from_chef(
+    cookbook_path: str,
+    project_name: str = "chef-to-ansible",
+    enable_cache: str = "yes",
+    enable_artifacts: str = "yes",
+) -> str:
+    """
+    Generate GitLab CI configuration from Chef cookbook CI/CD patterns.
+
+    Analyzes Chef testing tools and generates equivalent GitLab CI stages
+    with caching, artifacts, and parallel execution support.
+
+    Args:
+        cookbook_path: Path to Chef cookbook directory.
+        project_name: GitLab project name.
+        enable_cache: Enable caching for dependencies - 'yes' or 'no'.
+        enable_artifacts: Enable artifacts for test results - 'yes' or 'no'.
+
+    Returns:
+        .gitlab-ci.yml content (YAML) for GitLab CI/CD.
+
+    """
+    from souschef.ci.gitlab_ci import generate_gitlab_ci_from_chef_ci
+
+    try:
+        enable_cache_bool = enable_cache.lower() in ("yes", "true", "1")
+        enable_artifacts_bool = enable_artifacts.lower() in ("yes", "true", "1")
+        result = generate_gitlab_ci_from_chef_ci(
+            cookbook_path=cookbook_path,
+            project_name=project_name,
+            enable_cache=enable_cache_bool,
+            enable_artifacts=enable_artifacts_bool,
+        )
+        return result
+    except FileNotFoundError as e:
+        return format_error_with_context(
+            e,
+            "generating .gitlab-ci.yml",
+            cookbook_path,
+        )
+    except Exception as e:
+        return format_error_with_context(e, "generating .gitlab-ci.yml", cookbook_path)
+
+
+@mcp.tool()
+def generate_github_workflow_from_chef(
+    cookbook_path: str,
+    workflow_name: str = "Chef Cookbook CI",
+    enable_cache: str = "yes",
+    enable_artifacts: str = "yes",
+) -> str:
+    """
+    Generate GitHub Actions workflow from Chef cookbook CI/CD patterns.
+
+    Analyzes Chef testing tools and generates equivalent GitHub Actions workflow
+    with caching, artifacts, and matrix strategy support.
+
+    Args:
+        cookbook_path: Path to Chef cookbook directory.
+        workflow_name: GitHub Actions workflow name.
+        enable_cache: Enable caching for dependencies - 'yes' or 'no'.
+        enable_artifacts: Enable artifacts for test results - 'yes' or 'no'.
+
+    Returns:
+        GitHub Actions workflow YAML content (.github/workflows/*.yml).
+
+    """
+    from souschef.ci.github_actions import generate_github_workflow_from_chef_ci
+
+    try:
+        enable_cache_bool = enable_cache.lower() in ("yes", "true", "1")
+        enable_artifacts_bool = enable_artifacts.lower() in ("yes", "true", "1")
+        result = generate_github_workflow_from_chef_ci(
+            cookbook_path=cookbook_path,
+            workflow_name=workflow_name,
+            enable_cache=enable_cache_bool,
+            enable_artifacts=enable_artifacts_bool,
+        )
+        return result
+    except FileNotFoundError as e:
+        return format_error_with_context(
+            e,
+            "generating GitHub Actions workflow",
+            cookbook_path,
+        )
+    except Exception as e:
+        return format_error_with_context(
+            e, "generating GitHub Actions workflow", cookbook_path
+        )
+
+
+@mcp.tool()
+def parse_chef_migration_assessment(
+    cookbook_paths: str,
+    migration_scope: str = "full",
+    target_platform: str = "ansible_awx",
+) -> dict[str, Any]:
+    """
+    Parse Chef cookbook migration assessment and return as dictionary.
+
+    Args:
+        cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
+        migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
+        target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)
+
+    Returns:
+        Dictionary containing assessment data with complexity, recommendations, etc.
+
+    """
+    return _parse_chef_migration_assessment(
+        cookbook_paths, migration_scope, target_platform
+    )
+
+
 # AWX/AAP deployment wrappers for backward compatibility
 def main() -> None:
     """