mcp-souschef 2.1.2__py3-none-any.whl → 2.5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.5.3.dist-info}/METADATA +200 -19
- mcp_souschef-2.5.3.dist-info/RECORD +38 -0
- mcp_souschef-2.5.3.dist-info/entry_points.txt +4 -0
- souschef/assessment.py +531 -180
- souschef/ci/__init__.py +11 -0
- souschef/ci/github_actions.py +379 -0
- souschef/ci/gitlab_ci.py +299 -0
- souschef/ci/jenkins_pipeline.py +343 -0
- souschef/cli.py +691 -1
- souschef/converters/playbook.py +43 -5
- souschef/converters/resource.py +146 -49
- souschef/core/__init__.py +22 -0
- souschef/core/errors.py +275 -0
- souschef/core/validation.py +35 -2
- souschef/deployment.py +414 -100
- souschef/filesystem/operations.py +0 -7
- souschef/parsers/__init__.py +6 -1
- souschef/parsers/habitat.py +35 -6
- souschef/parsers/inspec.py +415 -52
- souschef/parsers/metadata.py +89 -23
- souschef/profiling.py +568 -0
- souschef/server.py +948 -255
- souschef/ui/__init__.py +8 -0
- souschef/ui/app.py +1837 -0
- souschef/ui/pages/cookbook_analysis.py +425 -0
- mcp_souschef-2.1.2.dist-info/RECORD +0 -29
- mcp_souschef-2.1.2.dist-info/entry_points.txt +0 -4
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.5.3.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.5.3.dist-info}/licenses/LICENSE +0 -0
souschef/assessment.py
CHANGED
@@ -7,9 +7,11 @@ generating migration plans, analyzing dependencies, and validating conversions.
 
 import json
 import re
+from pathlib import Path
 from typing import Any
 
-from souschef.core import
+from souschef.core import METADATA_FILENAME, _normalize_path, _safe_join
+from souschef.core.errors import format_error_with_context
 from souschef.core.validation import (
     ValidationEngine,
     ValidationLevel,
@@ -35,116 +37,189 @@ def assess_chef_migration_complexity(
 
     """
     try:
-        #
-
-
-
-
-
-            "total_cookbooks": 0,
-            "total_recipes": 0,
-            "total_resources": 0,
-            "complexity_score": 0,
-            "estimated_effort_days": 0,
-        }
+        # Validate and parse inputs
+        error_msg = _validate_assessment_inputs(
+            cookbook_paths, migration_scope, target_platform
+        )
+        if error_msg:
+            return error_msg
 
-
-
-
-
-            cookbook_assessments.append(assessment)
-
-            # Aggregate metrics
-            overall_metrics["total_cookbooks"] += 1
-            overall_metrics["total_recipes"] += assessment["metrics"][
-                "recipe_count"
-            ]
-            overall_metrics["total_resources"] += assessment["metrics"][
-                "resource_count"
-            ]
-            overall_metrics["complexity_score"] += assessment["complexity_score"]
-            overall_metrics["estimated_effort_days"] += assessment[
-                "estimated_effort_days"
-            ]
-
-        # Calculate averages
-        if cookbook_assessments:
-            overall_metrics["avg_complexity"] = int(
-                overall_metrics["complexity_score"] / len(cookbook_assessments)
-            )
+        # Process cookbook analysis
+        return _process_cookbook_assessment(
+            cookbook_paths, migration_scope, target_platform
+        )
 
-
-
-
+    except Exception as e:
+        return format_error_with_context(
+            e, "assessing Chef migration complexity", cookbook_paths
         )
 
-        # Create migration roadmap
-        roadmap = _create_migration_roadmap(cookbook_assessments)
 
-
-
-
+def _process_cookbook_assessment(
+    cookbook_paths: str, migration_scope: str, target_platform: str
+) -> str:
+    """Process the cookbook assessment workflow."""
+    # Parse cookbook paths (may be empty if none exist)
+    valid_paths = _parse_cookbook_paths(cookbook_paths)
 
-
-
+    # Analyze all cookbooks (handles empty list gracefully)
+    cookbook_assessments, overall_metrics = _analyze_cookbook_metrics(valid_paths)
 
-
-
+    # Generate recommendations and reports
+    recommendations = _generate_migration_recommendations_from_assessment(
+        cookbook_assessments, overall_metrics, target_platform
+    )
+    roadmap = _create_migration_roadmap(cookbook_assessments)
+
+    # Format final assessment report
+    return _format_assessment_report(
+        migration_scope,
+        target_platform,
+        overall_metrics,
+        cookbook_assessments,
+        recommendations,
+        roadmap,
+    )
 
-## Migration Complexity Analysis:
-{_format_complexity_analysis(cookbook_assessments)}
 
-
-
+def parse_chef_migration_assessment(
+    cookbook_paths: str,
+    migration_scope: str = "full",
+    target_platform: str = "ansible_awx",
+) -> dict[str, Any]:
+    """
+    Parse Chef cookbook migration assessment and return as dictionary.
 
-
-
+    Args:
+        cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
+        migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
+        target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)
 
-
-
+    Returns:
+        Dictionary containing assessment data with complexity, recommendations, etc.
+
+    """
+    try:
+        # Validate inputs
+        error_msg = _validate_assessment_inputs(
+            cookbook_paths, migration_scope, target_platform
+        )
+        if error_msg:
+            return {"error": error_msg}
+
+        # Parse cookbook paths (may be empty if none exist)
+        valid_paths = _parse_cookbook_paths(cookbook_paths)
+
+        # Analyze all cookbooks (handles empty list gracefully)
+        cookbook_assessments, overall_metrics = _analyze_cookbook_metrics(valid_paths)
+
+        # Generate recommendations and reports
+        recommendations = _generate_migration_recommendations_from_assessment(
+            cookbook_assessments, overall_metrics, target_platform
+        )
+        roadmap = _create_migration_roadmap(cookbook_assessments)
+
+        return {
+            "migration_scope": migration_scope,
+            "target_platform": target_platform,
+            "overall_metrics": overall_metrics,
+            "cookbook_assessments": cookbook_assessments,
+            "recommendations": recommendations,
+            "roadmap": roadmap,
+            "complexity": _get_overall_complexity_level(overall_metrics),
+            "estimated_hours": overall_metrics.get("estimated_effort_days", 0)
+            * 8,  # Convert days to hours
+        }
 
-## Resource Requirements:
-{_estimate_resource_requirements(overall_metrics, target_platform)}
-"""
     except Exception as e:
-        return
+        return {
+            "error": format_error_with_context(
+                e, "assessing Chef migration complexity", cookbook_paths
+            )
+        }
 
 
-def
-
-
-
-
+def _get_overall_complexity_level(metrics: dict[str, int]) -> str:
+    """Get overall complexity level based on metrics."""
+    avg_complexity = metrics.get("avg_complexity", 0)
+    if avg_complexity < 30:
+        return "Low"
+    elif avg_complexity < 70:
+        return "Medium"
+    else:
+        return "High"
 
-
-
-
-
+
+def _validate_migration_plan_inputs(
+    cookbook_paths: str, migration_strategy: str, timeline_weeks: int
+) -> str | None:
+    """
+    Validate migration plan inputs.
 
     Returns:
-
+        Error message if validation fails, None if valid.
 
     """
-
-
-
-
+    if not cookbook_paths or not cookbook_paths.strip():
+        return (
+            "Error: Cookbook paths cannot be empty\n\n"
+            "Suggestion: Provide comma-separated paths to Chef cookbooks"
+        )
 
-
-
-
-
-
+    valid_strategies = ["big_bang", "phased", "parallel"]
+    if migration_strategy not in valid_strategies:
+        return (
+            f"Error: Invalid migration strategy '{migration_strategy}'\n\n"
+            f"Suggestion: Use one of {', '.join(valid_strategies)}"
+        )
 
-
-
-
+    if not (1 <= timeline_weeks <= 104):  # 1 week to 2 years
+        return (
+            f"Error: Timeline must be between 1 and 104 weeks, got {timeline_weeks}\n\n"
+            "Suggestion: Provide a realistic timeline (4-12 weeks typical)"
         )
 
-
+    return None
+
+
+def _parse_and_assess_cookbooks(cookbook_paths: str) -> tuple[list, str | None]:
+    """
+    Parse cookbook paths and assess each cookbook.
+
+    Returns:
+        Tuple of (cookbook_assessments, error_message).
+
+    """
+    paths = [_normalize_path(path.strip()) for path in cookbook_paths.split(",")]
+    valid_paths = [p for p in paths if p.exists()]
+
+    if not valid_paths:
+        return (
+            [],
+            "Error: No valid cookbook paths found\n\n"
+            "Suggestion: Ensure paths exist and point to cookbook directories",
+        )
+
+    cookbook_assessments = []
+    for cookbook_path in valid_paths:
+        # deepcode ignore PT: path normalized via _normalize_path
+        assessment = _assess_single_cookbook(cookbook_path)
+        cookbook_assessments.append(assessment)
+
+    return cookbook_assessments, None
+
+
+def _format_migration_plan_output(
+    migration_plan: dict,
+    migration_strategy: str,
+    timeline_weeks: int,
+    num_cookbooks: int,
+) -> str:
+    """Format migration plan as markdown output."""
+    return f"""# Chef to Ansible Migration Plan
 # Strategy: {migration_strategy}
 # Timeline: {timeline_weeks} weeks
-# Cookbooks: {
+# Cookbooks: {num_cookbooks}
 
 ## Executive Summary:
 {migration_plan["executive_summary"]}
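The new `parse_chef_migration_assessment` helper exposes the same assessment as a dictionary instead of a formatted report. A minimal usage sketch, assuming a hypothetical cookbook checkout at `cookbooks/apache`; the dictionary keys, the complexity thresholds, and the days-to-hours conversion come from the hunk above:

```python
# Sketch only: "cookbooks/apache" is a placeholder path, not part of the package.
from souschef.assessment import parse_chef_migration_assessment

result = parse_chef_migration_assessment(
    "cookbooks/apache",
    migration_scope="full",
    target_platform="ansible_awx",
)
if "error" in result:
    print(result["error"])
else:
    # "complexity" is "Low" (<30), "Medium" (<70), or "High" per _get_overall_complexity_level
    print(result["complexity"], result["estimated_hours"])  # estimated_hours = effort_days * 8
```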
@@ -173,8 +248,50 @@ def generate_migration_plan(
 ## Post-Migration Tasks:
 {migration_plan["post_migration"]}
 """
+
+
+def generate_migration_plan(
+    cookbook_paths: str, migration_strategy: str = "phased", timeline_weeks: int = 12
+) -> str:
+    """
+    Generate a detailed migration plan from Chef to Ansible with timeline and milestones.
+
+    Args:
+        cookbook_paths: Comma-separated paths to Chef cookbooks
+        migration_strategy: Migration approach (big_bang, phased, parallel)
+        timeline_weeks: Target timeline in weeks
+
+    Returns:
+        Detailed migration plan with phases, milestones, and deliverables
+
+    """
+    try:
+        # Validate inputs
+        error = _validate_migration_plan_inputs(
+            cookbook_paths, migration_strategy, timeline_weeks
+        )
+        if error:
+            return error
+
+        # Parse and assess cookbooks
+        cookbook_assessments, error = _parse_and_assess_cookbooks(cookbook_paths)
+        if error:
+            return error
+
+        # Generate migration plan based on strategy
+        migration_plan = _generate_detailed_migration_plan(
+            cookbook_assessments, migration_strategy, timeline_weeks
+        )
+
+        return _format_migration_plan_output(
+            migration_plan,
+            migration_strategy,
+            timeline_weeks,
+            len(cookbook_assessments),
+        )
+
     except Exception as e:
-        return
+        return format_error_with_context(e, "generating migration plan", cookbook_paths)
 
 
 def analyze_cookbook_dependencies(
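Input validation now runs before any cookbook is read, and invalid arguments come back as formatted error strings rather than bare returns. A hedged usage sketch; the paths are placeholders, and only the call signature and error wording are taken from the diff:

```python
# Placeholder paths; only the signature and error text come from the hunk above.
from souschef.assessment import generate_migration_plan

# With real cookbook checkouts this returns a markdown plan; with missing paths it
# returns "Error: No valid cookbook paths found" plus a suggestion.
plan = generate_migration_plan(
    "cookbooks/apache,cookbooks/mysql",
    migration_strategy="phased",
    timeline_weeks=12,
)

# The strategy is validated before any path is touched.
bad = generate_migration_plan("cookbooks/apache", migration_strategy="rolling")
assert bad.startswith("Error: Invalid migration strategy 'rolling'")
```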
@@ -192,9 +309,20 @@ def analyze_cookbook_dependencies(
 
     """
     try:
+        # Validate inputs
+        valid_depths = ["direct", "transitive", "full"]
+        if dependency_depth not in valid_depths:
+            return (
+                f"Error: Invalid dependency depth '{dependency_depth}'\n\n"
+                f"Suggestion: Use one of {', '.join(valid_depths)}"
+            )
+
         cookbook_path_obj = _normalize_path(cookbook_path)
         if not cookbook_path_obj.exists():
-            return
+            return (
+                f"Error: Cookbook path not found: {cookbook_path}\n\n"
+                "Suggestion: Check that the path exists and points to a cookbook directory"
+            )
 
         # Analyze dependencies
         dependency_analysis = _analyze_cookbook_dependencies_detailed(cookbook_path_obj)
@@ -231,7 +359,9 @@ def analyze_cookbook_dependencies(
 {_analyze_dependency_migration_impact(dependency_analysis)}
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, "analyzing cookbook dependencies", cookbook_path
+        )
 
 
 def generate_migration_report(
@@ -300,7 +430,7 @@ def generate_migration_report(
 {report["appendices"]}
 """
     except Exception as e:
-        return
+        return format_error_with_context(e, "generating migration report")
 
 
 def validate_conversion(
@@ -347,31 +477,188 @@ def validate_conversion(
         return _format_validation_results_text(conversion_type, results, summary)
 
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, f"validating Ansible {conversion_type} conversion"
+        )
 
 
 # Private helper functions for assessment
 
 
-def
-
-
-
-
-
-
+def _validate_assessment_inputs(
+    cookbook_paths: str, migration_scope: str, target_platform: str
+) -> str | None:
+    """
+    Validate inputs for migration assessment.
+
+    Args:
+        cookbook_paths: Paths to cookbooks
+        migration_scope: Scope of migration
+        target_platform: Target platform
+
+    Returns:
+        Error message if validation fails, None otherwise
+
+    """
+    if not cookbook_paths or not cookbook_paths.strip():
+        return (
+            "Error: Cookbook paths cannot be empty\n\n"
+            "Suggestion: Provide comma-separated paths to Chef cookbooks"
+        )
+
+    valid_scopes = ["full", "recipes_only", "infrastructure_only"]
+    if migration_scope not in valid_scopes:
+        return (
+            f"Error: Invalid migration scope '{migration_scope}'\n\n"
+            f"Suggestion: Use one of {', '.join(valid_scopes)}"
+        )
+
+    valid_platforms = ["ansible_awx", "ansible_core", "ansible_tower"]
+    if target_platform not in valid_platforms:
+        return (
+            f"Error: Invalid target platform '{target_platform}'\n\n"
+            f"Suggestion: Use one of {', '.join(valid_platforms)}"
+        )
+
+    return None
+
+
+def _parse_cookbook_paths(cookbook_paths: str) -> list[Any]:
+    """
+    Parse and validate cookbook paths.
+
+    Args:
+        cookbook_paths: Comma-separated paths to cookbooks
+
+    Returns:
+        List of valid Path objects (may be empty)
+
+    """
+    paths = [_normalize_path(path.strip()) for path in cookbook_paths.split(",")]
+    valid_paths = [p for p in paths if p.exists()]
+    return valid_paths
+
+
+def _analyze_cookbook_metrics(
+    valid_paths: list[Any],
+) -> tuple[list[Any], dict[str, int]]:
+    """
+    Analyze metrics for all cookbooks.
+
+    Args:
+        valid_paths: List of valid cookbook paths
+
+    Returns:
+        Tuple of (cookbook_assessments, overall_metrics)
+
+    """
+    cookbook_assessments = []
+    overall_metrics = {
+        "total_cookbooks": 0,
+        "total_recipes": 0,
+        "total_resources": 0,
         "complexity_score": 0,
         "estimated_effort_days": 0,
-        "challenges": [],
-        "migration_priority": "medium",
-        "dependencies": [],
     }
 
-
-
+    for cookbook_path in valid_paths:
+        # deepcode ignore PT: path normalized via _normalize_path
+        assessment = _assess_single_cookbook(cookbook_path)
+        cookbook_assessments.append(assessment)
+
+        # Aggregate metrics
+        overall_metrics["total_cookbooks"] += 1
+        overall_metrics["total_recipes"] += assessment["metrics"]["recipe_count"]
+        overall_metrics["total_resources"] += assessment["metrics"]["resource_count"]
+        overall_metrics["complexity_score"] += assessment["complexity_score"]
+        overall_metrics["estimated_effort_days"] += assessment["estimated_effort_days"]
+
+    # Calculate averages
+    if cookbook_assessments:
+        overall_metrics["avg_complexity"] = int(
+            overall_metrics["complexity_score"] / len(cookbook_assessments)
+        )
+
+    return cookbook_assessments, overall_metrics
+
+
+def _format_assessment_report(
+    migration_scope: str,
+    target_platform: str,
+    overall_metrics: dict[str, int],
+    cookbook_assessments: list[Any],
+    recommendations: str,
+    roadmap: str,
+) -> str:
+    """
+    Format the final assessment report.
+
+    Args:
+        migration_scope: Scope of migration
+        target_platform: Target platform
+        overall_metrics: Overall metrics dictionary
+        cookbook_assessments: List of cookbook assessments
+        recommendations: Migration recommendations
+        roadmap: Migration roadmap
+
+    Returns:
+        Formatted report string
+
+    """
+    return f"""# Chef to Ansible Migration Assessment
+# Scope: {migration_scope}
+# Target Platform: {target_platform}
+
+## Overall Migration Metrics:
+{_format_overall_metrics(overall_metrics)}
+
+## Cookbook Assessments:
+{_format_cookbook_assessments(cookbook_assessments)}
+
+## Migration Complexity Analysis:
+{_format_complexity_analysis(cookbook_assessments)}
+
+## Migration Recommendations:
+{recommendations}
+
+## Migration Roadmap:
+{roadmap}
+
+## Risk Assessment:
+{_assess_migration_risks(cookbook_assessments, target_platform)}
+
+## Resource Requirements:
+{_estimate_resource_requirements(overall_metrics, target_platform)}
+"""
+
+
+def _count_cookbook_artifacts(cookbook_path) -> dict[str, int]:
+    """Count basic cookbook artifacts (recipes, templates, files)."""
+    recipes_dir = _safe_join(cookbook_path, "recipes")
     recipe_count = len(list(recipes_dir.glob("*.rb"))) if recipes_dir.exists() else 0
 
-
+    templates_count = (
+        len(list(_safe_join(cookbook_path, "templates").glob("*")))
+        if _safe_join(cookbook_path, "templates").exists()
+        else 0
+    )
+
+    files_count = (
+        len(list(_safe_join(cookbook_path, "files").glob("*")))
+        if _safe_join(cookbook_path, "files").exists()
+        else 0
+    )
+
+    return {
+        "recipe_count": recipe_count,
+        "templates": templates_count,
+        "files": files_count,
+    }
+
+
+def _analyze_recipe_complexity(cookbook_path) -> dict[str, int]:
+    """Analyze recipe files for resource counts, Ruby blocks, and custom resources."""
+    recipes_dir = _safe_join(cookbook_path, "recipes")
     resource_count = 0
     custom_resources = 0
     ruby_blocks = 0
@@ -381,7 +668,6 @@ def _assess_single_cookbook(cookbook_path) -> dict:
         with recipe_file.open("r", encoding="utf-8", errors="ignore") as f:
             content = f.read()
             # Count Chef resources
-
             resources = len(
                 re.findall(r'\w{1,100}\s+[\'"]([^\'"]{0,200})[\'"]\s+do', content)
             )
@@ -395,57 +681,88 @@ def _assess_single_cookbook(cookbook_path) -> dict:
             )
             resource_count += resources
 
-
-        "recipe_count": recipe_count,
+    return {
         "resource_count": resource_count,
         "custom_resources": custom_resources,
         "ruby_blocks": ruby_blocks,
-        "templates": len(list(_safe_join(cookbook, "templates").glob("*")))
-        if _safe_join(cookbook, "templates").exists()
-        else 0,
-        "files": len(list(_safe_join(cookbook, "files").glob("*")))
-        if _safe_join(cookbook, "files").exists()
-        else 0,
     }
 
-
+
+def _calculate_complexity_score(metrics: dict[str, int]) -> int:
+    """Calculate complexity score (0-100) based on metrics."""
+    recipe_count = metrics["recipe_count"]
+    resource_count = metrics["resource_count"]
+
     complexity_factors = {
         "recipe_count": min(recipe_count * 2, 20),
         "resource_density": min(resource_count / max(recipe_count, 1) * 5, 25),
-        "custom_resources": custom_resources * 10,
-        "ruby_blocks": ruby_blocks * 5,
-        "templates": min(
-        "files": min(
+        "custom_resources": metrics["custom_resources"] * 10,
+        "ruby_blocks": metrics["ruby_blocks"] * 5,
+        "templates": min(metrics["templates"] * 2, 15),
+        "files": min(metrics["files"] * 1, 10),
     }
 
-
+    return int(sum(complexity_factors.values()))
 
-    # Estimate effort (person-days)
-    base_effort = recipe_count * 0.5  # 0.5 days per recipe
-    complexity_multiplier = 1 + (assessment["complexity_score"] / 100)
-    assessment["estimated_effort_days"] = round(base_effort * complexity_multiplier, 1)
 
-
-
-
-
-
-
-
-
+def _identify_migration_challenges(
+    metrics: dict[str, int], complexity_score: int
+) -> list[str]:
+    """Identify migration challenges based on metrics."""
+    challenges = []
+
+    if metrics["custom_resources"] > 0:
+        challenges.append(
+            f"{metrics['custom_resources']} custom resources requiring manual conversion"
         )
-    if
-
-    "
+    if metrics["ruby_blocks"] > 5:
+        challenges.append(
+            f"{metrics['ruby_blocks']} Ruby blocks needing shell script conversion"
        )
+    if complexity_score > 70:
+        challenges.append("High complexity cookbook requiring expert review")
+
+    return challenges
 
-    # Set migration priority
-    if assessment["complexity_score"] < 30:
-        assessment["migration_priority"] = "low"
-    elif assessment["complexity_score"] > 70:
-        assessment["migration_priority"] = "high"
 
-
+def _determine_migration_priority(complexity_score: int) -> str:
+    """Determine migration priority based on complexity score."""
+    if complexity_score < 30:
+        return "low"
+    elif complexity_score > 70:
+        return "high"
+    return "medium"
+
+
+def _assess_single_cookbook(cookbook_path: Path) -> dict:
+    """Assess complexity of a single cookbook."""
+    # cookbook_path is already normalized to a Path object
+    cookbook = cookbook_path
+
+    # Collect metrics
+    artifact_counts = _count_cookbook_artifacts(cookbook)
+    recipe_complexity = _analyze_recipe_complexity(cookbook)
+    metrics = {**artifact_counts, **recipe_complexity}
+
+    # Calculate complexity and effort
+    complexity_score = _calculate_complexity_score(metrics)
+    # More realistic effort: 0.5-2 hours per recipe with AI assistance
+    # Base: 1 hour per recipe = 0.125 days (8-hour day)
+    base_effort = metrics["recipe_count"] * 0.125  # 0.125 days per recipe
+    complexity_multiplier = 1 + (complexity_score / 100)
+    estimated_effort = round(base_effort * complexity_multiplier, 1)
+
+    # Build assessment
+    return {
+        "cookbook_name": cookbook.name,
+        "cookbook_path": str(cookbook),
+        "metrics": metrics,
+        "complexity_score": complexity_score,
+        "estimated_effort_days": estimated_effort,
+        "challenges": _identify_migration_challenges(metrics, complexity_score),
+        "migration_priority": _determine_migration_priority(complexity_score),
+        "dependencies": [],
+    }
 
 
 def _format_overall_metrics(metrics: dict) -> str:
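The per-cookbook effort model changes from 0.5 days per recipe to 0.125 days (one hour) per recipe before the complexity multiplier is applied. A worked example of the new numbers, using an illustrative cookbook with 8 recipes and a complexity score of 40:

```python
# Illustrative values only; the constants mirror _assess_single_cookbook above.
recipe_count = 8
complexity_score = 40

base_effort = recipe_count * 0.125  # 1.0 person-day (was recipe_count * 0.5 = 4.0)
complexity_multiplier = 1 + (complexity_score / 100)  # 1.4
estimated_effort_days = round(base_effort * complexity_multiplier, 1)  # 1.4, vs. 5.6 previously
```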
@@ -455,7 +772,7 @@ def _format_overall_metrics(metrics: dict) -> str:
 • Total Resources: {metrics["total_resources"]}
 • Average Complexity: {metrics.get("avg_complexity", 0):.1f}/100
 • Estimated Total Effort: {metrics["estimated_effort_days"]:.1f} person-days
-• Estimated Duration: {int(metrics["estimated_effort_days"] /
+• Estimated Duration: {max(1, int(metrics["estimated_effort_days"] / 2))}-{max(2, int(metrics["estimated_effort_days"]))} weeks (with 2-4 parallel engineers)"""
 
 
 def _format_cookbook_assessments(assessments: list) -> str:
@@ -625,17 +942,20 @@ def _create_migration_roadmap(assessments: list) -> str:
     return "\n".join(roadmap_formatted)
 
 
-def
-    """Assess
+def _assess_technical_complexity_risks(assessments: list) -> list[str]:
+    """Assess risks related to technical complexity."""
     risks = []
-
-    # Technical risks
     high_complexity_count = len([a for a in assessments if a["complexity_score"] > 70])
     if high_complexity_count > 0:
         risks.append(
             f"🔴 HIGH: {high_complexity_count} high-complexity cookbooks may cause delays"
         )
+    return risks
 
+
+def _assess_custom_resource_risks(assessments: list) -> list[str]:
+    """Assess risks related to custom resources and Ruby blocks."""
+    risks = []
     custom_resource_count = sum(a["metrics"]["custom_resources"] for a in assessments)
     if custom_resource_count > 0:
         risks.append(
@@ -648,14 +968,33 @@ def _assess_migration_risks(assessments: list, target_platform: str) -> str:
             f"🟡 MEDIUM: {ruby_block_count} Ruby blocks require shell script conversion"
         )
 
-
+    return risks
+
+
+def _assess_timeline_risks(assessments: list) -> list[str]:
+    """Assess risks related to migration timeline and scope."""
+    risks = []
     total_effort = sum(a["estimated_effort_days"] for a in assessments)
     if total_effort > 50:
         risks.append("🟡 MEDIUM: Large migration scope may impact timeline")
+    return risks
+
 
-
+def _assess_platform_risks(target_platform: str) -> list[str]:
+    """Assess risks related to target platform."""
     if target_platform == "ansible_awx":
-
+        return ["🟢 LOW: AWX integration well-supported with existing tools"]
+    return []
+
+
+def _assess_migration_risks(assessments: list, target_platform: str) -> str:
+    """Assess migration risks."""
+    risks = []
+
+    risks.extend(_assess_technical_complexity_risks(assessments))
+    risks.extend(_assess_custom_resource_risks(assessments))
+    risks.extend(_assess_timeline_risks(assessments))
+    risks.extend(_assess_platform_risks(target_platform))
 
     if not risks:
         risks.append("🟢 LOW: No significant migration risks identified")
@@ -1146,6 +1485,46 @@ def _generate_migration_timeline(strategy: str, timeline_weeks: int) -> str:
     return "\n".join([f"• {milestone}" for milestone in milestones])
 
 
+def _build_validation_header(
+    conversion_type: str, summary: dict[str, int]
+) -> list[str]:
+    """Build the header section of validation results."""
+    return [
+        f"# Validation Results for {conversion_type} Conversion",
+        "",
+        "## Summary",
+        f"• Errors: {summary['errors']}",
+        f"• Warnings: {summary['warnings']}",
+        f"• Info: {summary['info']}",
+        "",
+    ]
+
+
+def _group_results_by_level(
+    results: list[ValidationResult],
+) -> tuple[list[ValidationResult], list[ValidationResult], list[ValidationResult]]:
+    """Group validation results by severity level."""
+    errors = [r for r in results if r.level == ValidationLevel.ERROR]
+    warnings = [r for r in results if r.level == ValidationLevel.WARNING]
+    infos = [r for r in results if r.level == ValidationLevel.INFO]
+    return errors, warnings, infos
+
+
+def _format_result_section(
+    title: str, icon: str, results: list[ValidationResult]
+) -> list[str]:
+    """Format a single validation results section."""
+    if not results:
+        return []
+
+    lines = [f"## {icon} {title}", ""]
+    for result in results:
+        lines.append(str(result))
+        lines.append("")
+
+    return lines
+
+
 def _format_validation_results_text(
     conversion_type: str, results: list[ValidationResult], summary: dict[str, int]
 ) -> str:
@@ -1166,41 +1545,13 @@ def _format_validation_results_text(
 
 ✅ All validation checks passed! No issues found.
 """
-    output_lines = [
-        f"# Validation Results for {conversion_type} Conversion",
-        "",
-        "## Summary",
-        f"• Errors: {summary['errors']}",
-        f"• Warnings: {summary['warnings']}",
-        f"• Info: {summary['info']}",
-        "",
-    ]
 
-
-    errors
-    warnings = [r for r in results if r.level == ValidationLevel.WARNING]
-    infos = [r for r in results if r.level == ValidationLevel.INFO]
+    output_lines = _build_validation_header(conversion_type, summary)
+    errors, warnings, infos = _group_results_by_level(results)
 
-
-
-
-        for result in errors:
-            output_lines.append(str(result))
-            output_lines.append("")
-
-    if warnings:
-        output_lines.append("## ⚠️ Warnings")
-        output_lines.append("")
-        for result in warnings:
-            output_lines.append(str(result))
-            output_lines.append("")
-
-    if infos:
-        output_lines.append("## ℹ️ Information")
-        output_lines.append("")
-        for result in infos:
-            output_lines.append(str(result))
-            output_lines.append("")
+    output_lines.extend(_format_result_section("❌ Errors", "", errors))
+    output_lines.extend(_format_result_section("⚠️ Warnings", "", warnings))
+    output_lines.extend(_format_result_section("ℹ️ Information", "", infos))
 
     return "\n".join(output_lines)
 