mcp-souschef 2.0.1__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
souschef/assessment.py ADDED
@@ -0,0 +1,1498 @@
1
+ """
2
+ Assessment and migration planning module for Chef to Ansible migrations.
3
+
4
+ This module provides tools for analyzing Chef cookbook migration complexity,
5
+ generating migration plans, analyzing dependencies, and validating conversions.
6
+ """
7
+
8
import json
import re
from collections import Counter
from typing import Any

from souschef.core import METADATA_FILENAME, _normalize_path, _safe_join
from souschef.core.errors import format_error_with_context
from souschef.core.validation import (
    ValidationEngine,
    ValidationLevel,
    ValidationResult,
)
19
+
20
+
21
def assess_chef_migration_complexity(
    cookbook_paths: str,
    migration_scope: str = "full",
    target_platform: str = "ansible_awx",
) -> str:
    """
    Assess the complexity of migrating Chef cookbooks to Ansible with detailed analysis.

    Args:
        cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
        migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
        target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)

    Returns:
        Comprehensive migration complexity assessment with recommendations

    """
    try:
        # Reject bad arguments up front; the helper returns the error text.
        validation_error = _validate_assessment_inputs(
            cookbook_paths, migration_scope, target_platform
        )
        if validation_error:
            return validation_error

        # Resolve the input paths; an empty list is handled downstream.
        paths = _parse_cookbook_paths(cookbook_paths)

        # Per-cookbook assessments plus aggregated totals.
        assessments, metrics = _analyze_cookbook_metrics(paths)

        # Derive the advisory sections of the report.
        advice = _generate_migration_recommendations_from_assessment(
            assessments, metrics, target_platform
        )
        plan = _create_migration_roadmap(assessments)

        return _format_assessment_report(
            migration_scope,
            target_platform,
            metrics,
            assessments,
            advice,
            plan,
        )
    except Exception as e:
        return format_error_with_context(
            e, "assessing Chef migration complexity", cookbook_paths
        )
71
+
72
+
73
+ def _validate_migration_plan_inputs(
74
+ cookbook_paths: str, migration_strategy: str, timeline_weeks: int
75
+ ) -> str | None:
76
+ """
77
+ Validate migration plan inputs.
78
+
79
+ Returns:
80
+ Error message if validation fails, None if valid.
81
+
82
+ """
83
+ if not cookbook_paths or not cookbook_paths.strip():
84
+ return (
85
+ "Error: Cookbook paths cannot be empty\n\n"
86
+ "Suggestion: Provide comma-separated paths to Chef cookbooks"
87
+ )
88
+
89
+ valid_strategies = ["big_bang", "phased", "parallel"]
90
+ if migration_strategy not in valid_strategies:
91
+ return (
92
+ f"Error: Invalid migration strategy '{migration_strategy}'\n\n"
93
+ f"Suggestion: Use one of {', '.join(valid_strategies)}"
94
+ )
95
+
96
+ if not (1 <= timeline_weeks <= 104): # 1 week to 2 years
97
+ return (
98
+ f"Error: Timeline must be between 1 and 104 weeks, got {timeline_weeks}\n\n"
99
+ "Suggestion: Provide a realistic timeline (4-12 weeks typical)"
100
+ )
101
+
102
+ return None
103
+
104
+
105
def _parse_and_assess_cookbooks(cookbook_paths: str) -> tuple[list, str | None]:
    """
    Parse cookbook paths and assess each cookbook.

    Returns:
        Tuple of (cookbook_assessments, error_message).

    """
    candidates = (_normalize_path(raw.strip()) for raw in cookbook_paths.split(","))
    existing = [candidate for candidate in candidates if candidate.exists()]

    if not existing:
        message = (
            "Error: No valid cookbook paths found\n\n"
            "Suggestion: Ensure paths exist and point to cookbook directories"
        )
        return [], message

    # deepcode ignore PT: path normalized via _normalize_path
    assessments = [_assess_single_cookbook(path) for path in existing]
    return assessments, None
130
+
131
+
132
def _format_migration_plan_output(
    migration_plan: dict,
    migration_strategy: str,
    timeline_weeks: int,
    num_cookbooks: int,
) -> str:
    """Format migration plan as markdown output.

    Args:
        migration_plan: Mapping of pre-rendered plan sections; must contain
            the keys executive_summary, phases, timeline, team_requirements,
            prerequisites, testing_strategy, risk_mitigation,
            success_criteria and post_migration.
        migration_strategy: Strategy label, echoed into the report header.
        timeline_weeks: Timeline in weeks, echoed into the report header.
        num_cookbooks: Number of assessed cookbooks, echoed into the header.

    Returns:
        Markdown document assembled from the plan sections.

    """
    # NOTE(review): a missing section key raises KeyError here; callers are
    # expected to build the dict via _generate_detailed_migration_plan.
    return f"""# Chef to Ansible Migration Plan
# Strategy: {migration_strategy}
# Timeline: {timeline_weeks} weeks
# Cookbooks: {num_cookbooks}

## Executive Summary:
{migration_plan["executive_summary"]}

## Migration Phases:
{migration_plan["phases"]}

## Timeline and Milestones:
{migration_plan["timeline"]}

## Team Requirements:
{migration_plan["team_requirements"]}

## Prerequisites and Dependencies:
{migration_plan["prerequisites"]}

## Testing Strategy:
{migration_plan["testing_strategy"]}

## Risk Mitigation:
{migration_plan["risk_mitigation"]}

## Success Criteria:
{migration_plan["success_criteria"]}

## Post-Migration Tasks:
{migration_plan["post_migration"]}
"""
171
+
172
+
173
def generate_migration_plan(
    cookbook_paths: str, migration_strategy: str = "phased", timeline_weeks: int = 12
) -> str:
    """
    Generate a detailed migration plan from Chef to Ansible with timeline and milestones.

    Args:
        cookbook_paths: Comma-separated paths to Chef cookbooks
        migration_strategy: Migration approach (big_bang, phased, parallel)
        timeline_weeks: Target timeline in weeks

    Returns:
        Detailed migration plan with phases, milestones, and deliverables

    """
    try:
        # Argument validation first; any problem short-circuits with its message.
        problem = _validate_migration_plan_inputs(
            cookbook_paths, migration_strategy, timeline_weeks
        )
        if problem:
            return problem

        # Resolve and assess every cookbook on the given paths.
        assessments, problem = _parse_and_assess_cookbooks(cookbook_paths)
        if problem:
            return problem

        # Build the strategy-specific plan, then render it as markdown.
        plan = _generate_detailed_migration_plan(
            assessments, migration_strategy, timeline_weeks
        )
        return _format_migration_plan_output(
            plan, migration_strategy, timeline_weeks, len(assessments)
        )

    except Exception as e:
        return format_error_with_context(e, "generating migration plan", cookbook_paths)
215
+
216
+
217
def analyze_cookbook_dependencies(
    cookbook_path: str, dependency_depth: str = "direct"
) -> str:
    """
    Analyze cookbook dependencies and identify migration order requirements.

    Args:
        cookbook_path: Path to Chef cookbook or cookbooks directory
        dependency_depth: Analysis depth (direct, transitive, full).
            NOTE(review): this value is validated and echoed in the report
            header, but the analysis helpers below do not receive it, so it
            does not currently change what is analyzed — confirm intent.

    Returns:
        Dependency analysis with migration order recommendations

    """
    try:
        # Validate inputs
        valid_depths = ["direct", "transitive", "full"]
        if dependency_depth not in valid_depths:
            return (
                f"Error: Invalid dependency depth '{dependency_depth}'\n\n"
                f"Suggestion: Use one of {', '.join(valid_depths)}"
            )

        cookbook_path_obj = _normalize_path(cookbook_path)
        if not cookbook_path_obj.exists():
            return (
                f"Error: Cookbook path not found: {cookbook_path}\n\n"
                "Suggestion: Check that the path exists and points to a cookbook directory"
            )

        # Analyze dependencies (metadata.rb / Berksfile scan).
        dependency_analysis = _analyze_cookbook_dependencies_detailed(cookbook_path_obj)

        # Determine migration order
        migration_order = _determine_migration_order(dependency_analysis)

        # Identify circular dependencies
        circular_deps = _identify_circular_dependencies(dependency_analysis)

        # Each report section below is rendered by a dedicated formatter.
        return f"""# Cookbook Dependency Analysis
# Cookbook: {cookbook_path_obj.name}
# Analysis Depth: {dependency_depth}

## Dependency Overview:
{_format_dependency_overview(dependency_analysis)}

## Dependency Graph:
{_format_dependency_graph(dependency_analysis)}

## Migration Order Recommendations:
{_format_migration_order(migration_order)}

## Circular Dependencies:
{_format_circular_dependencies(circular_deps)}

## External Dependencies:
{_format_external_dependencies(dependency_analysis)}

## Community Cookbooks:
{_format_community_cookbooks(dependency_analysis)}

## Migration Impact Analysis:
{_analyze_dependency_migration_impact(dependency_analysis)}
"""
    except Exception as e:
        return format_error_with_context(
            e, "analyzing cookbook dependencies", cookbook_path
        )
285
+
286
+
287
def generate_migration_report(
    _assessment_results: str,
    report_format: str = "executive",
    include_technical_details: str = "yes",
) -> str:
    """
    Generate comprehensive migration report from assessment results.

    Args:
        _assessment_results: JSON string or summary of assessment results
            (reserved for future use; not read by this function)
        report_format: Report format (executive, technical, combined).
            NOTE(review): only echoed into the report header — it does not
            change which sections are rendered.
        include_technical_details: Include detailed technical analysis (yes/no)

    Returns:
        Formatted migration report for stakeholders

    """
    try:
        # Local import: only needed here to timestamp the report header.
        from datetime import datetime

        # Generate report based on format; the boolean controls whether the
        # technical-details section is produced.
        report = _generate_comprehensive_migration_report(
            include_technical_details == "yes"
        )

        current_date = datetime.now().strftime("%Y-%m-%d")

        return f"""# Chef to Ansible Migration Report
**Generated:** {current_date}
**Report Type:** {report_format.title()}
**Technical Details:** {"Included" if include_technical_details == "yes" else "Summary Only"}

## Executive Summary
{report["executive_summary"]}

## Migration Scope and Objectives
{report["scope_objectives"]}

## Current State Analysis
{report["current_state"]}

## Target State Architecture
{report["target_state"]}

## Migration Strategy and Approach
{report["strategy"]}

## Cost-Benefit Analysis
{report["cost_benefit"]}

## Timeline and Resource Requirements
{report["timeline_resources"]}

## Risk Assessment and Mitigation
{report["risk_assessment"]}

{"## Technical Implementation Details" if include_technical_details == "yes" else ""}
{report.get("technical_details", "") if include_technical_details == "yes" else ""}

## Recommendations and Next Steps
{report["recommendations"]}

## Appendices
{report["appendices"]}
"""
    except Exception as e:
        return format_error_with_context(e, "generating migration report")
354
+
355
+
356
def validate_conversion(
    conversion_type: str,
    result_content: str,
    output_format: str = "text",
) -> str:
    """
    Validate a Chef-to-Ansible conversion for correctness, best practices, and quality.

    The validation framework inspects the conversion across several
    dimensions: syntax (YAML/Jinja2/Python), semantic equivalence, best
    practices (naming, idempotency, task organization), security
    (privilege escalation, sensitive data), and performance.

    Args:
        conversion_type: Type of conversion to validate
            ('resource', 'recipe', 'template', 'inspec')
        result_content: Converted Ansible code or configuration
        output_format: Output format ('text', 'json', 'summary')

    Returns:
        Validation report with errors, warnings, and suggestions

    """
    try:
        engine = ValidationEngine()
        findings = engine.validate_conversion(conversion_type, result_content)
        summary = engine.get_summary()

        # Three renderings of the same findings; 'text' is the default.
        if output_format == "json":
            payload = {
                "summary": summary,
                "results": [finding.to_dict() for finding in findings],
            }
            return json.dumps(payload, indent=2)

        if output_format == "summary":
            return _format_validation_results_summary(conversion_type, summary)

        return _format_validation_results_text(conversion_type, findings, summary)

    except Exception as e:
        return format_error_with_context(
            e, f"validating Ansible {conversion_type} conversion"
        )
403
+
404
+
405
+ # Private helper functions for assessment
406
+
407
+
408
+ def _validate_assessment_inputs(
409
+ cookbook_paths: str, migration_scope: str, target_platform: str
410
+ ) -> str | None:
411
+ """
412
+ Validate inputs for migration assessment.
413
+
414
+ Args:
415
+ cookbook_paths: Paths to cookbooks
416
+ migration_scope: Scope of migration
417
+ target_platform: Target platform
418
+
419
+ Returns:
420
+ Error message if validation fails, None otherwise
421
+
422
+ """
423
+ if not cookbook_paths or not cookbook_paths.strip():
424
+ return (
425
+ "Error: Cookbook paths cannot be empty\n\n"
426
+ "Suggestion: Provide comma-separated paths to Chef cookbooks"
427
+ )
428
+
429
+ valid_scopes = ["full", "recipes_only", "infrastructure_only"]
430
+ if migration_scope not in valid_scopes:
431
+ return (
432
+ f"Error: Invalid migration scope '{migration_scope}'\n\n"
433
+ f"Suggestion: Use one of {', '.join(valid_scopes)}"
434
+ )
435
+
436
+ valid_platforms = ["ansible_awx", "ansible_core", "ansible_tower"]
437
+ if target_platform not in valid_platforms:
438
+ return (
439
+ f"Error: Invalid target platform '{target_platform}'\n\n"
440
+ f"Suggestion: Use one of {', '.join(valid_platforms)}"
441
+ )
442
+
443
+ return None
444
+
445
+
446
def _parse_cookbook_paths(cookbook_paths: str) -> list[Any]:
    """
    Parse and validate cookbook paths.

    Args:
        cookbook_paths: Comma-separated paths to cookbooks

    Returns:
        List of valid Path objects (may be empty)

    """
    # Normalize each comma-separated entry, then keep only those that exist.
    return [
        candidate
        for candidate in (
            _normalize_path(part.strip()) for part in cookbook_paths.split(",")
        )
        if candidate.exists()
    ]
460
+
461
+
462
def _analyze_cookbook_metrics(
    valid_paths: list[Any],
) -> tuple[list[Any], dict[str, int]]:
    """
    Analyze metrics for all cookbooks.

    Args:
        valid_paths: List of valid cookbook paths

    Returns:
        Tuple of (cookbook_assessments, overall_metrics)

    """
    assessments: list[Any] = []
    totals = {
        "total_cookbooks": 0,
        "total_recipes": 0,
        "total_resources": 0,
        "complexity_score": 0,
        "estimated_effort_days": 0,
    }

    for path in valid_paths:
        # deepcode ignore PT: path normalized via _normalize_path
        result = _assess_single_cookbook(path)
        assessments.append(result)

        # Fold this cookbook's numbers into the running totals.
        totals["total_cookbooks"] += 1
        totals["total_recipes"] += result["metrics"]["recipe_count"]
        totals["total_resources"] += result["metrics"]["resource_count"]
        totals["complexity_score"] += result["complexity_score"]
        totals["estimated_effort_days"] += result["estimated_effort_days"]

    # Average complexity is only defined when at least one cookbook was seen.
    if assessments:
        totals["avg_complexity"] = int(totals["complexity_score"] / len(assessments))

    return assessments, totals
503
+
504
+
505
def _format_assessment_report(
    migration_scope: str,
    target_platform: str,
    overall_metrics: dict[str, int],
    cookbook_assessments: list[Any],
    recommendations: str,
    roadmap: str,
) -> str:
    """
    Format the final assessment report.

    Args:
        migration_scope: Scope of migration (echoed into the header)
        target_platform: Target platform (echoed into the header and used
            by the risk/resource sections)
        overall_metrics: Overall metrics dictionary from _analyze_cookbook_metrics
        cookbook_assessments: List of cookbook assessments
        recommendations: Pre-rendered migration recommendations section
        roadmap: Pre-rendered migration roadmap section

    Returns:
        Formatted report string

    """
    # Each section is rendered by a dedicated helper; recommendations and
    # roadmap arrive pre-rendered from the caller.
    return f"""# Chef to Ansible Migration Assessment
# Scope: {migration_scope}
# Target Platform: {target_platform}

## Overall Migration Metrics:
{_format_overall_metrics(overall_metrics)}

## Cookbook Assessments:
{_format_cookbook_assessments(cookbook_assessments)}

## Migration Complexity Analysis:
{_format_complexity_analysis(cookbook_assessments)}

## Migration Recommendations:
{recommendations}

## Migration Roadmap:
{roadmap}

## Risk Assessment:
{_assess_migration_risks(cookbook_assessments, target_platform)}

## Resource Requirements:
{_estimate_resource_requirements(overall_metrics, target_platform)}
"""
553
+
554
+
555
def _count_cookbook_artifacts(cookbook_path) -> dict[str, int]:
    """Count basic cookbook artifacts (recipes, templates, files).

    Args:
        cookbook_path: Cookbook root (already normalized by the caller).

    Returns:
        Dict with recipe_count (recipes/*.rb), templates and files counts;
        a missing subdirectory counts as zero.

    """

    def _count(subdir: str, pattern: str) -> int:
        # Resolve each subdirectory exactly once (the previous version
        # called _safe_join twice for templates/ and files/).
        path = _safe_join(cookbook_path, subdir)
        return len(list(path.glob(pattern))) if path.exists() else 0

    return {
        "recipe_count": _count("recipes", "*.rb"),
        "templates": _count("templates", "*"),
        "files": _count("files", "*"),
    }
577
+
578
+
579
def _analyze_recipe_complexity(cookbook_path) -> dict[str, int]:
    """Analyze recipe files for resource counts, Ruby blocks, and custom resources.

    Args:
        cookbook_path: Cookbook root (already normalized by the caller).

    Returns:
        Dict with resource_count, custom_resources and ruby_blocks totals
        summed over every recipes/*.rb file; all zero when recipes/ is absent.

    """
    # Compile the patterns once instead of re-resolving them per recipe file.
    resource_re = re.compile(r'\w{1,100}\s+[\'"]([^\'"]{0,200})[\'"]\s+do')
    ruby_block_re = re.compile(r"ruby_block|execute|bash", re.IGNORECASE)
    custom_re = re.compile(r"custom_resource|provides|use_inline_resources")

    counts = {"resource_count": 0, "custom_resources": 0, "ruby_blocks": 0}

    recipes_dir = _safe_join(cookbook_path, "recipes")
    if not recipes_dir.exists():
        return counts

    for recipe_file in recipes_dir.glob("*.rb"):
        # errors="ignore": a recipe with a stray non-UTF-8 byte should not
        # abort the whole assessment.
        with recipe_file.open("r", encoding="utf-8", errors="ignore") as f:
            content = f.read()
        # Chef resources look like:  package 'name' do
        counts["resource_count"] += len(resource_re.findall(content))
        counts["ruby_blocks"] += len(ruby_block_re.findall(content))
        counts["custom_resources"] += len(custom_re.findall(content))

    return counts
609
+
610
+
611
+ def _calculate_complexity_score(metrics: dict[str, int]) -> int:
612
+ """Calculate complexity score (0-100) based on metrics."""
613
+ recipe_count = metrics["recipe_count"]
614
+ resource_count = metrics["resource_count"]
615
+
616
+ complexity_factors = {
617
+ "recipe_count": min(recipe_count * 2, 20),
618
+ "resource_density": min(resource_count / max(recipe_count, 1) * 5, 25),
619
+ "custom_resources": metrics["custom_resources"] * 10,
620
+ "ruby_blocks": metrics["ruby_blocks"] * 5,
621
+ "templates": min(metrics["templates"] * 2, 15),
622
+ "files": min(metrics["files"] * 1, 10),
623
+ }
624
+
625
+ return int(sum(complexity_factors.values()))
626
+
627
+
628
+ def _identify_migration_challenges(
629
+ metrics: dict[str, int], complexity_score: int
630
+ ) -> list[str]:
631
+ """Identify migration challenges based on metrics."""
632
+ challenges = []
633
+
634
+ if metrics["custom_resources"] > 0:
635
+ challenges.append(
636
+ f"{metrics['custom_resources']} custom resources requiring manual conversion"
637
+ )
638
+ if metrics["ruby_blocks"] > 5:
639
+ challenges.append(
640
+ f"{metrics['ruby_blocks']} Ruby blocks needing shell script conversion"
641
+ )
642
+ if complexity_score > 70:
643
+ challenges.append("High complexity cookbook requiring expert review")
644
+
645
+ return challenges
646
+
647
+
648
+ def _determine_migration_priority(complexity_score: int) -> str:
649
+ """Determine migration priority based on complexity score."""
650
+ if complexity_score < 30:
651
+ return "low"
652
+ elif complexity_score > 70:
653
+ return "high"
654
+ return "medium"
655
+
656
+
657
def _assess_single_cookbook(cookbook_path) -> dict:
    """Assess complexity of a single cookbook."""
    cookbook = _normalize_path(cookbook_path)

    # Merge artifact counts and recipe-derived complexity into one metric set.
    metrics = {
        **_count_cookbook_artifacts(cookbook),
        **_analyze_recipe_complexity(cookbook),
    }

    score = _calculate_complexity_score(metrics)
    # Half a day per recipe, scaled by a 1.0x-2.0x complexity multiplier.
    effort = round(metrics["recipe_count"] * 0.5 * (1 + score / 100), 1)

    return {
        "cookbook_name": cookbook.name,
        "cookbook_path": str(cookbook),
        "metrics": metrics,
        "complexity_score": score,
        "estimated_effort_days": effort,
        "challenges": _identify_migration_challenges(metrics, score),
        "migration_priority": _determine_migration_priority(score),
        "dependencies": [],
    }
683
+
684
+
685
+ def _format_overall_metrics(metrics: dict) -> str:
686
+ """Format overall migration metrics."""
687
+ return f"""• Total Cookbooks: {metrics["total_cookbooks"]}
688
+ • Total Recipes: {metrics["total_recipes"]}
689
+ • Total Resources: {metrics["total_resources"]}
690
+ • Average Complexity: {metrics.get("avg_complexity", 0):.1f}/100
691
+ • Estimated Total Effort: {metrics["estimated_effort_days"]:.1f} person-days
692
+ • Estimated Duration: {int(metrics["estimated_effort_days"] / 5)}-{int(metrics["estimated_effort_days"] / 3)} weeks"""
693
+
694
+
695
+ def _format_cookbook_assessments(assessments: list) -> str:
696
+ """Format individual cookbook assessments."""
697
+ if not assessments:
698
+ return "No cookbooks assessed."
699
+
700
+ def _get_priority_icon(priority: str) -> str:
701
+ """Get priority icon based on migration priority level."""
702
+ if priority == "high":
703
+ return "🔴"
704
+ elif priority == "medium":
705
+ return "🟡"
706
+ else:
707
+ return "🟢"
708
+
709
+ formatted = []
710
+ for assessment in assessments:
711
+ priority_icon = _get_priority_icon(assessment["migration_priority"])
712
+ formatted.append(f"""### {assessment["cookbook_name"]} {priority_icon}
713
+ • Complexity Score: {assessment["complexity_score"]:.1f}/100
714
+ • Estimated Effort: {assessment["estimated_effort_days"]} days
715
+ • Recipes: {assessment["metrics"]["recipe_count"]}
716
+ • Resources: {assessment["metrics"]["resource_count"]}
717
+ • Custom Resources: {assessment["metrics"]["custom_resources"]}
718
+ • Challenges: {len(assessment["challenges"])}""")
719
+
720
+ return "\n\n".join(formatted)
721
+
722
+
723
def _format_complexity_analysis(assessments: list) -> str:
    """Format complexity analysis."""
    if not assessments:
        return "No complexity analysis available."

    # Bucket counts by the same thresholds used for migration priority.
    high = sum(1 for a in assessments if a["complexity_score"] > 70)
    medium = sum(1 for a in assessments if 30 <= a["complexity_score"] <= 70)
    low = sum(1 for a in assessments if a["complexity_score"] < 30)

    return f"""• High Complexity (>70): {high} cookbooks
• Medium Complexity (30-70): {medium} cookbooks
• Low Complexity (<30): {low} cookbooks

**Top Migration Challenges:**
{_identify_top_challenges(assessments)}"""
738
+
739
+
740
+ def _identify_top_challenges(assessments: list) -> str:
741
+ """Identify the most common migration challenges."""
742
+ challenge_counts: dict[str, int] = {}
743
+ for assessment in assessments:
744
+ for challenge in assessment["challenges"]:
745
+ challenge_counts[challenge] = challenge_counts.get(challenge, 0) + 1
746
+
747
+ top_challenges = sorted(challenge_counts.items(), key=lambda x: x[1], reverse=True)[
748
+ :5
749
+ ]
750
+
751
+ formatted = []
752
+ for challenge, count in top_challenges:
753
+ formatted.append(f" - {challenge} ({count} cookbooks)")
754
+
755
+ return (
756
+ "\n".join(formatted)
757
+ if formatted
758
+ else " - No significant challenges identified"
759
+ )
760
+
761
+
762
+ def _generate_migration_recommendations_from_assessment(
763
+ assessments: list, metrics: dict, target_platform: str
764
+ ) -> str:
765
+ """Generate migration recommendations based on assessment."""
766
+ recommendations = []
767
+
768
+ # Platform-specific recommendations
769
+ if target_platform == "ansible_awx":
770
+ recommendations.append(
771
+ "• Implement AWX/AAP integration for job templates and workflows"
772
+ )
773
+ recommendations.append(
774
+ "• Set up dynamic inventory sources for Chef server integration"
775
+ )
776
+
777
+ # Complexity-based recommendations
778
+ avg_complexity = metrics.get("avg_complexity", 0)
779
+ if avg_complexity > 60:
780
+ recommendations.append(
781
+ "• Consider phased migration approach due to high complexity"
782
+ )
783
+ recommendations.append(
784
+ "• Allocate additional time for custom resource conversion"
785
+ )
786
+ recommendations.append("• Plan for comprehensive testing and validation")
787
+ else:
788
+ recommendations.append("• Standard migration timeline should be sufficient")
789
+ recommendations.append("• Consider big-bang approach for faster delivery")
790
+
791
+ # Effort-based recommendations
792
+ total_effort = metrics["estimated_effort_days"]
793
+ if total_effort > 30:
794
+ recommendations.append("• Establish dedicated migration team")
795
+ recommendations.append("• Consider parallel migration tracks")
796
+ else:
797
+ recommendations.append("• Single developer can handle migration with oversight")
798
+
799
+ # Custom resource recommendations
800
+ custom_resource_cookbooks = [
801
+ a for a in assessments if a["metrics"]["custom_resources"] > 0
802
+ ]
803
+ if custom_resource_cookbooks:
804
+ recommendations.append(
805
+ f"• {len(custom_resource_cookbooks)} cookbooks need custom resource conversion"
806
+ )
807
+ recommendations.append(
808
+ "• Prioritize custom resource analysis and conversion strategy"
809
+ )
810
+
811
+ return "\n".join(recommendations)
812
+
813
+
814
+ def _create_migration_roadmap(assessments: list) -> str:
815
+ """Create a migration roadmap based on assessments."""
816
+ # Sort cookbooks by complexity (low to high for easier wins first)
817
+ sorted_cookbooks = sorted(assessments, key=lambda x: x["complexity_score"])
818
+
819
+ phases = {
820
+ "Phase 1 - Foundation (Weeks 1-2)": [
821
+ "Set up Ansible/AWX environment",
822
+ "Establish CI/CD pipelines",
823
+ "Create testing framework",
824
+ "Train team on Ansible best practices",
825
+ ],
826
+ "Phase 2 - Low Complexity Migration (Weeks 3-5)": [],
827
+ "Phase 3 - Medium Complexity Migration (Weeks 6-9)": [],
828
+ "Phase 4 - High Complexity Migration (Weeks 10-12)": [],
829
+ "Phase 5 - Validation and Cleanup (Weeks 13-14)": [
830
+ "Comprehensive testing",
831
+ "Performance validation",
832
+ "Documentation updates",
833
+ "Team training and handover",
834
+ ],
835
+ }
836
+
837
+ # Distribute cookbooks across phases
838
+ for cookbook in sorted_cookbooks:
839
+ if cookbook["complexity_score"] < 30:
840
+ phases["Phase 2 - Low Complexity Migration (Weeks 3-5)"].append(
841
+ f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
842
+ )
843
+ elif cookbook["complexity_score"] < 70:
844
+ phases["Phase 3 - Medium Complexity Migration (Weeks 6-9)"].append(
845
+ f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
846
+ )
847
+ else:
848
+ phases["Phase 4 - High Complexity Migration (Weeks 10-12)"].append(
849
+ f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
850
+ )
851
+
852
+ # Format roadmap
853
+ roadmap_formatted = []
854
+ for phase, tasks in phases.items():
855
+ roadmap_formatted.append(f"\n### {phase}")
856
+ for task in tasks:
857
+ roadmap_formatted.append(f" - {task}")
858
+
859
+ return "\n".join(roadmap_formatted)
860
+
861
+
862
+ def _assess_technical_complexity_risks(assessments: list) -> list[str]:
863
+ """Assess risks related to technical complexity."""
864
+ risks = []
865
+ high_complexity_count = len([a for a in assessments if a["complexity_score"] > 70])
866
+ if high_complexity_count > 0:
867
+ risks.append(
868
+ f"🔴 HIGH: {high_complexity_count} high-complexity cookbooks may cause delays"
869
+ )
870
+ return risks
871
+
872
+
873
+ def _assess_custom_resource_risks(assessments: list) -> list[str]:
874
+ """Assess risks related to custom resources and Ruby blocks."""
875
+ risks = []
876
+ custom_resource_count = sum(a["metrics"]["custom_resources"] for a in assessments)
877
+ if custom_resource_count > 0:
878
+ risks.append(
879
+ f"🟡 MEDIUM: {custom_resource_count} custom resources need manual conversion"
880
+ )
881
+
882
+ ruby_block_count = sum(a["metrics"]["ruby_blocks"] for a in assessments)
883
+ if ruby_block_count > 10:
884
+ risks.append(
885
+ f"🟡 MEDIUM: {ruby_block_count} Ruby blocks require shell script conversion"
886
+ )
887
+
888
+ return risks
889
+
890
+
891
+ def _assess_timeline_risks(assessments: list) -> list[str]:
892
+ """Assess risks related to migration timeline and scope."""
893
+ risks = []
894
+ total_effort = sum(a["estimated_effort_days"] for a in assessments)
895
+ if total_effort > 50:
896
+ risks.append("🟡 MEDIUM: Large migration scope may impact timeline")
897
+ return risks
898
+
899
+
900
+ def _assess_platform_risks(target_platform: str) -> list[str]:
901
+ """Assess risks related to target platform."""
902
+ if target_platform == "ansible_awx":
903
+ return ["🟢 LOW: AWX integration well-supported with existing tools"]
904
+ return []
905
+
906
+
907
def _assess_migration_risks(assessments: list, target_platform: str) -> str:
    """Assess migration risks."""
    # Collect findings from each risk category in a fixed order.
    findings = [
        *_assess_technical_complexity_risks(assessments),
        *_assess_custom_resource_risks(assessments),
        *_assess_timeline_risks(assessments),
        *_assess_platform_risks(target_platform),
    ]

    if not findings:
        findings = ["🟢 LOW: No significant migration risks identified"]

    return "\n".join(findings)
920
+
921
+
922
+ def _estimate_resource_requirements(metrics: dict, target_platform: str) -> str:
923
+ """Estimate resource requirements for migration."""
924
+ total_effort = metrics["estimated_effort_days"]
925
+
926
+ # Team size recommendations
927
+ if total_effort < 20:
928
+ team_size = "1 developer + 1 reviewer"
929
+ timeline = "4-6 weeks"
930
+ elif total_effort < 50:
931
+ team_size = "2 developers + 1 senior reviewer"
932
+ timeline = "6-10 weeks"
933
+ else:
934
+ team_size = "3-4 developers + 1 tech lead + 1 architect"
935
+ timeline = "10-16 weeks"
936
+
937
+ return f"""• **Team Size:** {team_size}
938
+ • **Estimated Timeline:** {timeline}
939
+ • **Total Effort:** {total_effort:.1f} person-days
940
+ • **Infrastructure:** {target_platform.replace("_", "/").upper()} environment
941
+ • **Testing:** Dedicated test environment recommended
942
+ • **Training:** 2-3 days Ansible/AWX training for team"""
943
+
944
+
945
def _analyze_cookbook_dependencies_detailed(cookbook_path) -> dict:
    """Analyze cookbook dependencies in detail.

    Scans metadata.rb for ``depends`` declarations (direct dependencies)
    and the Berksfile for ``cookbook`` declarations (external
    dependencies), then flags any dependency whose name matches a
    well-known community cookbook.
    """
    result = {
        "cookbook_name": cookbook_path.name,
        "direct_dependencies": [],
        "transitive_dependencies": [],
        "external_dependencies": [],
        "community_cookbooks": [],
        "circular_dependencies": [],
    }

    # `depends 'name'` entries in metadata.rb -> direct dependencies.
    metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
    if metadata_file.exists():
        with metadata_file.open("r", encoding="utf-8", errors="ignore") as handle:
            result["direct_dependencies"] = re.findall(
                r'depends\s+[\'"]([^\'"]+)[\'"]', handle.read()
            )

    # `cookbook 'name'` entries in the Berksfile -> external dependencies.
    berksfile = _safe_join(cookbook_path, "Berksfile")
    if berksfile.exists():
        with berksfile.open("r", encoding="utf-8", errors="ignore") as handle:
            result["external_dependencies"].extend(
                re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]', handle.read())
            )

    # Substring match against well-known community cookbook names.
    known_community = (
        "apache2",
        "nginx",
        "mysql",
        "postgresql",
        "java",
        "python",
        "nodejs",
        "docker",
        "build-essential",
        "git",
        "ntp",
        "sudo",
        "users",
    )
    for dep in result["direct_dependencies"] + result["external_dependencies"]:
        lowered = dep.lower()
        if any(pattern in lowered for pattern in known_community):
            result["community_cookbooks"].append(dep)

    return result
999
+
1000
+
1001
+ def _determine_migration_order(dependency_analysis: dict) -> list:
1002
+ """Determine optimal migration order based on dependencies."""
1003
+ # For now, return a simple order based on dependency count
1004
+ # In a full implementation, this would use topological sorting
1005
+
1006
+ order = []
1007
+
1008
+ # Leaf nodes first (no dependencies)
1009
+ if not dependency_analysis["direct_dependencies"]:
1010
+ order.append(
1011
+ {
1012
+ "cookbook": dependency_analysis["cookbook_name"],
1013
+ "priority": 1,
1014
+ "reason": "No dependencies - can be migrated first",
1015
+ }
1016
+ )
1017
+ else:
1018
+ # Has dependencies - migrate after dependencies
1019
+ dep_count = len(dependency_analysis["direct_dependencies"])
1020
+ priority = min(dep_count + 1, 5) # Cap at priority 5
1021
+ order.append(
1022
+ {
1023
+ "cookbook": dependency_analysis["cookbook_name"],
1024
+ "priority": priority,
1025
+ "reason": f"Has {dep_count} dependencies - migrate after dependencies",
1026
+ }
1027
+ )
1028
+
1029
+ return order
1030
+
1031
+
1032
+ def _identify_circular_dependencies(dependency_analysis: dict) -> list:
1033
+ """Identify circular dependencies (simplified)."""
1034
+ # This is a simplified implementation
1035
+ # A full implementation would build a dependency graph and detect cycles
1036
+
1037
+ circular = []
1038
+ cookbook_name = dependency_analysis["cookbook_name"]
1039
+
1040
+ # Check if any dependency might depend back on this cookbook
1041
+ for dep in dependency_analysis["direct_dependencies"]:
1042
+ if cookbook_name.lower() in dep.lower(): # Simple heuristic
1043
+ circular.append(
1044
+ {"cookbook1": cookbook_name, "cookbook2": dep, "type": "potential"}
1045
+ )
1046
+
1047
+ return circular
1048
+
1049
+
1050
def _generate_detailed_migration_plan(
    assessments: list, strategy: str, timeline_weeks: int
) -> dict:
    """Generate detailed migration plan.

    Args:
        assessments: Per-cookbook assessment dicts; each entry must carry
            "estimated_effort_days" (the phased-strategy helper also reads
            "complexity_score").
        strategy: Migration approach — "phased", "big_bang", or any other
            value, which is treated as parallel.
        timeline_weeks: Overall migration duration used to size phases.

    Returns:
        Dict mapping plan-section names to formatted text blocks.
        NOTE(review): "risk_mitigation" and "post_migration" are
        initialised but never populated here — confirm whether callers
        fill them or they are intentionally blank.

    """
    # Section skeleton; the assignments below overwrite the placeholders.
    plan = {
        "executive_summary": "",
        "phases": "",
        "timeline": "",
        "team_requirements": "",
        "prerequisites": "",
        "testing_strategy": "",
        "risk_mitigation": "",
        "success_criteria": "",
        "post_migration": "",
    }

    total_cookbooks = len(assessments)
    total_effort = sum(a["estimated_effort_days"] for a in assessments)

    plan["executive_summary"] = (
        f"""This migration plan covers {total_cookbooks} Chef cookbooks with an estimated effort of {total_effort:.1f} person-days over {timeline_weeks} weeks using a {strategy} approach. The plan balances speed of delivery with risk mitigation, focusing on early wins to build momentum while carefully handling complex cookbooks."""
    )

    # Generate phases based on strategy
    if strategy == "phased":
        plan["phases"] = _generate_phased_migration_phases(assessments, timeline_weeks)
    elif strategy == "big_bang":
        plan["phases"] = _generate_big_bang_phases(assessments, timeline_weeks)
    else:  # parallel
        plan["phases"] = _generate_parallel_migration_phases(timeline_weeks)

    plan["timeline"] = _generate_migration_timeline(strategy, timeline_weeks)

    # Developer head-count scales with effort (~1 per 10 person-days),
    # clamped to the 1..3 range.
    # NOTE(review): a float total_effort makes `// 10` yield a float, so
    # this can render as e.g. "2.0 Ansible Developers" — int() may be
    # intended; confirm.
    plan["team_requirements"] = f"""**Core Team:**
• 1 Migration Lead (Ansible expert)
• {min(3, max(1, total_effort // 10))} Ansible Developers
• 1 Chef SME (part-time consultation)
• 1 QA Engineer for testing
• 1 DevOps Engineer for infrastructure

**Skills Required:**
• Advanced Ansible/AWX experience
• Chef cookbook understanding
• Infrastructure as Code principles
• CI/CD pipeline experience"""
    plan["prerequisites"] = """• AWX/AAP environment setup and configured
• Git repository structure established
• CI/CD pipelines created for Ansible playbooks
• Test environments provisioned
• Team training on Ansible best practices completed
• Chef cookbook inventory and documentation review
• Stakeholder alignment on migration approach"""
    plan["testing_strategy"] = """**Testing Phases:**
1. **Unit Testing:** Ansible syntax validation and linting
2. **Integration Testing:** Playbook execution in test environments
3. **Functional Testing:** End-to-end application functionality validation
4. **Performance Testing:** Resource usage and execution time comparison
5. **User Acceptance Testing:** Stakeholder validation of migrated functionality

**Testing Tools:**
• ansible-lint for syntax validation
• molecule for role testing
• testinfra for infrastructure testing
• Custom validation scripts for Chef parity"""
    plan[
        "success_criteria"
    ] = """• All Chef cookbooks successfully converted to Ansible playbooks
• 100% functional parity between Chef and Ansible implementations
• No performance degradation in deployment times
• All automated tests passing
• Team trained and comfortable with new Ansible workflows
• Documentation complete and accessible
• Rollback procedures tested and documented"""
    return plan
1124
+
1125
+
1126
def _generate_comprehensive_migration_report(include_technical: bool) -> dict:
    """Generate comprehensive migration report.

    Args:
        include_technical: When True, a "technical_details" section is
            added (note: this key is not part of the initial skeleton).

    Returns:
        Dict of report sections keyed by section name.
        NOTE(review): "strategy", "cost_benefit", "timeline_resources",
        "risk_assessment", "recommendations", and "appendices" are
        initialised but never populated here — confirm whether callers
        fill them in.

    """
    # Section skeleton; only some keys are populated below.
    report = {
        "executive_summary": "",
        "scope_objectives": "",
        "current_state": "",
        "target_state": "",
        "strategy": "",
        "cost_benefit": "",
        "timeline_resources": "",
        "risk_assessment": "",
        "recommendations": "",
        "appendices": "",
    }

    # Executive Summary
    report[
        "executive_summary"
    ] = """This report outlines the migration strategy from Chef to Ansible/AWX, providing a comprehensive analysis of the current Chef infrastructure and a detailed roadmap for transition. The migration will modernize configuration management capabilities while reducing operational complexity and improving deployment automation.

**Key Findings:**
• Migration is technically feasible with moderate complexity
• Estimated 8-16 week timeline depending on approach
• Significant long-term cost savings and operational improvements
• Low-to-medium risk with proper planning and execution"""
    # Scope and Objectives
    report["scope_objectives"] = """**Migration Scope:**
• All production Chef cookbooks and recipes
• Chef server configurations and node management
• Existing deployment pipelines and automation
• Monitoring and compliance integrations

**Primary Objectives:**
• Modernize configuration management with Ansible/AWX
• Improve deployment reliability and speed
• Reduce operational overhead and complexity
• Enhance security and compliance capabilities
• Standardize on Red Hat ecosystem tools"""
    # Current State Analysis (X/Y are placeholders, not computed values)
    report["current_state"] = """**Current Chef Infrastructure:**
• Chef Server managing X nodes across multiple environments
• Y cookbooks covering infrastructure and application deployment
• Established CI/CD pipelines with Chef integration
• Monitoring and compliance reporting in place

**Pain Points Identified:**
• Complex Chef DSL requiring Ruby expertise
• Lengthy convergence times in large environments
• Limited workflow orchestration capabilities
• Dependency management challenges
• Scaling limitations with current architecture"""
    # Target State Architecture
    report["target_state"] = """**Target Ansible/AWX Architecture:**
• Red Hat Ansible Automation Platform (AWX/AAP)
• Git-based playbook and role management
• Dynamic inventory from multiple sources
• Integrated workflow templates and job scheduling
• Enhanced RBAC and audit capabilities

**Key Improvements:**
• YAML-based playbooks (easier to read/write)
• Faster execution with SSH-based architecture
• Rich workflow orchestration capabilities
• Better integration with CI/CD tools
• Enhanced scalability and performance"""
    if include_technical:
        # Optional deep-dive section; key only exists when requested.
        report["technical_details"] = """## Technical Implementation Approach

### Cookbook Conversion Strategy
• **Resource Mapping:** Direct mapping of Chef resources to Ansible modules
• **Variable Extraction:** Chef node attributes converted to Ansible variables
• **Template Conversion:** ERB templates converted to Jinja2 format
• **Custom Resources:** Manual conversion to Ansible roles/modules

### Data Migration
• **Node Attributes:** Migrated to Ansible inventory variables
• **Data Bags:** Converted to Ansible Vault encrypted variables
• **Environments:** Mapped to inventory groups with variable precedence

### Testing and Validation
• **Syntax Validation:** ansible-lint and yaml-lint integration
• **Functional Testing:** molecule framework for role testing
• **Integration Testing:** testinfra for infrastructure validation
• **Performance Testing:** Execution time and resource usage comparison"""
    return report
1211
+
1212
+
1213
+ def _format_dependency_overview(analysis: dict) -> str:
1214
+ """Format dependency overview."""
1215
+ return f"""• Direct Dependencies: {len(analysis["direct_dependencies"])}
1216
+ • External Dependencies: {len(analysis["external_dependencies"])}
1217
+ • Community Cookbooks: {len(analysis["community_cookbooks"])}
1218
+ • Circular Dependencies: {len(analysis["circular_dependencies"])}"""
1219
+
1220
+
1221
+ def _format_dependency_graph(analysis: dict) -> str:
1222
+ """Format dependency graph (text representation)."""
1223
+ graph = [f"{analysis['cookbook_name']} depends on:"]
1224
+
1225
+ for dep in analysis["direct_dependencies"]:
1226
+ graph.append(f" ├── {dep}")
1227
+
1228
+ if analysis["external_dependencies"]:
1229
+ graph.append("External dependencies:")
1230
+ for dep in analysis["external_dependencies"]:
1231
+ graph.append(f" ├── {dep}")
1232
+
1233
+ return "\n".join(graph) if len(graph) > 1 else "No dependencies found."
1234
+
1235
+
1236
+ def _format_migration_order(order: list) -> str:
1237
+ """Format migration order recommendations."""
1238
+ if not order:
1239
+ return "No order analysis available."
1240
+
1241
+ formatted = []
1242
+ for item in sorted(order, key=lambda x: x["priority"]):
1243
+ priority_text = f"Priority {item['priority']}"
1244
+ formatted.append(f"• {item['cookbook']} - {priority_text}: {item['reason']}")
1245
+
1246
+ return "\n".join(formatted)
1247
+
1248
+
1249
+ def _format_circular_dependencies(circular: list) -> str:
1250
+ """Format circular dependencies."""
1251
+ if not circular:
1252
+ return "✅ No circular dependencies detected."
1253
+
1254
+ formatted = []
1255
+ for circ in circular:
1256
+ formatted.append(
1257
+ f"⚠️ {circ['cookbook1']} ↔ {circ['cookbook2']} ({circ['type']})"
1258
+ )
1259
+
1260
+ return "\n".join(formatted)
1261
+
1262
+
1263
+ def _format_external_dependencies(analysis: dict) -> str:
1264
+ """Format external dependencies."""
1265
+ if not analysis["external_dependencies"]:
1266
+ return "No external dependencies."
1267
+
1268
+ return "\n".join([f"• {dep}" for dep in analysis["external_dependencies"]])
1269
+
1270
+
1271
+ def _format_community_cookbooks(analysis: dict) -> str:
1272
+ """Format community cookbooks."""
1273
+ if not analysis["community_cookbooks"]:
1274
+ return "No community cookbooks identified."
1275
+
1276
+ return "\n".join(
1277
+ [
1278
+ f"• {cb} (consider ansible-galaxy role)"
1279
+ for cb in analysis["community_cookbooks"]
1280
+ ]
1281
+ )
1282
+
1283
+
1284
+ def _analyze_dependency_migration_impact(analysis: dict) -> str:
1285
+ """Analyze migration impact of dependencies."""
1286
+ impacts = []
1287
+
1288
+ if analysis["community_cookbooks"]:
1289
+ impacts.append(
1290
+ f"• {len(analysis['community_cookbooks'])} community cookbooks can likely be replaced with Ansible Galaxy roles"
1291
+ )
1292
+
1293
+ if analysis["circular_dependencies"]:
1294
+ impacts.append(
1295
+ f"• {len(analysis['circular_dependencies'])} circular dependencies need resolution before migration"
1296
+ )
1297
+
1298
+ direct_count = len(analysis["direct_dependencies"])
1299
+ if direct_count > 5:
1300
+ impacts.append(
1301
+ f"• High dependency count ({direct_count}) suggests complex migration order requirements"
1302
+ )
1303
+
1304
+ if not impacts:
1305
+ impacts.append(
1306
+ "• Low dependency complexity - straightforward migration expected"
1307
+ )
1308
+
1309
+ return "\n".join(impacts)
1310
+
1311
+
1312
+ def _generate_phased_migration_phases(assessments: list, timeline_weeks: int) -> str:
1313
+ """Generate phased migration phases."""
1314
+ phases = []
1315
+
1316
+ # Sort by complexity
1317
+ sorted_assessments = sorted(assessments, key=lambda x: x["complexity_score"])
1318
+
1319
+ phase1 = [a for a in sorted_assessments if a["complexity_score"] < 30]
1320
+ phase2 = [a for a in sorted_assessments if 30 <= a["complexity_score"] < 70]
1321
+ phase3 = [a for a in sorted_assessments if a["complexity_score"] >= 70]
1322
+
1323
+ weeks_per_phase = timeline_weeks // 3
1324
+
1325
+ phases.append(
1326
+ f"**Phase 1 (Weeks 1-{weeks_per_phase}):** Foundation & Low Complexity"
1327
+ )
1328
+ phases.append(f" • {len(phase1)} low-complexity cookbooks")
1329
+ phases.append(" • Setup AWX environment and CI/CD")
1330
+
1331
+ phases.append(
1332
+ f"\n**Phase 2 (Weeks {weeks_per_phase + 1}-{weeks_per_phase * 2}):** Medium Complexity"
1333
+ )
1334
+ phases.append(f" • {len(phase2)} medium-complexity cookbooks")
1335
+ phases.append(" • Parallel conversion and testing")
1336
+
1337
+ phases.append(
1338
+ f"\n**Phase 3 (Weeks {weeks_per_phase * 2 + 1}-{timeline_weeks}):** High Complexity & Finalization"
1339
+ )
1340
+ phases.append(f" • {len(phase3)} high-complexity cookbooks")
1341
+ phases.append(" • Final testing and deployment")
1342
+
1343
+ return "\n".join(phases)
1344
+
1345
+
1346
+ def _generate_big_bang_phases(assessments: list, timeline_weeks: int) -> str:
1347
+ """Generate big bang migration phases."""
1348
+ return f"""**Phase 1 (Weeks 1-2):** Preparation
1349
+ • AWX environment setup
1350
+ • Team training and preparation
1351
+ • Conversion tooling setup
1352
+
1353
+ **Phase 2 (Weeks 3-{timeline_weeks - 2}):** Mass Conversion
1354
+ • Parallel conversion of all {len(assessments)} cookbooks
1355
+ • Continuous integration and testing
1356
+ • Issue resolution and refinement
1357
+
1358
+ **Phase 3 (Weeks {timeline_weeks - 1}-{timeline_weeks}):** Cutover
1359
+ • Final validation and testing
1360
+ • Production deployment
1361
+ • Rollback readiness verification"""
1362
+
1363
+
1364
+ def _generate_parallel_migration_phases(timeline_weeks: int) -> str:
1365
+ """Generate parallel migration phases."""
1366
+ return f"""**Track A - Infrastructure (Weeks 1-{timeline_weeks}):**
1367
+ • Core infrastructure cookbooks
1368
+ • Base OS configuration
1369
+ • Security and compliance
1370
+
1371
+ **Track B - Applications (Weeks 1-{timeline_weeks}):**
1372
+ • Application deployment cookbooks
1373
+ • Service configuration
1374
+ • Custom business logic
1375
+
1376
+ **Track C - Integration (Weeks 1-{timeline_weeks}):**
1377
+ • AWX workflow development
1378
+ • CI/CD pipeline integration
1379
+ • Testing and validation automation"""
1380
+
1381
+
1382
+ def _generate_migration_timeline(strategy: str, timeline_weeks: int) -> str:
1383
+ """Generate migration timeline."""
1384
+ milestones = []
1385
+
1386
+ if strategy == "phased":
1387
+ week_intervals = timeline_weeks // 4
1388
+ milestones = [
1389
+ f"Week {week_intervals}: Phase 1 completion - Low complexity cookbooks migrated",
1390
+ f"Week {week_intervals * 2}: Phase 2 completion - Medium complexity cookbooks migrated",
1391
+ f"Week {week_intervals * 3}: Phase 3 completion - High complexity cookbooks migrated",
1392
+ f"Week {timeline_weeks}: Final validation and production deployment",
1393
+ ]
1394
+ else:
1395
+ milestones = [
1396
+ "Week 2: Environment setup and team training complete",
1397
+ f"Week {timeline_weeks // 2}: 50% of cookbooks converted and tested",
1398
+ f"Week {timeline_weeks - 2}: All conversions complete, final testing",
1399
+ f"Week {timeline_weeks}: Production deployment and go-live",
1400
+ ]
1401
+
1402
+ return "\n".join([f"• {milestone}" for milestone in milestones])
1403
+
1404
+
1405
+ def _build_validation_header(
1406
+ conversion_type: str, summary: dict[str, int]
1407
+ ) -> list[str]:
1408
+ """Build the header section of validation results."""
1409
+ return [
1410
+ f"# Validation Results for {conversion_type} Conversion",
1411
+ "",
1412
+ "## Summary",
1413
+ f"• Errors: {summary['errors']}",
1414
+ f"• Warnings: {summary['warnings']}",
1415
+ f"• Info: {summary['info']}",
1416
+ "",
1417
+ ]
1418
+
1419
+
1420
def _group_results_by_level(
    results: list[ValidationResult],
) -> tuple[list[ValidationResult], list[ValidationResult], list[ValidationResult]]:
    """Group validation results by severity level.

    Returns (errors, warnings, infos); results at any other level are
    dropped, matching the previous per-level filtering behaviour.
    """
    buckets = {
        ValidationLevel.ERROR: [],
        ValidationLevel.WARNING: [],
        ValidationLevel.INFO: [],
    }
    for result in results:
        if result.level in buckets:
            buckets[result.level].append(result)
    return (
        buckets[ValidationLevel.ERROR],
        buckets[ValidationLevel.WARNING],
        buckets[ValidationLevel.INFO],
    )
1428
+
1429
+
1430
def _format_result_section(
    title: str, icon: str, results: list[ValidationResult]
) -> list[str]:
    """Format a single validation results section.

    Each result is rendered via str() and followed by a blank line; an
    empty results list yields no section at all.
    """
    if not results:
        return []

    section = [f"## {icon} {title}", ""]
    for entry in results:
        section += [str(entry), ""]
    return section
1443
+
1444
+
1445
def _format_validation_results_text(
    conversion_type: str, results: list[ValidationResult], summary: dict[str, int]
) -> str:
    """
    Format validation results as text.

    Args:
        conversion_type: Type of conversion.
        results: List of validation results.
        summary: Summary of validation results.

    Returns:
        Formatted text output.

    """
    if not results:
        return f"""# Validation Results for {conversion_type} Conversion

✅ All validation checks passed! No issues found.
"""

    output_lines = _build_validation_header(conversion_type, summary)
    errors, warnings, infos = _group_results_by_level(results)

    # Fix: pass the icon through the `icon` parameter instead of embedding
    # it in the title with an empty icon, which rendered headings with a
    # doubled space (e.g. "##  ❌ Errors").
    output_lines.extend(_format_result_section("Errors", "❌", errors))
    output_lines.extend(_format_result_section("Warnings", "⚠️", warnings))
    output_lines.extend(_format_result_section("Information", "ℹ️", infos))

    return "\n".join(output_lines)
1474
+
1475
+
1476
+ def _format_validation_results_summary(
1477
+ conversion_type: str, summary: dict[str, int]
1478
+ ) -> str:
1479
+ """
1480
+ Format validation results as summary.
1481
+
1482
+ Args:
1483
+ conversion_type: Type of conversion.
1484
+ summary: Summary of validation results.
1485
+
1486
+ Returns:
1487
+ Formatted summary output.
1488
+
1489
+ """
1490
+ return f"""# Validation Summary
1491
+
1492
+ ✓ Conversion Type: {conversion_type}
1493
+ • Errors: {summary["errors"]}
1494
+ • Warnings: {summary["warnings"]}
1495
+ • Info: {summary["info"]}
1496
+
1497
+ {"✅ No critical issues found!" if summary["errors"] == 0 else "❌ Critical issues found - review errors"}
1498
+ """