mcp-souschef 2.0.1__py3-none-any.whl → 2.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
souschef/assessment.py ADDED
@@ -0,0 +1,1230 @@
1
+ """
2
+ Assessment and migration planning module for Chef to Ansible migrations.
3
+
4
+ This module provides tools for analyzing Chef cookbook migration complexity,
5
+ generating migration plans, analyzing dependencies, and validating conversions.
6
+ """
7
+
8
+ import json
9
+ import re
10
+ from typing import Any
11
+
12
+ from souschef.core import ERROR_PREFIX, METADATA_FILENAME, _normalize_path, _safe_join
13
+ from souschef.core.validation import (
14
+ ValidationEngine,
15
+ ValidationLevel,
16
+ ValidationResult,
17
+ )
18
+
19
+
20
def assess_chef_migration_complexity(
    cookbook_paths: str,
    migration_scope: str = "full",
    target_platform: str = "ansible_awx",
) -> str:
    """
    Assess the complexity of migrating Chef cookbooks to Ansible with detailed analysis.

    Args:
        cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
        migration_scope: Scope of migration (full, recipes_only, infrastructure_only).
            NOTE(review): currently only echoed into the report header; it does
            not change which files are analyzed — confirm intended behavior.
        target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)

    Returns:
        Comprehensive migration complexity assessment with recommendations

    """
    try:
        # Split the comma-separated list and normalize each entry to a Path.
        paths = [_normalize_path(path.strip()) for path in cookbook_paths.split(",")]

        # Assess each cookbook; paths that do not exist are silently skipped.
        cookbook_assessments = []
        overall_metrics = {
            "total_cookbooks": 0,
            "total_recipes": 0,
            "total_resources": 0,
            "complexity_score": 0,
            "estimated_effort_days": 0,
        }

        for cookbook_path in paths:
            if cookbook_path.exists():
                # deepcode ignore PT: path normalized via _normalize_path
                assessment = _assess_single_cookbook(cookbook_path)
                cookbook_assessments.append(assessment)

                # Aggregate metrics across all assessed cookbooks.
                overall_metrics["total_cookbooks"] += 1
                overall_metrics["total_recipes"] += assessment["metrics"][
                    "recipe_count"
                ]
                overall_metrics["total_resources"] += assessment["metrics"][
                    "resource_count"
                ]
                overall_metrics["complexity_score"] += assessment["complexity_score"]
                overall_metrics["estimated_effort_days"] += assessment[
                    "estimated_effort_days"
                ]

        # Average complexity (truncated to int); the key is only present when
        # at least one cookbook was assessed — downstream formatting uses .get.
        if cookbook_assessments:
            overall_metrics["avg_complexity"] = int(
                overall_metrics["complexity_score"] / len(cookbook_assessments)
            )

        # Generate migration recommendations
        recommendations = _generate_migration_recommendations_from_assessment(
            cookbook_assessments, overall_metrics, target_platform
        )

        # Create migration roadmap
        roadmap = _create_migration_roadmap(cookbook_assessments)

        # Assemble the final Markdown-style report from the helper sections.
        return f"""# Chef to Ansible Migration Assessment
# Scope: {migration_scope}
# Target Platform: {target_platform}

## Overall Migration Metrics:
{_format_overall_metrics(overall_metrics)}

## Cookbook Assessments:
{_format_cookbook_assessments(cookbook_assessments)}

## Migration Complexity Analysis:
{_format_complexity_analysis(cookbook_assessments)}

## Migration Recommendations:
{recommendations}

## Migration Roadmap:
{roadmap}

## Risk Assessment:
{_assess_migration_risks(cookbook_assessments, target_platform)}

## Resource Requirements:
{_estimate_resource_requirements(overall_metrics, target_platform)}
"""
    except Exception as e:
        # Broad catch by design: this is a tool entry point that reports
        # failures as text rather than raising to the caller.
        return f"Error assessing migration complexity: {e}"
111
+
112
+
113
def generate_migration_plan(
    cookbook_paths: str, migration_strategy: str = "phased", timeline_weeks: int = 12
) -> str:
    """
    Generate a detailed migration plan from Chef to Ansible with timeline and milestones.

    Args:
        cookbook_paths: Comma-separated paths to Chef cookbooks
        migration_strategy: Migration approach (big_bang, phased, parallel);
            any other value is treated as "parallel" by the plan generator
        timeline_weeks: Target timeline in weeks

    Returns:
        Detailed migration plan with phases, milestones, and deliverables

    """
    try:
        # Parse and assess cookbooks (non-existent paths are skipped).
        paths = [_normalize_path(path.strip()) for path in cookbook_paths.split(",")]
        cookbook_assessments = []

        for cookbook_path in paths:
            if cookbook_path.exists():
                # deepcode ignore PT: path normalized via _normalize_path
                assessment = _assess_single_cookbook(cookbook_path)
                cookbook_assessments.append(assessment)

        # Generate migration plan based on strategy
        migration_plan = _generate_detailed_migration_plan(
            cookbook_assessments, migration_strategy, timeline_weeks
        )

        # Render each plan section into a Markdown-style document.
        return f"""# Chef to Ansible Migration Plan
# Strategy: {migration_strategy}
# Timeline: {timeline_weeks} weeks
# Cookbooks: {len(cookbook_assessments)}

## Executive Summary:
{migration_plan["executive_summary"]}

## Migration Phases:
{migration_plan["phases"]}

## Timeline and Milestones:
{migration_plan["timeline"]}

## Team Requirements:
{migration_plan["team_requirements"]}

## Prerequisites and Dependencies:
{migration_plan["prerequisites"]}

## Testing Strategy:
{migration_plan["testing_strategy"]}

## Risk Mitigation:
{migration_plan["risk_mitigation"]}

## Success Criteria:
{migration_plan["success_criteria"]}

## Post-Migration Tasks:
{migration_plan["post_migration"]}
"""
    except Exception as e:
        # Tool entry point: report failures as text instead of raising.
        return f"Error generating migration plan: {e}"
178
+
179
+
180
def analyze_cookbook_dependencies(
    cookbook_path: str, dependency_depth: str = "direct"
) -> str:
    """
    Analyze cookbook dependencies and identify migration order requirements.

    Args:
        cookbook_path: Path to Chef cookbook or cookbooks directory
        dependency_depth: Analysis depth (direct, transitive, full).
            NOTE(review): currently only echoed into the report header; the
            underlying analysis always inspects direct declarations — confirm.

    Returns:
        Dependency analysis with migration order recommendations

    """
    try:
        cookbook_path_obj = _normalize_path(cookbook_path)
        if not cookbook_path_obj.exists():
            return f"{ERROR_PREFIX} Cookbook path not found: {cookbook_path}"

        # Analyze dependencies (metadata file + Berksfile parsing).
        dependency_analysis = _analyze_cookbook_dependencies_detailed(cookbook_path_obj)

        # Determine migration order
        migration_order = _determine_migration_order(dependency_analysis)

        # Identify circular dependencies (name-based heuristic).
        circular_deps = _identify_circular_dependencies(dependency_analysis)

        # Assemble the Markdown-style report from the helper formatters.
        return f"""# Cookbook Dependency Analysis
# Cookbook: {cookbook_path_obj.name}
# Analysis Depth: {dependency_depth}

## Dependency Overview:
{_format_dependency_overview(dependency_analysis)}

## Dependency Graph:
{_format_dependency_graph(dependency_analysis)}

## Migration Order Recommendations:
{_format_migration_order(migration_order)}

## Circular Dependencies:
{_format_circular_dependencies(circular_deps)}

## External Dependencies:
{_format_external_dependencies(dependency_analysis)}

## Community Cookbooks:
{_format_community_cookbooks(dependency_analysis)}

## Migration Impact Analysis:
{_analyze_dependency_migration_impact(dependency_analysis)}
"""
    except Exception as e:
        # Tool entry point: report failures as text instead of raising.
        return f"Error analyzing cookbook dependencies: {e}"
235
+
236
+
237
def generate_migration_report(
    _assessment_results: str,
    report_format: str = "executive",
    include_technical_details: str = "yes",
) -> str:
    """
    Generate comprehensive migration report from assessment results.

    Args:
        _assessment_results: JSON string or summary of assessment results
            (reserved for future use — not read by the current implementation)
        report_format: Report format (executive, technical, combined).
            NOTE(review): only echoed into the report header today; the body
            sections do not vary by format — confirm intended behavior.
        include_technical_details: Include detailed technical analysis (yes/no);
            compared against the exact string "yes"

    Returns:
        Formatted migration report for stakeholders

    """
    try:
        # Imported locally — datetime is only needed in this function.
        from datetime import datetime

        # Generate report sections; the boolean selects whether the
        # "technical_details" section is produced.
        report = _generate_comprehensive_migration_report(
            include_technical_details == "yes"
        )

        current_date = datetime.now().strftime("%Y-%m-%d")

        return f"""# Chef to Ansible Migration Report
**Generated:** {current_date}
**Report Type:** {report_format.title()}
**Technical Details:** {"Included" if include_technical_details == "yes" else "Summary Only"}

## Executive Summary
{report["executive_summary"]}

## Migration Scope and Objectives
{report["scope_objectives"]}

## Current State Analysis
{report["current_state"]}

## Target State Architecture
{report["target_state"]}

## Migration Strategy and Approach
{report["strategy"]}

## Cost-Benefit Analysis
{report["cost_benefit"]}

## Timeline and Resource Requirements
{report["timeline_resources"]}

## Risk Assessment and Mitigation
{report["risk_assessment"]}

{"## Technical Implementation Details" if include_technical_details == "yes" else ""}
{report.get("technical_details", "") if include_technical_details == "yes" else ""}

## Recommendations and Next Steps
{report["recommendations"]}

## Appendices
{report["appendices"]}
"""
    except Exception as e:
        # Tool entry point: report failures as text instead of raising.
        return f"Error generating migration report: {e}"
304
+
305
+
306
def validate_conversion(
    conversion_type: str,
    result_content: str,
    output_format: str = "text",
) -> str:
    """
    Validate a Chef-to-Ansible conversion for correctness, best practices, and quality.

    The validation engine checks a conversion along several dimensions:
    syntax (YAML/Jinja2/Python), semantics (logic equivalence, variable
    usage, dependencies), best practices (naming conventions, idempotency,
    task organization), security (privilege escalation, sensitive data
    handling), and performance (efficiency recommendations).

    Args:
        conversion_type: Type of conversion to validate
            ('resource', 'recipe', 'template', 'inspec')
        result_content: Converted Ansible code or configuration
        output_format: Output format ('text', 'json', 'summary')

    Returns:
        Validation report with errors, warnings, and suggestions

    """
    try:
        engine = ValidationEngine()
        results = engine.validate_conversion(conversion_type, result_content)
        summary = engine.get_summary()

        # Guard-clause dispatch on the requested output format; anything
        # other than "json"/"summary" falls through to the text report.
        if output_format == "json":
            payload = {
                "summary": summary,
                "results": [item.to_dict() for item in results],
            }
            return json.dumps(payload, indent=2)
        if output_format == "summary":
            return _format_validation_results_summary(conversion_type, summary)
        return _format_validation_results_text(conversion_type, results, summary)

    except Exception as e:
        return f"Error during validation: {e}"
351
+
352
+
353
+ # Private helper functions for assessment
354
+
355
+
356
def _assess_single_cookbook(cookbook_path) -> dict:
    """Assess complexity of a single cookbook.

    Scans the cookbook's ``recipes/`` directory with regex heuristics,
    derives a complexity score and person-day effort estimate, and records
    notable migration challenges plus a low/medium/high priority.
    """
    cookbook = _normalize_path(cookbook_path)
    assessment: dict[str, Any] = {
        "cookbook_name": cookbook.name,
        "cookbook_path": str(cookbook),
        "metrics": {},
        "complexity_score": 0,
        "estimated_effort_days": 0,
        "challenges": [],
        "migration_priority": "medium",
        "dependencies": [],
    }

    # Count recipes and resources
    recipes_dir = _safe_join(cookbook, "recipes")
    recipe_count = len(list(recipes_dir.glob("*.rb"))) if recipes_dir.exists() else 0

    # Analyze recipe complexity
    resource_count = 0
    custom_resources = 0
    ruby_blocks = 0

    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            with recipe_file.open("r", encoding="utf-8", errors="ignore") as f:
                content = f.read()
                # Count Chef resources of the form: name "title" do
                # (bounded quantifiers keep the regex safe on large input).

                resources = len(
                    re.findall(r'\w{1,100}\s+[\'"]([^\'"]{0,200})[\'"]\s+do', content)
                )
                ruby_blocks += len(
                    re.findall(r"ruby_block|execute|bash", content, re.IGNORECASE)
                )
                custom_resources += len(
                    re.findall(
                        r"custom_resource|provides|use_inline_resources", content
                    )
                )
                resource_count += resources

    # NOTE(review): glob("*") counts direct children only — nested files
    # (e.g. templates/default/*.erb) are not included; confirm intent.
    assessment["metrics"] = {
        "recipe_count": recipe_count,
        "resource_count": resource_count,
        "custom_resources": custom_resources,
        "ruby_blocks": ruby_blocks,
        "templates": len(list(_safe_join(cookbook, "templates").glob("*")))
        if _safe_join(cookbook, "templates").exists()
        else 0,
        "files": len(list(_safe_join(cookbook, "files").glob("*")))
        if _safe_join(cookbook, "files").exists()
        else 0,
    }

    # Calculate complexity score (0-100). NOTE(review): the custom_resources
    # and ruby_blocks factors are uncapped, so the total can exceed 100.
    complexity_factors = {
        "recipe_count": min(recipe_count * 2, 20),
        "resource_density": min(resource_count / max(recipe_count, 1) * 5, 25),
        "custom_resources": custom_resources * 10,
        "ruby_blocks": ruby_blocks * 5,
        "templates": min(assessment["metrics"]["templates"] * 2, 15),
        "files": min(assessment["metrics"]["files"] * 1, 10),
    }

    assessment["complexity_score"] = sum(complexity_factors.values())

    # Estimate effort (person-days): base of 0.5 days per recipe, scaled up
    # by the complexity score (up to 2x at score 100).
    base_effort = recipe_count * 0.5  # 0.5 days per recipe
    complexity_multiplier = 1 + (assessment["complexity_score"] / 100)
    assessment["estimated_effort_days"] = round(base_effort * complexity_multiplier, 1)

    # Identify challenges
    if custom_resources > 0:
        assessment["challenges"].append(
            f"{custom_resources} custom resources requiring manual conversion"
        )
    if ruby_blocks > 5:
        assessment["challenges"].append(
            f"{ruby_blocks} Ruby blocks needing shell script conversion"
        )
    if assessment["complexity_score"] > 70:
        assessment["challenges"].append(
            "High complexity cookbook requiring expert review"
        )

    # Set migration priority (default "medium" for scores 30-70).
    if assessment["complexity_score"] < 30:
        assessment["migration_priority"] = "low"
    elif assessment["complexity_score"] > 70:
        assessment["migration_priority"] = "high"

    return assessment
449
+
450
+
451
+ def _format_overall_metrics(metrics: dict) -> str:
452
+ """Format overall migration metrics."""
453
+ return f"""• Total Cookbooks: {metrics["total_cookbooks"]}
454
+ • Total Recipes: {metrics["total_recipes"]}
455
+ • Total Resources: {metrics["total_resources"]}
456
+ • Average Complexity: {metrics.get("avg_complexity", 0):.1f}/100
457
+ • Estimated Total Effort: {metrics["estimated_effort_days"]:.1f} person-days
458
+ • Estimated Duration: {int(metrics["estimated_effort_days"] / 5)}-{int(metrics["estimated_effort_days"] / 3)} weeks"""
459
+
460
+
461
+ def _format_cookbook_assessments(assessments: list) -> str:
462
+ """Format individual cookbook assessments."""
463
+ if not assessments:
464
+ return "No cookbooks assessed."
465
+
466
+ def _get_priority_icon(priority: str) -> str:
467
+ """Get priority icon based on migration priority level."""
468
+ if priority == "high":
469
+ return "🔴"
470
+ elif priority == "medium":
471
+ return "🟡"
472
+ else:
473
+ return "🟢"
474
+
475
+ formatted = []
476
+ for assessment in assessments:
477
+ priority_icon = _get_priority_icon(assessment["migration_priority"])
478
+ formatted.append(f"""### {assessment["cookbook_name"]} {priority_icon}
479
+ • Complexity Score: {assessment["complexity_score"]:.1f}/100
480
+ • Estimated Effort: {assessment["estimated_effort_days"]} days
481
+ • Recipes: {assessment["metrics"]["recipe_count"]}
482
+ • Resources: {assessment["metrics"]["resource_count"]}
483
+ • Custom Resources: {assessment["metrics"]["custom_resources"]}
484
+ • Challenges: {len(assessment["challenges"])}""")
485
+
486
+ return "\n\n".join(formatted)
487
+
488
+
489
def _format_complexity_analysis(assessments: list) -> str:
    """Summarize cookbook counts per complexity band plus top challenges."""
    if not assessments:
        return "No complexity analysis available."

    # Single pass: bucket each cookbook into exactly one band.
    high = medium = low = 0
    for item in assessments:
        score = item["complexity_score"]
        if score > 70:
            high += 1
        elif score >= 30:
            medium += 1
        else:
            low += 1

    return (
        f"• High Complexity (>70): {high} cookbooks\n"
        f"• Medium Complexity (30-70): {medium} cookbooks\n"
        f"• Low Complexity (<30): {low} cookbooks\n"
        "\n"
        "**Top Migration Challenges:**\n"
        f"{_identify_top_challenges(assessments)}"
    )
504
+
505
+
506
+ def _identify_top_challenges(assessments: list) -> str:
507
+ """Identify the most common migration challenges."""
508
+ challenge_counts: dict[str, int] = {}
509
+ for assessment in assessments:
510
+ for challenge in assessment["challenges"]:
511
+ challenge_counts[challenge] = challenge_counts.get(challenge, 0) + 1
512
+
513
+ top_challenges = sorted(challenge_counts.items(), key=lambda x: x[1], reverse=True)[
514
+ :5
515
+ ]
516
+
517
+ formatted = []
518
+ for challenge, count in top_challenges:
519
+ formatted.append(f" - {challenge} ({count} cookbooks)")
520
+
521
+ return (
522
+ "\n".join(formatted)
523
+ if formatted
524
+ else " - No significant challenges identified"
525
+ )
526
+
527
+
528
+ def _generate_migration_recommendations_from_assessment(
529
+ assessments: list, metrics: dict, target_platform: str
530
+ ) -> str:
531
+ """Generate migration recommendations based on assessment."""
532
+ recommendations = []
533
+
534
+ # Platform-specific recommendations
535
+ if target_platform == "ansible_awx":
536
+ recommendations.append(
537
+ "• Implement AWX/AAP integration for job templates and workflows"
538
+ )
539
+ recommendations.append(
540
+ "• Set up dynamic inventory sources for Chef server integration"
541
+ )
542
+
543
+ # Complexity-based recommendations
544
+ avg_complexity = metrics.get("avg_complexity", 0)
545
+ if avg_complexity > 60:
546
+ recommendations.append(
547
+ "• Consider phased migration approach due to high complexity"
548
+ )
549
+ recommendations.append(
550
+ "• Allocate additional time for custom resource conversion"
551
+ )
552
+ recommendations.append("• Plan for comprehensive testing and validation")
553
+ else:
554
+ recommendations.append("• Standard migration timeline should be sufficient")
555
+ recommendations.append("• Consider big-bang approach for faster delivery")
556
+
557
+ # Effort-based recommendations
558
+ total_effort = metrics["estimated_effort_days"]
559
+ if total_effort > 30:
560
+ recommendations.append("• Establish dedicated migration team")
561
+ recommendations.append("• Consider parallel migration tracks")
562
+ else:
563
+ recommendations.append("• Single developer can handle migration with oversight")
564
+
565
+ # Custom resource recommendations
566
+ custom_resource_cookbooks = [
567
+ a for a in assessments if a["metrics"]["custom_resources"] > 0
568
+ ]
569
+ if custom_resource_cookbooks:
570
+ recommendations.append(
571
+ f"• {len(custom_resource_cookbooks)} cookbooks need custom resource conversion"
572
+ )
573
+ recommendations.append(
574
+ "• Prioritize custom resource analysis and conversion strategy"
575
+ )
576
+
577
+ return "\n".join(recommendations)
578
+
579
+
580
+ def _create_migration_roadmap(assessments: list) -> str:
581
+ """Create a migration roadmap based on assessments."""
582
+ # Sort cookbooks by complexity (low to high for easier wins first)
583
+ sorted_cookbooks = sorted(assessments, key=lambda x: x["complexity_score"])
584
+
585
+ phases = {
586
+ "Phase 1 - Foundation (Weeks 1-2)": [
587
+ "Set up Ansible/AWX environment",
588
+ "Establish CI/CD pipelines",
589
+ "Create testing framework",
590
+ "Train team on Ansible best practices",
591
+ ],
592
+ "Phase 2 - Low Complexity Migration (Weeks 3-5)": [],
593
+ "Phase 3 - Medium Complexity Migration (Weeks 6-9)": [],
594
+ "Phase 4 - High Complexity Migration (Weeks 10-12)": [],
595
+ "Phase 5 - Validation and Cleanup (Weeks 13-14)": [
596
+ "Comprehensive testing",
597
+ "Performance validation",
598
+ "Documentation updates",
599
+ "Team training and handover",
600
+ ],
601
+ }
602
+
603
+ # Distribute cookbooks across phases
604
+ for cookbook in sorted_cookbooks:
605
+ if cookbook["complexity_score"] < 30:
606
+ phases["Phase 2 - Low Complexity Migration (Weeks 3-5)"].append(
607
+ f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
608
+ )
609
+ elif cookbook["complexity_score"] < 70:
610
+ phases["Phase 3 - Medium Complexity Migration (Weeks 6-9)"].append(
611
+ f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
612
+ )
613
+ else:
614
+ phases["Phase 4 - High Complexity Migration (Weeks 10-12)"].append(
615
+ f"Migrate {cookbook['cookbook_name']} ({cookbook['estimated_effort_days']} days)"
616
+ )
617
+
618
+ # Format roadmap
619
+ roadmap_formatted = []
620
+ for phase, tasks in phases.items():
621
+ roadmap_formatted.append(f"\n### {phase}")
622
+ for task in tasks:
623
+ roadmap_formatted.append(f" - {task}")
624
+
625
+ return "\n".join(roadmap_formatted)
626
+
627
+
628
+ def _assess_migration_risks(assessments: list, target_platform: str) -> str:
629
+ """Assess migration risks."""
630
+ risks = []
631
+
632
+ # Technical risks
633
+ high_complexity_count = len([a for a in assessments if a["complexity_score"] > 70])
634
+ if high_complexity_count > 0:
635
+ risks.append(
636
+ f"🔴 HIGH: {high_complexity_count} high-complexity cookbooks may cause delays"
637
+ )
638
+
639
+ custom_resource_count = sum(a["metrics"]["custom_resources"] for a in assessments)
640
+ if custom_resource_count > 0:
641
+ risks.append(
642
+ f"🟡 MEDIUM: {custom_resource_count} custom resources need manual conversion"
643
+ )
644
+
645
+ ruby_block_count = sum(a["metrics"]["ruby_blocks"] for a in assessments)
646
+ if ruby_block_count > 10:
647
+ risks.append(
648
+ f"🟡 MEDIUM: {ruby_block_count} Ruby blocks require shell script conversion"
649
+ )
650
+
651
+ # Timeline risks
652
+ total_effort = sum(a["estimated_effort_days"] for a in assessments)
653
+ if total_effort > 50:
654
+ risks.append("🟡 MEDIUM: Large migration scope may impact timeline")
655
+
656
+ # Platform risks
657
+ if target_platform == "ansible_awx":
658
+ risks.append("🟢 LOW: AWX integration well-supported with existing tools")
659
+
660
+ if not risks:
661
+ risks.append("🟢 LOW: No significant migration risks identified")
662
+
663
+ return "\n".join(risks)
664
+
665
+
666
+ def _estimate_resource_requirements(metrics: dict, target_platform: str) -> str:
667
+ """Estimate resource requirements for migration."""
668
+ total_effort = metrics["estimated_effort_days"]
669
+
670
+ # Team size recommendations
671
+ if total_effort < 20:
672
+ team_size = "1 developer + 1 reviewer"
673
+ timeline = "4-6 weeks"
674
+ elif total_effort < 50:
675
+ team_size = "2 developers + 1 senior reviewer"
676
+ timeline = "6-10 weeks"
677
+ else:
678
+ team_size = "3-4 developers + 1 tech lead + 1 architect"
679
+ timeline = "10-16 weeks"
680
+
681
+ return f"""• **Team Size:** {team_size}
682
+ • **Estimated Timeline:** {timeline}
683
+ • **Total Effort:** {total_effort:.1f} person-days
684
+ • **Infrastructure:** {target_platform.replace("_", "/").upper()} environment
685
+ • **Testing:** Dedicated test environment recommended
686
+ • **Training:** 2-3 days Ansible/AWX training for team"""
687
+
688
+
689
def _analyze_cookbook_dependencies_detailed(cookbook_path) -> dict:
    """Analyze cookbook dependencies in detail.

    Parses ``depends`` declarations from the cookbook metadata file and
    ``cookbook`` declarations from an optional Berksfile, then tags any
    dependency whose name contains a well-known community-cookbook name.

    NOTE(review): ``transitive_dependencies`` and ``circular_dependencies``
    are initialized but never populated here — filled elsewhere or reserved
    for future work; confirm.
    """
    analysis = {
        "cookbook_name": cookbook_path.name,
        "direct_dependencies": [],
        "transitive_dependencies": [],
        "external_dependencies": [],
        "community_cookbooks": [],
        "circular_dependencies": [],
    }

    # Read metadata.rb for dependencies
    metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
    if metadata_file.exists():
        with metadata_file.open("r", encoding="utf-8", errors="ignore") as f:
            content = f.read()

        # Parse dependencies declared as: depends "name" / depends 'name'

        depends_matches = re.findall(r'depends\s+[\'"]([^\'"]+)[\'"]', content)
        analysis["direct_dependencies"] = depends_matches

    # Read Berksfile for additional dependencies
    berksfile = _safe_join(cookbook_path, "Berksfile")
    if berksfile.exists():
        with berksfile.open("r", encoding="utf-8", errors="ignore") as f:
            content = f.read()

        cookbook_matches = re.findall(r'cookbook\s+[\'"]([^\'"]+)[\'"]', content)
        analysis["external_dependencies"].extend(cookbook_matches)

    # Identify community cookbooks (substring match against common names)
    community_cookbook_patterns = [
        "apache2",
        "nginx",
        "mysql",
        "postgresql",
        "java",
        "python",
        "nodejs",
        "docker",
        "build-essential",
        "git",
        "ntp",
        "sudo",
        "users",
    ]

    all_deps = analysis["direct_dependencies"] + analysis["external_dependencies"]
    for dep in all_deps:
        if any(pattern in dep.lower() for pattern in community_cookbook_patterns):
            analysis["community_cookbooks"].append(dep)

    return analysis
743
+
744
+
745
+ def _determine_migration_order(dependency_analysis: dict) -> list:
746
+ """Determine optimal migration order based on dependencies."""
747
+ # For now, return a simple order based on dependency count
748
+ # In a full implementation, this would use topological sorting
749
+
750
+ order = []
751
+
752
+ # Leaf nodes first (no dependencies)
753
+ if not dependency_analysis["direct_dependencies"]:
754
+ order.append(
755
+ {
756
+ "cookbook": dependency_analysis["cookbook_name"],
757
+ "priority": 1,
758
+ "reason": "No dependencies - can be migrated first",
759
+ }
760
+ )
761
+ else:
762
+ # Has dependencies - migrate after dependencies
763
+ dep_count = len(dependency_analysis["direct_dependencies"])
764
+ priority = min(dep_count + 1, 5) # Cap at priority 5
765
+ order.append(
766
+ {
767
+ "cookbook": dependency_analysis["cookbook_name"],
768
+ "priority": priority,
769
+ "reason": f"Has {dep_count} dependencies - migrate after dependencies",
770
+ }
771
+ )
772
+
773
+ return order
774
+
775
+
776
+ def _identify_circular_dependencies(dependency_analysis: dict) -> list:
777
+ """Identify circular dependencies (simplified)."""
778
+ # This is a simplified implementation
779
+ # A full implementation would build a dependency graph and detect cycles
780
+
781
+ circular = []
782
+ cookbook_name = dependency_analysis["cookbook_name"]
783
+
784
+ # Check if any dependency might depend back on this cookbook
785
+ for dep in dependency_analysis["direct_dependencies"]:
786
+ if cookbook_name.lower() in dep.lower(): # Simple heuristic
787
+ circular.append(
788
+ {"cookbook1": cookbook_name, "cookbook2": dep, "type": "potential"}
789
+ )
790
+
791
+ return circular
792
+
793
+
794
def _generate_detailed_migration_plan(
    assessments: list, strategy: str, timeline_weeks: int
) -> dict:
    """Generate detailed migration plan.

    Args:
        assessments: Per-cookbook assessment dicts (need "estimated_effort_days").
        strategy: "phased", "big_bang", or anything else (treated as parallel).
        timeline_weeks: Target duration used in the executive summary.

    Returns:
        Dict of plan sections keyed by section name.
        NOTE(review): "risk_mitigation" and "post_migration" are left empty
        here, so callers render those sections blank — confirm intent.
    """
    plan = {
        "executive_summary": "",
        "phases": "",
        "timeline": "",
        "team_requirements": "",
        "prerequisites": "",
        "testing_strategy": "",
        "risk_mitigation": "",
        "success_criteria": "",
        "post_migration": "",
    }

    total_cookbooks = len(assessments)
    total_effort = sum(a["estimated_effort_days"] for a in assessments)

    plan["executive_summary"] = (
        f"""This migration plan covers {total_cookbooks} Chef cookbooks with an estimated effort of {total_effort:.1f} person-days over {timeline_weeks} weeks using a {strategy} approach. The plan balances speed of delivery with risk mitigation, focusing on early wins to build momentum while carefully handling complex cookbooks."""
    )

    # Generate phases based on strategy
    if strategy == "phased":
        plan["phases"] = _generate_phased_migration_phases(assessments, timeline_weeks)
    elif strategy == "big_bang":
        plan["phases"] = _generate_big_bang_phases(assessments, timeline_weeks)
    else:  # parallel
        plan["phases"] = _generate_parallel_migration_phases(timeline_weeks)

    plan["timeline"] = _generate_migration_timeline(strategy, timeline_weeks)

    # Fix: total_effort is a float, so `total_effort // 10` is a float and
    # would render as e.g. "1.0 Ansible Developers"; cast to int for display.
    developer_count = int(min(3, max(1, total_effort // 10)))

    plan["team_requirements"] = f"""**Core Team:**
• 1 Migration Lead (Ansible expert)
• {developer_count} Ansible Developers
• 1 Chef SME (part-time consultation)
• 1 QA Engineer for testing
• 1 DevOps Engineer for infrastructure

**Skills Required:**
• Advanced Ansible/AWX experience
• Chef cookbook understanding
• Infrastructure as Code principles
• CI/CD pipeline experience"""
    plan["prerequisites"] = """• AWX/AAP environment setup and configured
• Git repository structure established
• CI/CD pipelines created for Ansible playbooks
• Test environments provisioned
• Team training on Ansible best practices completed
• Chef cookbook inventory and documentation review
• Stakeholder alignment on migration approach"""
    plan["testing_strategy"] = """**Testing Phases:**
1. **Unit Testing:** Ansible syntax validation and linting
2. **Integration Testing:** Playbook execution in test environments
3. **Functional Testing:** End-to-end application functionality validation
4. **Performance Testing:** Resource usage and execution time comparison
5. **User Acceptance Testing:** Stakeholder validation of migrated functionality

**Testing Tools:**
• ansible-lint for syntax validation
• molecule for role testing
• testinfra for infrastructure testing
• Custom validation scripts for Chef parity"""
    plan[
        "success_criteria"
    ] = """• All Chef cookbooks successfully converted to Ansible playbooks
• 100% functional parity between Chef and Ansible implementations
• No performance degradation in deployment times
• All automated tests passing
• Team trained and comfortable with new Ansible workflows
• Documentation complete and accessible
• Rollback procedures tested and documented"""
    return plan
868
+
869
+
870
def _generate_comprehensive_migration_report(include_technical: bool) -> dict:
    """
    Generate comprehensive migration report.

    Args:
        include_technical: When True, an additional "technical_details"
            section is appended to the report (this key is not part of the
            initial key set below).

    Returns:
        Mapping of report section name to pre-written markdown-style text.

    """
    # NOTE(review): only executive_summary, scope_objectives, current_state
    # and target_state are populated in this function; strategy, cost_benefit,
    # timeline_resources, risk_assessment, recommendations and appendices
    # remain empty strings — presumably filled elsewhere. Verify against callers.
    report = {
        "executive_summary": "",
        "scope_objectives": "",
        "current_state": "",
        "target_state": "",
        "strategy": "",
        "cost_benefit": "",
        "timeline_resources": "",
        "risk_assessment": "",
        "recommendations": "",
        "appendices": "",
    }

    # Executive Summary
    report[
        "executive_summary"
    ] = """This report outlines the migration strategy from Chef to Ansible/AWX, providing a comprehensive analysis of the current Chef infrastructure and a detailed roadmap for transition. The migration will modernize configuration management capabilities while reducing operational complexity and improving deployment automation.

**Key Findings:**
• Migration is technically feasible with moderate complexity
• Estimated 8-16 week timeline depending on approach
• Significant long-term cost savings and operational improvements
• Low-to-medium risk with proper planning and execution"""
    # Scope and Objectives
    report["scope_objectives"] = """**Migration Scope:**
• All production Chef cookbooks and recipes
• Chef server configurations and node management
• Existing deployment pipelines and automation
• Monitoring and compliance integrations

**Primary Objectives:**
• Modernize configuration management with Ansible/AWX
• Improve deployment reliability and speed
• Reduce operational overhead and complexity
• Enhance security and compliance capabilities
• Standardize on Red Hat ecosystem tools"""
    # Current State Analysis
    # NOTE(review): "X nodes" / "Y cookbooks" are literal template
    # placeholders in the output text, not interpolated values.
    report["current_state"] = """**Current Chef Infrastructure:**
• Chef Server managing X nodes across multiple environments
• Y cookbooks covering infrastructure and application deployment
• Established CI/CD pipelines with Chef integration
• Monitoring and compliance reporting in place

**Pain Points Identified:**
• Complex Chef DSL requiring Ruby expertise
• Lengthy convergence times in large environments
• Limited workflow orchestration capabilities
• Dependency management challenges
• Scaling limitations with current architecture"""
    # Target State Architecture
    report["target_state"] = """**Target Ansible/AWX Architecture:**
• Red Hat Ansible Automation Platform (AWX/AAP)
• Git-based playbook and role management
• Dynamic inventory from multiple sources
• Integrated workflow templates and job scheduling
• Enhanced RBAC and audit capabilities

**Key Improvements:**
• YAML-based playbooks (easier to read/write)
• Faster execution with SSH-based architecture
• Rich workflow orchestration capabilities
• Better integration with CI/CD tools
• Enhanced scalability and performance"""
    # Optional deep-dive section for technical audiences.
    if include_technical:
        report["technical_details"] = """## Technical Implementation Approach

### Cookbook Conversion Strategy
• **Resource Mapping:** Direct mapping of Chef resources to Ansible modules
• **Variable Extraction:** Chef node attributes converted to Ansible variables
• **Template Conversion:** ERB templates converted to Jinja2 format
• **Custom Resources:** Manual conversion to Ansible roles/modules

### Data Migration
• **Node Attributes:** Migrated to Ansible inventory variables
• **Data Bags:** Converted to Ansible Vault encrypted variables
• **Environments:** Mapped to inventory groups with variable precedence

### Testing and Validation
• **Syntax Validation:** ansible-lint and yaml-lint integration
• **Functional Testing:** molecule framework for role testing
• **Integration Testing:** testinfra for infrastructure validation
• **Performance Testing:** Execution time and resource usage comparison"""
    return report
+
956
+
957
+ def _format_dependency_overview(analysis: dict) -> str:
958
+ """Format dependency overview."""
959
+ return f"""• Direct Dependencies: {len(analysis["direct_dependencies"])}
960
+ • External Dependencies: {len(analysis["external_dependencies"])}
961
+ • Community Cookbooks: {len(analysis["community_cookbooks"])}
962
+ • Circular Dependencies: {len(analysis["circular_dependencies"])}"""
963
+
964
+
965
+ def _format_dependency_graph(analysis: dict) -> str:
966
+ """Format dependency graph (text representation)."""
967
+ graph = [f"{analysis['cookbook_name']} depends on:"]
968
+
969
+ for dep in analysis["direct_dependencies"]:
970
+ graph.append(f" ├── {dep}")
971
+
972
+ if analysis["external_dependencies"]:
973
+ graph.append("External dependencies:")
974
+ for dep in analysis["external_dependencies"]:
975
+ graph.append(f" ├── {dep}")
976
+
977
+ return "\n".join(graph) if len(graph) > 1 else "No dependencies found."
978
+
979
+
980
+ def _format_migration_order(order: list) -> str:
981
+ """Format migration order recommendations."""
982
+ if not order:
983
+ return "No order analysis available."
984
+
985
+ formatted = []
986
+ for item in sorted(order, key=lambda x: x["priority"]):
987
+ priority_text = f"Priority {item['priority']}"
988
+ formatted.append(f"• {item['cookbook']} - {priority_text}: {item['reason']}")
989
+
990
+ return "\n".join(formatted)
991
+
992
+
993
+ def _format_circular_dependencies(circular: list) -> str:
994
+ """Format circular dependencies."""
995
+ if not circular:
996
+ return "✅ No circular dependencies detected."
997
+
998
+ formatted = []
999
+ for circ in circular:
1000
+ formatted.append(
1001
+ f"⚠️ {circ['cookbook1']} ↔ {circ['cookbook2']} ({circ['type']})"
1002
+ )
1003
+
1004
+ return "\n".join(formatted)
1005
+
1006
+
1007
+ def _format_external_dependencies(analysis: dict) -> str:
1008
+ """Format external dependencies."""
1009
+ if not analysis["external_dependencies"]:
1010
+ return "No external dependencies."
1011
+
1012
+ return "\n".join([f"• {dep}" for dep in analysis["external_dependencies"]])
1013
+
1014
+
1015
+ def _format_community_cookbooks(analysis: dict) -> str:
1016
+ """Format community cookbooks."""
1017
+ if not analysis["community_cookbooks"]:
1018
+ return "No community cookbooks identified."
1019
+
1020
+ return "\n".join(
1021
+ [
1022
+ f"• {cb} (consider ansible-galaxy role)"
1023
+ for cb in analysis["community_cookbooks"]
1024
+ ]
1025
+ )
1026
+
1027
+
1028
+ def _analyze_dependency_migration_impact(analysis: dict) -> str:
1029
+ """Analyze migration impact of dependencies."""
1030
+ impacts = []
1031
+
1032
+ if analysis["community_cookbooks"]:
1033
+ impacts.append(
1034
+ f"• {len(analysis['community_cookbooks'])} community cookbooks can likely be replaced with Ansible Galaxy roles"
1035
+ )
1036
+
1037
+ if analysis["circular_dependencies"]:
1038
+ impacts.append(
1039
+ f"• {len(analysis['circular_dependencies'])} circular dependencies need resolution before migration"
1040
+ )
1041
+
1042
+ direct_count = len(analysis["direct_dependencies"])
1043
+ if direct_count > 5:
1044
+ impacts.append(
1045
+ f"• High dependency count ({direct_count}) suggests complex migration order requirements"
1046
+ )
1047
+
1048
+ if not impacts:
1049
+ impacts.append(
1050
+ "• Low dependency complexity - straightforward migration expected"
1051
+ )
1052
+
1053
+ return "\n".join(impacts)
1054
+
1055
+
1056
+ def _generate_phased_migration_phases(assessments: list, timeline_weeks: int) -> str:
1057
+ """Generate phased migration phases."""
1058
+ phases = []
1059
+
1060
+ # Sort by complexity
1061
+ sorted_assessments = sorted(assessments, key=lambda x: x["complexity_score"])
1062
+
1063
+ phase1 = [a for a in sorted_assessments if a["complexity_score"] < 30]
1064
+ phase2 = [a for a in sorted_assessments if 30 <= a["complexity_score"] < 70]
1065
+ phase3 = [a for a in sorted_assessments if a["complexity_score"] >= 70]
1066
+
1067
+ weeks_per_phase = timeline_weeks // 3
1068
+
1069
+ phases.append(
1070
+ f"**Phase 1 (Weeks 1-{weeks_per_phase}):** Foundation & Low Complexity"
1071
+ )
1072
+ phases.append(f" • {len(phase1)} low-complexity cookbooks")
1073
+ phases.append(" • Setup AWX environment and CI/CD")
1074
+
1075
+ phases.append(
1076
+ f"\n**Phase 2 (Weeks {weeks_per_phase + 1}-{weeks_per_phase * 2}):** Medium Complexity"
1077
+ )
1078
+ phases.append(f" • {len(phase2)} medium-complexity cookbooks")
1079
+ phases.append(" • Parallel conversion and testing")
1080
+
1081
+ phases.append(
1082
+ f"\n**Phase 3 (Weeks {weeks_per_phase * 2 + 1}-{timeline_weeks}):** High Complexity & Finalization"
1083
+ )
1084
+ phases.append(f" • {len(phase3)} high-complexity cookbooks")
1085
+ phases.append(" • Final testing and deployment")
1086
+
1087
+ return "\n".join(phases)
1088
+
1089
+
1090
+ def _generate_big_bang_phases(assessments: list, timeline_weeks: int) -> str:
1091
+ """Generate big bang migration phases."""
1092
+ return f"""**Phase 1 (Weeks 1-2):** Preparation
1093
+ • AWX environment setup
1094
+ • Team training and preparation
1095
+ • Conversion tooling setup
1096
+
1097
+ **Phase 2 (Weeks 3-{timeline_weeks - 2}):** Mass Conversion
1098
+ • Parallel conversion of all {len(assessments)} cookbooks
1099
+ • Continuous integration and testing
1100
+ • Issue resolution and refinement
1101
+
1102
+ **Phase 3 (Weeks {timeline_weeks - 1}-{timeline_weeks}):** Cutover
1103
+ • Final validation and testing
1104
+ • Production deployment
1105
+ • Rollback readiness verification"""
1106
+
1107
+
1108
+ def _generate_parallel_migration_phases(timeline_weeks: int) -> str:
1109
+ """Generate parallel migration phases."""
1110
+ return f"""**Track A - Infrastructure (Weeks 1-{timeline_weeks}):**
1111
+ • Core infrastructure cookbooks
1112
+ • Base OS configuration
1113
+ • Security and compliance
1114
+
1115
+ **Track B - Applications (Weeks 1-{timeline_weeks}):**
1116
+ • Application deployment cookbooks
1117
+ • Service configuration
1118
+ • Custom business logic
1119
+
1120
+ **Track C - Integration (Weeks 1-{timeline_weeks}):**
1121
+ • AWX workflow development
1122
+ • CI/CD pipeline integration
1123
+ • Testing and validation automation"""
1124
+
1125
+
1126
+ def _generate_migration_timeline(strategy: str, timeline_weeks: int) -> str:
1127
+ """Generate migration timeline."""
1128
+ milestones = []
1129
+
1130
+ if strategy == "phased":
1131
+ week_intervals = timeline_weeks // 4
1132
+ milestones = [
1133
+ f"Week {week_intervals}: Phase 1 completion - Low complexity cookbooks migrated",
1134
+ f"Week {week_intervals * 2}: Phase 2 completion - Medium complexity cookbooks migrated",
1135
+ f"Week {week_intervals * 3}: Phase 3 completion - High complexity cookbooks migrated",
1136
+ f"Week {timeline_weeks}: Final validation and production deployment",
1137
+ ]
1138
+ else:
1139
+ milestones = [
1140
+ "Week 2: Environment setup and team training complete",
1141
+ f"Week {timeline_weeks // 2}: 50% of cookbooks converted and tested",
1142
+ f"Week {timeline_weeks - 2}: All conversions complete, final testing",
1143
+ f"Week {timeline_weeks}: Production deployment and go-live",
1144
+ ]
1145
+
1146
+ return "\n".join([f"• {milestone}" for milestone in milestones])
1147
+
1148
+
1149
+ def _format_validation_results_text(
1150
+ conversion_type: str, results: list[ValidationResult], summary: dict[str, int]
1151
+ ) -> str:
1152
+ """
1153
+ Format validation results as text.
1154
+
1155
+ Args:
1156
+ conversion_type: Type of conversion.
1157
+ results: List of validation results.
1158
+ summary: Summary of validation results.
1159
+
1160
+ Returns:
1161
+ Formatted text output.
1162
+
1163
+ """
1164
+ if not results:
1165
+ return f"""# Validation Results for {conversion_type} Conversion
1166
+
1167
+ ✅ All validation checks passed! No issues found.
1168
+ """
1169
+ output_lines = [
1170
+ f"# Validation Results for {conversion_type} Conversion",
1171
+ "",
1172
+ "## Summary",
1173
+ f"• Errors: {summary['errors']}",
1174
+ f"• Warnings: {summary['warnings']}",
1175
+ f"• Info: {summary['info']}",
1176
+ "",
1177
+ ]
1178
+
1179
+ # Group results by level
1180
+ errors = [r for r in results if r.level == ValidationLevel.ERROR]
1181
+ warnings = [r for r in results if r.level == ValidationLevel.WARNING]
1182
+ infos = [r for r in results if r.level == ValidationLevel.INFO]
1183
+
1184
+ if errors:
1185
+ output_lines.append("## ❌ Errors")
1186
+ output_lines.append("")
1187
+ for result in errors:
1188
+ output_lines.append(str(result))
1189
+ output_lines.append("")
1190
+
1191
+ if warnings:
1192
+ output_lines.append("## ⚠️ Warnings")
1193
+ output_lines.append("")
1194
+ for result in warnings:
1195
+ output_lines.append(str(result))
1196
+ output_lines.append("")
1197
+
1198
+ if infos:
1199
+ output_lines.append("## ℹ️ Information")
1200
+ output_lines.append("")
1201
+ for result in infos:
1202
+ output_lines.append(str(result))
1203
+ output_lines.append("")
1204
+
1205
+ return "\n".join(output_lines)
1206
+
1207
+
1208
+ def _format_validation_results_summary(
1209
+ conversion_type: str, summary: dict[str, int]
1210
+ ) -> str:
1211
+ """
1212
+ Format validation results as summary.
1213
+
1214
+ Args:
1215
+ conversion_type: Type of conversion.
1216
+ summary: Summary of validation results.
1217
+
1218
+ Returns:
1219
+ Formatted summary output.
1220
+
1221
+ """
1222
+ return f"""# Validation Summary
1223
+
1224
+ ✓ Conversion Type: {conversion_type}
1225
+ • Errors: {summary["errors"]}
1226
+ • Warnings: {summary["warnings"]}
1227
+ • Info: {summary["info"]}
1228
+
1229
+ {"✅ No critical issues found!" if summary["errors"] == 0 else "❌ Critical issues found - review errors"}
1230
+ """