kailash 0.9.15__py3-none-any.whl → 0.9.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. kailash/middleware/database/base_models.py +7 -1
  2. kailash/migration/__init__.py +30 -0
  3. kailash/migration/cli.py +340 -0
  4. kailash/migration/compatibility_checker.py +662 -0
  5. kailash/migration/configuration_validator.py +837 -0
  6. kailash/migration/documentation_generator.py +1828 -0
  7. kailash/migration/examples/__init__.py +5 -0
  8. kailash/migration/examples/complete_migration_example.py +692 -0
  9. kailash/migration/migration_assistant.py +715 -0
  10. kailash/migration/performance_comparator.py +760 -0
  11. kailash/migration/regression_detector.py +1141 -0
  12. kailash/migration/tests/__init__.py +6 -0
  13. kailash/migration/tests/test_compatibility_checker.py +403 -0
  14. kailash/migration/tests/test_integration.py +463 -0
  15. kailash/migration/tests/test_migration_assistant.py +397 -0
  16. kailash/migration/tests/test_performance_comparator.py +433 -0
  17. kailash/nodes/data/async_sql.py +1507 -6
  18. kailash/runtime/local.py +1255 -8
  19. kailash/runtime/monitoring/__init__.py +1 -0
  20. kailash/runtime/monitoring/runtime_monitor.py +780 -0
  21. kailash/runtime/resource_manager.py +3033 -0
  22. kailash/sdk_exceptions.py +21 -0
  23. kailash/workflow/cyclic_runner.py +18 -2
  24. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/METADATA +1 -1
  25. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/RECORD +30 -12
  26. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/WHEEL +0 -0
  27. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/entry_points.txt +0 -0
  28. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/LICENSE +0 -0
  29. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/NOTICE +0 -0
  30. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/top_level.txt +0 -0
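The bulk of this release is the new `kailash.migration` package, which is exercised end to end by the example script added below. As a minimal sketch of the entry point (class and method names are taken from that example; treat the exact call signatures as illustrative), a standalone compatibility scan would look roughly like this:

```python
from pathlib import Path

from kailash.migration import CompatibilityChecker

# Scan an existing project for legacy LocalRuntime usage patterns
# (mirrors Step 1 of examples/complete_migration_example.py below).
checker = CompatibilityChecker()
analysis = checker.analyze_codebase(Path("my_project"))

print(f"Issues found: {len(analysis.issues)}")
print(f"Migration complexity: {analysis.migration_complexity}")

# Render the findings as a markdown report.
Path("compatibility_analysis.md").write_text(
    checker.generate_report(analysis, "markdown")
)
```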
kailash/migration/examples/__init__.py
@@ -0,0 +1,5 @@
+ """Examples and demonstrations for LocalRuntime migration tools.
+
+ This package contains practical examples showing how to use the migration
+ tools and utilities for LocalRuntime upgrades and migrations.
+ """
kailash/migration/examples/complete_migration_example.py
@@ -0,0 +1,692 @@
+ #!/usr/bin/env python3
+ """Complete LocalRuntime migration example.
+
+ This script demonstrates the complete migration workflow using all available
+ migration tools and utilities. It serves as both a practical example and
+ a comprehensive test of the migration toolchain.
+ """
+
+ import argparse
+ import tempfile
+ import textwrap
+ from pathlib import Path
+
+ from kailash.migration import (
+     CompatibilityChecker,
+     ConfigurationValidator,
+     MigrationAssistant,
+     MigrationDocGenerator,
+     PerformanceComparator,
+     RegressionDetector,
+ )
+ from kailash.workflow.builder import WorkflowBuilder
+
+
+ def create_sample_project(project_path: Path) -> None:
+     """Create a sample project with various migration scenarios."""
+     project_path.mkdir(exist_ok=True)
+
+     print(f"Creating sample project in: {project_path}")
+
+     # Legacy application file
+     (project_path / "legacy_app.py").write_text(
+         textwrap.dedent(
+             """
+             from kailash.runtime.local import LocalRuntime
+             from kailash.workflow.builder import WorkflowBuilder
+
+             class LegacyWorkflowRunner:
+                 def __init__(self):
+                     # Legacy configuration patterns
+                     self.runtime = LocalRuntime(
+                         enable_parallel=True,
+                         thread_pool_size=12,
+                         debug_mode=False,
+                         memory_limit=2048,
+                         timeout=600,
+                         retry_count=3
+                     )
+
+                 def run_data_processing(self, input_data):
+                     # Create workflow
+                     workflow = WorkflowBuilder()
+
+                     # Data validation step
+                     workflow.add_node("PythonCodeNode", "validate", {
+                         "code": '''
+             if not input_data:
+                 raise ValueError("Input data is required")
+             result = {"valid": True, "data": input_data}
+             ''',
+                         "input_mapping": {"input_data": "input_data"},
+                         "output_key": "validated_data"
+                     })
+
+                     # Processing step
+                     workflow.add_node("PythonCodeNode", "process", {
+                         "code": '''
+             import json
+             data = validated_data["data"]
+             processed = {
+                 "items_count": len(data) if isinstance(data, list) else 1,
+                 "processed_at": "2024-01-01T00:00:00Z",
+                 "status": "completed"
+             }
+             result = processed
+             ''',
+                         "input_mapping": {"validated_data": "validate.validated_data"},
+                         "output_key": "processed_result"
+                     })
+
+                     # Legacy execution pattern
+                     self.runtime.execute_sync(workflow.build(), parameters={"input_data": input_data})
+                     return self.runtime.get_results()
+
+                 def cleanup(self):
+                     # Manual cleanup (legacy pattern)
+                     self.runtime.set_context(None)
+             """
+         ).strip()
+     )
+
+     # Configuration file with multiple legacy patterns
+     (project_path / "config.py").write_text(
+         textwrap.dedent(
+             """
+             # Legacy configuration patterns
+
+             DEVELOPMENT_CONFIG = {
+                 'debug_mode': True,
+                 'enable_parallel': False,
+                 'memory_limit': 1024,
+                 'log_level': 'DEBUG'
+             }
+
+             PRODUCTION_CONFIG = {
+                 'enable_parallel': True,
+                 'thread_pool_size': 32,
+                 'memory_limit': 8192,
+                 'timeout': 1800,
+                 'retry_count': 5,
+                 'cache_enabled': True
+             }
+
+             TESTING_CONFIG = {
+                 'debug_mode': True,
+                 'thread_pool_size': 2,
+                 'timeout': 300
+             }
+
+             def get_runtime_config(environment='development'):
+                 configs = {
+                     'development': DEVELOPMENT_CONFIG,
+                     'production': PRODUCTION_CONFIG,
+                     'testing': TESTING_CONFIG
+                 }
+                 return configs.get(environment, DEVELOPMENT_CONFIG)
+             """
+         ).strip()
+     )
+
+     # Already modernized file (should not be changed)
+     (project_path / "modern_service.py").write_text(
+         textwrap.dedent(
+             """
+             from kailash.runtime.local import LocalRuntime
+             from kailash.access_control import UserContext
+             from kailash.workflow.builder import WorkflowBuilder
+
+             class ModernWorkflowService:
+                 def __init__(self, user_id: str):
+                     # Modern configuration
+                     self.user_context = UserContext(
+                         user_id=user_id,
+                         roles=["data_processor", "workflow_admin"]
+                     )
+
+                     self.runtime = LocalRuntime(
+                         debug=False,
+                         max_concurrency=20,
+                         enable_monitoring=True,
+                         enable_security=True,
+                         enable_audit=True,
+                         user_context=self.user_context,
+                         resource_limits={
+                             'memory_mb': 4096,
+                             'timeout_seconds': 1200
+                         },
+                         retry_policy_config={
+                             'max_retries': 3,
+                             'backoff_factor': 2.0
+                         }
+                     )
+
+                 def execute_workflow(self, workflow):
+                     # Modern execution pattern
+                     results, run_id = self.runtime.execute(workflow)
+                     return {
+                         'results': results,
+                         'run_id': run_id,
+                         'user': self.user_context.user_id
+                     }
+
+                 def create_analytics_workflow(self):
+                     workflow = WorkflowBuilder()
+
+                     workflow.add_node("PythonCodeNode", "analyze", {
+                         "code": '''
+             import datetime
+             result = {
+                 "analysis_timestamp": datetime.datetime.now().isoformat(),
+                 "metrics": {
+                     "total_processed": 100,
+                     "success_rate": 0.95,
+                     "avg_duration_ms": 250
+                 }
+             }
+             ''',
+                         "output_key": "analytics"
+                     })
+
+                     return workflow.build()
+             """
+         ).strip()
+     )
+
+     # Test file
+     (project_path / "test_workflows.py").write_text(
+         textwrap.dedent(
+             """
+             import pytest
+             from legacy_app import LegacyWorkflowRunner
+             from modern_service import ModernWorkflowService
+
+             def test_legacy_workflow():
+                 runner = LegacyWorkflowRunner()
+                 result = runner.run_data_processing(["item1", "item2", "item3"])
+                 assert result is not None
+                 runner.cleanup()
+
+             def test_modern_workflow():
+                 service = ModernWorkflowService("test_user")
+                 workflow = service.create_analytics_workflow()
+                 result = service.execute_workflow(workflow)
+
+                 assert result is not None
+                 assert "results" in result
+                 assert "run_id" in result
+                 assert "user" in result
+                 assert result["user"] == "test_user"
+
+             def test_data_validation():
+                 runner = LegacyWorkflowRunner()
+
+                 # Test with valid data
+                 result = runner.run_data_processing(["valid", "data"])
+                 assert result is not None
+
+                 # Test with empty data (should handle gracefully)
+                 try:
+                     runner.run_data_processing([])
+                     # Should either work or fail gracefully
+                 except Exception as e:
+                     assert "required" in str(e).lower()
+
+                 runner.cleanup()
+             """
+         ).strip()
+     )
+
+     # README file
+     (project_path / "README.md").write_text(
+         textwrap.dedent(
+             """
+             # Sample Migration Project
+
+             This is a sample project demonstrating LocalRuntime migration patterns.
+
+             ## Files
+
+             - `legacy_app.py` - Contains legacy LocalRuntime usage patterns
+             - `config.py` - Configuration files with deprecated parameters
+             - `modern_service.py` - Already modernized code (should not be changed)
+             - `test_workflows.py` - Test cases for both legacy and modern patterns
+
+             ## Migration Notes
+
+             This project contains various migration scenarios:
+
+             1. **Legacy Configuration**: Old parameter names and patterns
+             2. **Legacy Execution**: `execute_sync()` and `get_results()` patterns
+             3. **Manual Resource Management**: Manual cleanup patterns
+             4. **Mixed Patterns**: Some modern and some legacy code
+
+             The migration tools should identify and fix these issues automatically.
+             """
+         ).strip()
+     )
+
+     print("✅ Sample project created successfully")
+
+
+ def run_complete_migration_example(project_path: Path, output_dir: Path) -> None:
+     """Run the complete migration example."""
+     print("\n" + "=" * 60)
+     print("LOCALRUNTIME MIGRATION TOOLCHAIN EXAMPLE")
+     print("=" * 60)
+
+     output_dir.mkdir(exist_ok=True)
+
+     # Step 1: Compatibility Analysis
+     print("\n🔍 Step 1: Compatibility Analysis")
+     print("-" * 35)
+
+     checker = CompatibilityChecker()
+     analysis_result = checker.analyze_codebase(project_path)
+
+     print(f"Files analyzed: {analysis_result.total_files_analyzed}")
+     print(f"Issues found: {len(analysis_result.issues)}")
+     print(f"Migration complexity: {analysis_result.migration_complexity}")
+     print(f"Estimated effort: {analysis_result.estimated_effort_days} days")
+
+     # Save analysis report
+     analysis_report = checker.generate_report(analysis_result, "markdown")
+     (output_dir / "01_compatibility_analysis.md").write_text(analysis_report)
+     print("📄 Analysis report saved to: 01_compatibility_analysis.md")
+
+     # Step 2: Configuration Validation
+     print("\n⚙️ Step 2: Configuration Validation")
+     print("-" * 37)
+
+     validator = ConfigurationValidator()
+
+     # Test various configurations
+     configs_to_test = [
+         (
+             "Legacy Development",
+             {"debug_mode": True, "enable_parallel": False, "memory_limit": 1024},
+         ),
+         (
+             "Legacy Production",
+             {
+                 "enable_parallel": True,
+                 "thread_pool_size": 32,
+                 "memory_limit": 8192,
+                 "timeout": 1800,
+                 "retry_count": 5,
+             },
+         ),
+         (
+             "Modern Configuration",
+             {
+                 "debug": True,
+                 "max_concurrency": 10,
+                 "enable_monitoring": True,
+                 "resource_limits": {"memory_mb": 2048},
+             },
+         ),
+     ]
+
+     validation_reports = []
+     for config_name, config in configs_to_test:
+         print(f"\nValidating {config_name} configuration:")
+         validation_result = validator.validate_configuration(config)
+
+         print(f" Valid: {'Yes' if validation_result.valid else 'No'}")
+         print(f" Issues: {len(validation_result.issues)}")
+         print(f" Security score: {validation_result.security_score}/100")
+         print(f" Performance score: {validation_result.performance_score}/100")
+
+         # Save validation report
+         config_report = validator.generate_validation_report(
+             validation_result, "markdown"
+         )
+         safe_name = config_name.lower().replace(" ", "_")
+         report_file = output_dir / f"02_validation_{safe_name}.md"
+         report_file.write_text(
+             f"# {config_name} Configuration Validation\n\n{config_report}"
+         )
+
+         validation_reports.append((config_name, validation_result))
+
+     print("📄 Configuration validation reports saved")
+
+     # Step 3: Migration Planning and Execution
+     print("\n🚀 Step 3: Migration Planning and Execution")
+     print("-" * 45)
+
+     assistant = MigrationAssistant(dry_run=True, create_backups=True)
+     migration_plan = assistant.create_migration_plan(project_path)
+
+     print(f"Migration steps: {len(migration_plan.steps)}")
+     print(f"Estimated duration: {migration_plan.estimated_duration_minutes} minutes")
+     print(f"Risk level: {migration_plan.risk_level}")
+
+     # Execute migration (dry run)
+     migration_result = assistant.execute_migration(migration_plan)
+
+     print(f"Migration success: {'Yes' if migration_result.success else 'No'}")
+     print(f"Steps completed: {migration_result.steps_completed}")
+     print(f"Steps failed: {migration_result.steps_failed}")
+
+     # Save migration report
+     migration_report = assistant.generate_migration_report(
+         migration_plan, migration_result
+     )
+     (output_dir / "03_migration_execution.md").write_text(migration_report)
+     print("📄 Migration report saved to: 03_migration_execution.md")
+
+     # Step 4: Performance Analysis
+     print("\n📊 Step 4: Performance Analysis")
+     print("-" * 33)
+
+     comparator = PerformanceComparator(sample_size=2, warmup_runs=1)
+
+     # Compare legacy vs modern configurations
+     legacy_config = {"debug": True, "max_concurrency": 1}
+     modern_config = {
+         "debug": True,
+         "max_concurrency": 8,
+         "enable_monitoring": True,
+         "persistent_mode": True,
+     }
+
+     try:
+         # Create test workflows for performance comparison
+         simple_workflow = WorkflowBuilder()
+         simple_workflow.add_node(
+             "PythonCodeNode",
+             "perf_test",
+             {
+                 "code": "result = sum(i*i for i in range(1000))",
+                 "output_key": "sum_squares",
+             },
+         )
+         test_workflows = [("performance_test", simple_workflow.build())]
+
+         print("Comparing legacy vs modern configurations...")
+         performance_report = comparator.compare_configurations(
+             legacy_config, modern_config, test_workflows
+         )
+
+         print(
+             f"Overall performance change: {performance_report.overall_change_percentage:+.1f}%"
+         )
+         print(
+             f"Performance status: {'Improvement' if performance_report.overall_improvement else 'Regression'}"
+         )
+         print(f"Risk assessment: {performance_report.risk_assessment}")
+
+         # Save performance report
+         perf_report = comparator.generate_performance_report(
+             performance_report, "markdown"
+         )
+         (output_dir / "04_performance_analysis.md").write_text(perf_report)
+         print("📄 Performance report saved to: 04_performance_analysis.md")
+
+     except Exception as e:
+         print(f"⚠️ Performance analysis skipped (requires full LocalRuntime): {e}")
+         print(" In a real migration, this would provide detailed performance metrics")
+
+     # Step 5: Regression Detection
+     print("\n🔍 Step 5: Regression Detection")
+     print("-" * 32)
+
+     try:
+         detector = RegressionDetector(
+             baseline_path=output_dir / "baseline.json", parallel_tests=False
+         )
+
+         # Create baseline
+         print("Creating performance baseline...")
+         baseline_config = {"debug": True, "max_concurrency": 2}
+
+         simple_workflow = WorkflowBuilder()
+         simple_workflow.add_node(
+             "PythonCodeNode",
+             "regression_test",
+             {"code": "result = 'regression_test_passed'", "output_key": "test_result"},
+         )
+         test_workflows = [("regression_check", simple_workflow.build())]
+
+         baselines = detector.create_baseline(baseline_config, test_workflows)
+         print(f"Created {len(baselines)} baseline snapshots")
+
+         # Test for regressions
+         print("Checking for regressions...")
+         modified_config = {"debug": True, "max_concurrency": 4}
+         regression_report = detector.detect_regressions(modified_config, test_workflows)
+
+         print(f"Tests run: {regression_report.total_tests}")
+         print(f"Passed: {regression_report.passed_tests}")
+         print(f"Failed: {regression_report.failed_tests}")
+         print(f"Regression issues: {len(regression_report.regression_issues)}")
+         print(f"Overall status: {regression_report.overall_status}")
+
+         # Save regression report
+         regression_text = detector.generate_regression_report(
+             regression_report, "markdown"
+         )
+         (output_dir / "05_regression_detection.md").write_text(regression_text)
+         print("📄 Regression report saved to: 05_regression_detection.md")
+
+     except Exception as e:
+         print(f"⚠️ Regression detection skipped (requires full LocalRuntime): {e}")
+         print(
+             " In a real migration, this would detect performance and functional regressions"
+         )
+
+     # Step 6: Documentation Generation
+     print("\n📚 Step 6: Documentation Generation")
+     print("-" * 36)
+
+     doc_generator = MigrationDocGenerator()
+
+     # Generate comprehensive migration guide
+     migration_guide = doc_generator.generate_migration_guide(
+         analysis_result=analysis_result,
+         migration_plan=migration_plan,
+         migration_result=migration_result,
+         validation_result=validation_reports[0][1],  # Use first validation result
+         scenario="enterprise",
+         audience="developer",
+     )
+
+     print(f"Generated migration guide with {len(migration_guide.sections)} sections")
+
+     # Export guide as markdown
+     guide_path = output_dir / "06_complete_migration_guide.md"
+     doc_generator.export_guide(migration_guide, guide_path, "markdown")
+     print("📄 Complete migration guide saved to: 06_complete_migration_guide.md")
+
+     # Generate audience-specific guides
+     audiences = ["developer", "admin", "architect"]
+     for audience in audiences:
+         audience_guide = doc_generator.generate_migration_guide(
+             analysis_result=analysis_result,
+             migration_plan=migration_plan,
+             scenario="standard",
+             audience=audience,
+         )
+
+         audience_path = output_dir / f"06_migration_guide_{audience}.md"
+         doc_generator.export_guide(audience_guide, audience_path, "markdown")
+
+     print("📄 Audience-specific guides generated")
+
+     # Step 7: Summary Report
+     print("\n📋 Step 7: Summary Report")
+     print("-" * 25)
+
+     summary_report = generate_summary_report(
+         analysis_result, migration_plan, migration_result, validation_reports
+     )
+
+     (output_dir / "00_migration_summary.md").write_text(summary_report)
+     print("📄 Migration summary saved to: 00_migration_summary.md")
+
+     print("\n" + "=" * 60)
+     print("MIGRATION ANALYSIS COMPLETE")
+     print("=" * 60)
+     print(f"\nAll reports have been saved to: {output_dir}")
+     print("\nFiles generated:")
+     for report_file in sorted(output_dir.glob("*.md")):
+         print(f" 📄 {report_file.name}")
+
+     print("\n✅ Complete migration toolchain example finished successfully!")
+
+
+ def generate_summary_report(
+     analysis_result, migration_plan, migration_result, validation_reports
+ ):
+     """Generate a comprehensive summary report."""
+
+     report_lines = [
+         "# LocalRuntime Migration Summary",
+         "",
+         f"*Generated: {migration_result.backup_path or 'N/A'}*",
+         "",
+         "## Executive Summary",
+         "",
+         f"- **Files Analyzed**: {analysis_result.total_files_analyzed}",
+         f"- **Issues Identified**: {len(analysis_result.issues)}",
+         f"- **Migration Complexity**: {analysis_result.migration_complexity.title()}",
+         f"- **Estimated Effort**: {analysis_result.estimated_effort_days} days",
+         f"- **Migration Success**: {'Yes' if migration_result.success else 'No'}",
+         f"- **Steps Completed**: {migration_result.steps_completed}/{len(migration_plan.steps)}",
+         "",
+         "## Issues Breakdown",
+         "",
+         f"- Critical Issues: {analysis_result.summary.get('critical_issues', 0)}",
+         f"- High Priority: {analysis_result.summary.get('high_issues', 0)}",
+         f"- Medium Priority: {analysis_result.summary.get('medium_issues', 0)}",
+         f"- Low Priority: {analysis_result.summary.get('low_issues', 0)}",
+         f"- Breaking Changes: {analysis_result.summary.get('breaking_changes', 0)}",
+         f"- Automated Fixes: {analysis_result.summary.get('automated_fixes', 0)}",
+         "",
+         "## Configuration Analysis",
+         "",
+     ]
+
+     for config_name, validation_result in validation_reports:
+         report_lines.extend(
+             [
+                 f"### {config_name}",
+                 "",
+                 f"- **Valid**: {'Yes' if validation_result.valid else 'No'}",
+                 f"- **Issues**: {len(validation_result.issues)}",
+                 f"- **Security Score**: {validation_result.security_score}/100",
+                 f"- **Performance Score**: {validation_result.performance_score}/100",
+                 f"- **Enterprise Readiness**: {validation_result.enterprise_readiness}/100",
+                 "",
+             ]
+         )
+
+     report_lines.extend(
+         [
+             "## Migration Plan",
+             "",
+             f"- **Total Steps**: {len(migration_plan.steps)}",
+             f"- **Estimated Duration**: {migration_plan.estimated_duration_minutes} minutes",
+             f"- **Risk Level**: {migration_plan.risk_level.title()}",
+             f"- **Prerequisites**: {len(migration_plan.prerequisites)}",
+             f"- **Post-Migration Tests**: {len(migration_plan.post_migration_tests)}",
+             "",
+             "## Recommendations",
+             "",
+         ]
+     )
+
+     # Add recommendations based on analysis
+     if analysis_result.summary.get("critical_issues", 0) > 0:
+         report_lines.append(
+             "🚨 **CRITICAL**: Address critical issues before proceeding with migration"
+         )
+     elif analysis_result.summary.get("breaking_changes", 0) > 0:
+         report_lines.append(
+             "⚠️ **HIGH PRIORITY**: Review breaking changes and update code accordingly"
+         )
+     elif migration_result.success:
+         report_lines.append("✅ **SUCCESS**: Migration completed successfully")
+     else:
+         report_lines.append(
+             "❌ **FAILED**: Migration encountered errors - review and retry"
+         )
+
+     report_lines.extend(
+         [
+             "",
+             "## Next Steps",
+             "",
+             "1. Review detailed analysis reports",
+             "2. Address critical and high-priority issues",
+             "3. Execute migration plan (remove dry-run mode)",
+             "4. Run comprehensive tests",
+             "5. Monitor performance post-migration",
+             "",
+             "## Report Files",
+             "",
+             "- `01_compatibility_analysis.md` - Detailed compatibility analysis",
+             "- `02_validation_*.md` - Configuration validation reports",
+             "- `03_migration_execution.md` - Migration execution details",
+             "- `04_performance_analysis.md` - Performance comparison (if available)",
+             "- `05_regression_detection.md` - Regression analysis (if available)",
+             "- `06_complete_migration_guide.md` - Comprehensive migration guide",
+             "- `06_migration_guide_*.md` - Audience-specific guides",
+         ]
+     )
+
+     return "\n".join(report_lines)
+
+
+ def main():
+     """Main function for the migration example script."""
+     parser = argparse.ArgumentParser(
+         description="LocalRuntime Migration Toolchain Example"
+     )
+     parser.add_argument(
+         "--project-path",
+         type=Path,
+         help="Path to existing project (if not provided, creates sample project)",
+     )
+     parser.add_argument(
+         "--output-dir",
+         type=Path,
+         default=Path("migration_reports"),
+         help="Directory for output reports (default: migration_reports)",
+     )
+     parser.add_argument(
+         "--create-sample",
+         action="store_true",
+         help="Create a sample project for demonstration",
+     )
+
+     args = parser.parse_args()
+
+     try:
+         if args.create_sample or not args.project_path:
+             # Create sample project
+             with tempfile.TemporaryDirectory() as temp_dir:
+                 project_path = Path(temp_dir) / "sample_project"
+                 create_sample_project(project_path)
+                 run_complete_migration_example(project_path, args.output_dir)
+         else:
+             # Use existing project
+             if not args.project_path.exists():
+                 print(f"❌ Error: Project path does not exist: {args.project_path}")
+                 return 1
+
+             run_complete_migration_example(args.project_path, args.output_dir)
+
+         return 0
+
+     except Exception as e:
+         print(f"❌ Error during migration analysis: {e}")
+         import traceback
+
+         traceback.print_exc()
+         return 1
+
+
+ if __name__ == "__main__":
+     exit(main())