kailash 0.9.15__py3-none-any.whl → 0.9.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. kailash/__init__.py +4 -3
  2. kailash/middleware/database/base_models.py +7 -1
  3. kailash/migration/__init__.py +30 -0
  4. kailash/migration/cli.py +340 -0
  5. kailash/migration/compatibility_checker.py +662 -0
  6. kailash/migration/configuration_validator.py +837 -0
  7. kailash/migration/documentation_generator.py +1828 -0
  8. kailash/migration/examples/__init__.py +5 -0
  9. kailash/migration/examples/complete_migration_example.py +692 -0
  10. kailash/migration/migration_assistant.py +715 -0
  11. kailash/migration/performance_comparator.py +760 -0
  12. kailash/migration/regression_detector.py +1141 -0
  13. kailash/migration/tests/__init__.py +6 -0
  14. kailash/migration/tests/test_compatibility_checker.py +403 -0
  15. kailash/migration/tests/test_integration.py +463 -0
  16. kailash/migration/tests/test_migration_assistant.py +397 -0
  17. kailash/migration/tests/test_performance_comparator.py +433 -0
  18. kailash/monitoring/__init__.py +29 -2
  19. kailash/monitoring/asyncsql_metrics.py +275 -0
  20. kailash/nodes/data/async_sql.py +1828 -33
  21. kailash/runtime/local.py +1255 -8
  22. kailash/runtime/monitoring/__init__.py +1 -0
  23. kailash/runtime/monitoring/runtime_monitor.py +780 -0
  24. kailash/runtime/resource_manager.py +3033 -0
  25. kailash/sdk_exceptions.py +21 -0
  26. kailash/workflow/cyclic_runner.py +18 -2
  27. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/METADATA +1 -1
  28. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/RECORD +33 -14
  29. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/WHEEL +0 -0
  30. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/entry_points.txt +0 -0
  31. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/licenses/LICENSE +0 -0
  32. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/licenses/NOTICE +0 -0
  33. {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,662 @@
1
+ """Compatibility checker for LocalRuntime migration analysis.
2
+
3
+ This module provides comprehensive static analysis of codebases to identify
4
+ potential compatibility issues when migrating to the enhanced LocalRuntime.
5
+ It analyzes usage patterns, configuration parameters, and provides detailed
6
+ recommendations for migration.
7
+ """
8
+
9
+ import ast
10
+ import os
11
+ import re
12
+ from dataclasses import dataclass, field
13
+ from enum import Enum
14
+ from pathlib import Path
15
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
16
+
17
+
18
class IssueType(Enum):
    """Categories of compatibility issues.

    Values are the stable string identifiers emitted in JSON reports
    (see ``CompatibilityChecker.generate_report``).
    """

    BREAKING_CHANGE = "breaking_change"  # removed/renamed APIs, also unanalyzable files
    DEPRECATED_PARAMETER = "deprecated_parameter"  # constructor kwargs slated for removal
    CONFIGURATION_UPDATE = "configuration_update"  # parameter renames, dict-style config
    PERFORMANCE_OPTIMIZATION = "performance_optimization"  # e.g. hardcoded resource limits
    SECURITY_ENHANCEMENT = "security_enhancement"  # not emitted by this module's checks
    FEATURE_MIGRATION = "feature_migration"  # not emitted by this module's checks
    ENTERPRISE_UPGRADE = "enterprise_upgrade"  # enterprise feature imports detected
28
+
29
+
30
class IssueSeverity(Enum):
    """Severity levels for compatibility issues.

    Ordered from most to least urgent; report generators list critical
    issues first.
    """

    CRITICAL = "critical"  # Blocks migration
    HIGH = "high"  # Requires immediate attention
    MEDIUM = "medium"  # Should be addressed
    LOW = "low"  # Optional optimization
    INFO = "info"  # Informational only
38
+
39
+
40
@dataclass
class CompatibilityIssue:
    """Represents a compatibility issue found during analysis."""

    issue_type: IssueType
    severity: IssueSeverity
    # Human-readable summary of what was found.
    description: str
    file_path: str
    # 1-based line; 0 when the issue applies to the whole file
    # (e.g. an analysis failure).
    line_number: int
    # The offending source line, stripped of surrounding whitespace.
    code_snippet: str
    # Guidance for resolving the issue, surfaced verbatim in reports.
    recommendation: str
    migration_effort: str  # "low", "medium", "high"
    automated_fix: bool = False  # True when a mechanical rewrite is possible
    breaking_change: bool = False  # True when existing code will fail outright
    enterprise_feature: bool = False  # True when tied to an enterprise capability
55
+
56
+
57
@dataclass
class AnalysisResult:
    """Results of compatibility analysis."""

    total_files_analyzed: int
    issues: List[CompatibilityIssue] = field(default_factory=list)
    # Aggregate counts keyed by category; populated by _generate_summary.
    summary: Dict[str, int] = field(default_factory=dict)
    # One of "trivial", "low", "medium", "high", "very_high" once assessed;
    # "unknown" until _assess_migration_complexity runs.
    migration_complexity: str = "unknown"
    estimated_effort_days: float = 0.0
    # Deduplicated descriptions of enterprise upgrade opportunities.
    enterprise_opportunities: List[str] = field(default_factory=list)
67
+
68
+
69
class CompatibilityChecker:
    """Analyzes codebases for LocalRuntime compatibility issues."""

    def __init__(self):
        """Initialize the compatibility checker.

        Builds the lookup tables that drive the AST and regex passes; each
        dict value is the recommendation text surfaced verbatim in reports.
        """
        # Constructor kwarg -> migration advice; hit raises a HIGH
        # DEPRECATED_PARAMETER issue with an automated fix available.
        self.deprecated_parameters = {
            "enable_parallel": "Use max_concurrency parameter instead",
            "thread_pool_size": "Use max_concurrency parameter instead",
            "memory_limit": "Use resource_limits parameter instead",
            "timeout": "Use resource_limits parameter instead",
            "log_level": "Use debug parameter or logging configuration",
            "cache_enabled": "Use enterprise caching nodes instead",
            "retry_count": "Use retry_policy_config parameter",
        }

        # Method name -> replacement advice; hit raises a CRITICAL
        # BREAKING_CHANGE issue.
        self.breaking_changes = {
            "execute_sync": "Method renamed to execute()",
            "execute_async": "Use enable_async=True parameter instead",
            "get_results": "Results now returned directly from execute()",
            "set_context": "Use user_context parameter in constructor",
            "enable_monitoring": "Parameter moved to constructor",
        }

        # Old kwarg name -> new kwarg name (mechanical rename;
        # MEDIUM CONFIGURATION_UPDATE with automated fix).
        self.parameter_migrations = {
            "debug_mode": "debug",
            "parallel_execution": "max_concurrency",
            "enable_security_audit": "enable_audit",
            "connection_pooling": "enable_connection_sharing",
            "persistent_resources": "persistent_mode",
        }

        # Imported name -> capability description; hits on
        # kailash.runtime imports raise INFO ENTERPRISE_UPGRADE issues.
        self.enterprise_patterns = {
            "UserContext": "Access control and security features",
            "ResourceLimits": "Enterprise resource management",
            "AuditLog": "Compliance and audit logging",
            "MonitoringNode": "Performance monitoring capabilities",
            "SecurityNode": "Enhanced security features",
        }
107
+
108
+ def analyze_codebase(
109
+ self,
110
+ root_path: Union[str, Path],
111
+ include_patterns: Optional[List[str]] = None,
112
+ exclude_patterns: Optional[List[str]] = None,
113
+ ) -> AnalysisResult:
114
+ """Analyze a codebase for LocalRuntime compatibility.
115
+
116
+ Args:
117
+ root_path: Root directory to analyze
118
+ include_patterns: File patterns to include (e.g., ['*.py'])
119
+ exclude_patterns: File patterns to exclude (e.g., ['test_*'])
120
+
121
+ Returns:
122
+ Analysis results with issues and recommendations
123
+ """
124
+ root_path = Path(root_path)
125
+ include_patterns = include_patterns or ["*.py"]
126
+ exclude_patterns = exclude_patterns or [
127
+ "__pycache__",
128
+ "*.pyc",
129
+ ".git",
130
+ ".venv",
131
+ "venv",
132
+ "node_modules",
133
+ ]
134
+
135
+ result = AnalysisResult(total_files_analyzed=0)
136
+
137
+ # Find Python files to analyze
138
+ python_files = self._find_python_files(
139
+ root_path, include_patterns, exclude_patterns
140
+ )
141
+ result.total_files_analyzed = len(python_files)
142
+
143
+ # Analyze each file
144
+ for file_path in python_files:
145
+ try:
146
+ file_issues = self._analyze_file(file_path)
147
+ result.issues.extend(file_issues)
148
+ except Exception as e:
149
+ # Add analysis error as an issue
150
+ error_issue = CompatibilityIssue(
151
+ issue_type=IssueType.BREAKING_CHANGE,
152
+ severity=IssueSeverity.HIGH,
153
+ description=f"Failed to analyze file: {str(e)}",
154
+ file_path=str(file_path),
155
+ line_number=0,
156
+ code_snippet="",
157
+ recommendation="Manual review required",
158
+ migration_effort="medium",
159
+ )
160
+ result.issues.append(error_issue)
161
+
162
+ # Generate summary and complexity assessment
163
+ self._generate_summary(result)
164
+ self._assess_migration_complexity(result)
165
+ self._identify_enterprise_opportunities(result)
166
+
167
+ return result
168
+
169
+ def _find_python_files(
170
+ self, root_path: Path, include_patterns: List[str], exclude_patterns: List[str]
171
+ ) -> List[Path]:
172
+ """Find Python files to analyze based on patterns."""
173
+ python_files = []
174
+
175
+ for pattern in include_patterns:
176
+ for file_path in root_path.rglob(pattern):
177
+ # Check if file should be excluded
178
+ exclude_file = False
179
+ for exclude_pattern in exclude_patterns:
180
+ if exclude_pattern in str(file_path):
181
+ exclude_file = True
182
+ break
183
+
184
+ if not exclude_file and file_path.is_file():
185
+ python_files.append(file_path)
186
+
187
+ return python_files
188
+
189
+ def _analyze_file(self, file_path: Path) -> List[CompatibilityIssue]:
190
+ """Analyze a single Python file for compatibility issues."""
191
+ issues = []
192
+
193
+ try:
194
+ with open(file_path, "r", encoding="utf-8") as f:
195
+ content = f.read()
196
+
197
+ # Parse AST for detailed analysis
198
+ tree = ast.parse(content)
199
+ issues.extend(self._analyze_ast(tree, file_path, content))
200
+
201
+ # Regex-based pattern analysis for complex patterns
202
+ issues.extend(self._analyze_patterns(content, file_path))
203
+
204
+ except SyntaxError as e:
205
+ issues.append(
206
+ CompatibilityIssue(
207
+ issue_type=IssueType.BREAKING_CHANGE,
208
+ severity=IssueSeverity.CRITICAL,
209
+ description=f"Syntax error in file: {str(e)}",
210
+ file_path=str(file_path),
211
+ line_number=e.lineno or 0,
212
+ code_snippet="",
213
+ recommendation="Fix syntax errors before migration",
214
+ migration_effort="high",
215
+ )
216
+ )
217
+
218
+ return issues
219
+
220
    def _analyze_ast(
        self, tree: ast.AST, file_path: Path, content: str
    ) -> List[CompatibilityIssue]:
        """Analyze AST for compatibility issues.

        Walks the tree with a nested NodeVisitor looking for three things:
        LocalRuntime constructor calls using deprecated or renamed keyword
        arguments, attribute calls to removed methods, and
        ``from kailash.runtime ...`` imports of enterprise feature names.

        Args:
            tree: Parsed AST of the file.
            file_path: Path of the file, used only for reporting.
            content: Raw file text; indexed by line number to pull snippets.

        Returns:
            List of issues discovered during the walk.
        """
        issues = []
        lines = content.split("\n")

        class CompatibilityVisitor(ast.NodeVisitor):
            def __init__(self, checker):
                # Back-reference to the outer checker's lookup tables.
                self.checker = checker
                self.issues = []

            def visit_Call(self, node):
                # Check for LocalRuntime instantiation
                if isinstance(node.func, ast.Name) and node.func.id == "LocalRuntime":
                    self._check_constructor_call(node)

                # Check for deprecated method calls
                if isinstance(node.func, ast.Attribute):
                    self._check_method_call(node)

                self.generic_visit(node)

            def visit_ImportFrom(self, node):
                # Check for runtime imports
                if node.module and "kailash.runtime" in node.module:
                    self._check_import(node)

                self.generic_visit(node)

            def _check_constructor_call(self, node):
                """Check LocalRuntime constructor for deprecated parameters."""
                for keyword in node.keywords:
                    param_name = keyword.arg

                    # Check deprecated parameters
                    if param_name in self.checker.deprecated_parameters:
                        line_num = keyword.lineno
                        # Guard against line numbers past the end of `lines`.
                        code_snippet = (
                            lines[line_num - 1] if line_num <= len(lines) else ""
                        )

                        self.issues.append(
                            CompatibilityIssue(
                                issue_type=IssueType.DEPRECATED_PARAMETER,
                                severity=IssueSeverity.HIGH,
                                description=f"Deprecated parameter '{param_name}' used",
                                file_path=str(file_path),
                                line_number=line_num,
                                code_snippet=code_snippet.strip(),
                                recommendation=self.checker.deprecated_parameters[
                                    param_name
                                ],
                                migration_effort="low",
                                automated_fix=True,
                            )
                        )

                    # Check parameter migrations
                    elif param_name in self.checker.parameter_migrations:
                        line_num = keyword.lineno
                        code_snippet = (
                            lines[line_num - 1] if line_num <= len(lines) else ""
                        )
                        new_param = self.checker.parameter_migrations[param_name]

                        self.issues.append(
                            CompatibilityIssue(
                                issue_type=IssueType.CONFIGURATION_UPDATE,
                                severity=IssueSeverity.MEDIUM,
                                description=f"Parameter '{param_name}' should be renamed to '{new_param}'",
                                file_path=str(file_path),
                                line_number=line_num,
                                code_snippet=code_snippet.strip(),
                                recommendation=f"Replace '{param_name}' with '{new_param}'",
                                migration_effort="low",
                                automated_fix=True,
                            )
                        )

            def _check_method_call(self, node):
                """Check for deprecated method calls."""
                # NOTE: fires for ANY attribute call with a matching name,
                # not only on runtime objects — intentional over-reporting.
                if hasattr(node.func, "attr"):
                    method_name = node.func.attr

                    if method_name in self.checker.breaking_changes:
                        line_num = node.lineno
                        code_snippet = (
                            lines[line_num - 1] if line_num <= len(lines) else ""
                        )

                        self.issues.append(
                            CompatibilityIssue(
                                issue_type=IssueType.BREAKING_CHANGE,
                                severity=IssueSeverity.CRITICAL,
                                description=f"Deprecated method '{method_name}' used",
                                file_path=str(file_path),
                                line_number=line_num,
                                code_snippet=code_snippet.strip(),
                                recommendation=self.checker.breaking_changes[
                                    method_name
                                ],
                                migration_effort="medium",
                                breaking_change=True,
                            )
                        )

            def _check_import(self, node):
                """Check for import-related issues."""
                for alias in node.names:
                    name = alias.name

                    # Check for enterprise feature imports
                    if name in self.checker.enterprise_patterns:
                        line_num = node.lineno
                        code_snippet = (
                            lines[line_num - 1] if line_num <= len(lines) else ""
                        )

                        self.issues.append(
                            CompatibilityIssue(
                                issue_type=IssueType.ENTERPRISE_UPGRADE,
                                severity=IssueSeverity.INFO,
                                description=f"Enterprise feature '{name}' detected",
                                file_path=str(file_path),
                                line_number=line_num,
                                code_snippet=code_snippet.strip(),
                                recommendation=f"Consider upgrading to use enhanced {name}: {self.checker.enterprise_patterns[name]}",
                                migration_effort="medium",
                                enterprise_feature=True,
                            )
                        )

        visitor = CompatibilityVisitor(self)
        visitor.visit(tree)
        issues.extend(visitor.issues)

        return issues
358
+
359
+ def _analyze_patterns(
360
+ self, content: str, file_path: Path
361
+ ) -> List[CompatibilityIssue]:
362
+ """Analyze content using regex patterns for complex issues."""
363
+ issues = []
364
+ lines = content.split("\n")
365
+
366
+ # Pattern for old-style runtime usage
367
+ old_runtime_pattern = r"runtime\.(execute_sync|execute_async|get_results)"
368
+ for i, line in enumerate(lines):
369
+ matches = re.finditer(old_runtime_pattern, line)
370
+ for match in matches:
371
+ method = match.group(1)
372
+ issues.append(
373
+ CompatibilityIssue(
374
+ issue_type=IssueType.BREAKING_CHANGE,
375
+ severity=IssueSeverity.CRITICAL,
376
+ description=f"Old-style method '{method}' usage detected",
377
+ file_path=str(file_path),
378
+ line_number=i + 1,
379
+ code_snippet=line.strip(),
380
+ recommendation=f"Replace '{method}' with new execute() method",
381
+ migration_effort="medium",
382
+ breaking_change=True,
383
+ )
384
+ )
385
+
386
+ # Pattern for configuration dictionary usage
387
+ config_dict_pattern = r"LocalRuntime\(.*\{.*\}"
388
+ for i, line in enumerate(lines):
389
+ if re.search(config_dict_pattern, line):
390
+ issues.append(
391
+ CompatibilityIssue(
392
+ issue_type=IssueType.CONFIGURATION_UPDATE,
393
+ severity=IssueSeverity.HIGH,
394
+ description="Dictionary-style configuration detected",
395
+ file_path=str(file_path),
396
+ line_number=i + 1,
397
+ code_snippet=line.strip(),
398
+ recommendation="Use named parameters instead of configuration dictionary",
399
+ migration_effort="medium",
400
+ )
401
+ )
402
+
403
+ # Pattern for hardcoded resource limits
404
+ resource_pattern = r"(memory|cpu|timeout)\s*=\s*\d+"
405
+ for i, line in enumerate(lines):
406
+ matches = re.finditer(resource_pattern, line, re.IGNORECASE)
407
+ for match in matches:
408
+ resource_type = match.group(1)
409
+ issues.append(
410
+ CompatibilityIssue(
411
+ issue_type=IssueType.PERFORMANCE_OPTIMIZATION,
412
+ severity=IssueSeverity.MEDIUM,
413
+ description=f"Hardcoded {resource_type} limit detected",
414
+ file_path=str(file_path),
415
+ line_number=i + 1,
416
+ code_snippet=line.strip(),
417
+ recommendation=f"Move {resource_type} limit to resource_limits configuration",
418
+ migration_effort="low",
419
+ automated_fix=True,
420
+ )
421
+ )
422
+
423
+ return issues
424
+
425
+ def _generate_summary(self, result: AnalysisResult) -> None:
426
+ """Generate summary statistics for the analysis."""
427
+ result.summary = {
428
+ "total_issues": len(result.issues),
429
+ "critical_issues": len(
430
+ [i for i in result.issues if i.severity == IssueSeverity.CRITICAL]
431
+ ),
432
+ "high_issues": len(
433
+ [i for i in result.issues if i.severity == IssueSeverity.HIGH]
434
+ ),
435
+ "medium_issues": len(
436
+ [i for i in result.issues if i.severity == IssueSeverity.MEDIUM]
437
+ ),
438
+ "low_issues": len(
439
+ [i for i in result.issues if i.severity == IssueSeverity.LOW]
440
+ ),
441
+ "breaking_changes": len([i for i in result.issues if i.breaking_change]),
442
+ "automated_fixes": len([i for i in result.issues if i.automated_fix]),
443
+ "enterprise_opportunities": len(
444
+ [i for i in result.issues if i.enterprise_feature]
445
+ ),
446
+ }
447
+
448
+ def _assess_migration_complexity(self, result: AnalysisResult) -> None:
449
+ """Assess overall migration complexity and effort."""
450
+ critical_count = result.summary.get("critical_issues", 0)
451
+ high_count = result.summary.get("high_issues", 0)
452
+ breaking_changes = result.summary.get("breaking_changes", 0)
453
+
454
+ # Calculate complexity score
455
+ complexity_score = (
456
+ (critical_count * 3) + (high_count * 2) + (breaking_changes * 2)
457
+ )
458
+
459
+ if complexity_score == 0:
460
+ result.migration_complexity = "trivial"
461
+ result.estimated_effort_days = 0.5
462
+ elif complexity_score <= 5:
463
+ result.migration_complexity = "low"
464
+ result.estimated_effort_days = 1.0
465
+ elif complexity_score <= 15:
466
+ result.migration_complexity = "medium"
467
+ result.estimated_effort_days = 3.0
468
+ elif complexity_score <= 30:
469
+ result.migration_complexity = "high"
470
+ result.estimated_effort_days = 7.0
471
+ else:
472
+ result.migration_complexity = "very_high"
473
+ result.estimated_effort_days = 14.0
474
+
475
    def _identify_enterprise_opportunities(self, result: AnalysisResult) -> None:
        """Identify opportunities for enterprise feature adoption.

        Collects a deduplicated set of opportunity strings from issues
        flagged as enterprise features, plus keyword-driven suggestions
        derived from issue descriptions.
        """
        opportunities = set()

        for issue in result.issues:
            if issue.enterprise_feature:
                opportunities.add(issue.description)

            # Additional opportunities based on patterns
            # NOTE(review): these keyword checks run for EVERY issue, not
            # only enterprise-flagged ones — confirm intended scope.
            if "monitoring" in issue.description.lower():
                opportunities.add("Enhanced performance monitoring and analytics")
            if "security" in issue.description.lower():
                opportunities.add("Enterprise security and access control")
            if "audit" in issue.description.lower():
                opportunities.add("Compliance and audit logging")
            if "resource" in issue.description.lower():
                opportunities.add("Advanced resource management and optimization")

        # Order is unspecified (set iteration).
        result.enterprise_opportunities = list(opportunities)
494
+
495
+ def generate_report(
496
+ self, result: AnalysisResult, output_format: str = "text"
497
+ ) -> str:
498
+ """Generate a comprehensive migration report.
499
+
500
+ Args:
501
+ result: Analysis results
502
+ output_format: Report format ("text", "json", "markdown")
503
+
504
+ Returns:
505
+ Formatted report string
506
+ """
507
+ if output_format == "json":
508
+ import json
509
+
510
+ return json.dumps(
511
+ {
512
+ "summary": result.summary,
513
+ "migration_complexity": result.migration_complexity,
514
+ "estimated_effort_days": result.estimated_effort_days,
515
+ "enterprise_opportunities": result.enterprise_opportunities,
516
+ "issues": [
517
+ {
518
+ "type": issue.issue_type.value,
519
+ "severity": issue.severity.value,
520
+ "description": issue.description,
521
+ "file": issue.file_path,
522
+ "line": issue.line_number,
523
+ "recommendation": issue.recommendation,
524
+ "effort": issue.migration_effort,
525
+ "automated_fix": issue.automated_fix,
526
+ "breaking_change": issue.breaking_change,
527
+ }
528
+ for issue in result.issues
529
+ ],
530
+ },
531
+ indent=2,
532
+ )
533
+
534
+ elif output_format == "markdown":
535
+ return self._generate_markdown_report(result)
536
+
537
+ else: # text format
538
+ return self._generate_text_report(result)
539
+
540
+ def _generate_text_report(self, result: AnalysisResult) -> str:
541
+ """Generate text format report."""
542
+ report = []
543
+ report.append("=" * 60)
544
+ report.append("LocalRuntime Migration Compatibility Report")
545
+ report.append("=" * 60)
546
+ report.append("")
547
+
548
+ # Summary section
549
+ report.append("SUMMARY")
550
+ report.append("-" * 20)
551
+ report.append(f"Files Analyzed: {result.total_files_analyzed}")
552
+ report.append(f"Total Issues: {result.summary.get('total_issues', 0)}")
553
+ report.append(f"Migration Complexity: {result.migration_complexity.upper()}")
554
+ report.append(f"Estimated Effort: {result.estimated_effort_days} days")
555
+ report.append("")
556
+
557
+ # Issue breakdown
558
+ report.append("ISSUE BREAKDOWN")
559
+ report.append("-" * 20)
560
+ report.append(f"Critical Issues: {result.summary.get('critical_issues', 0)}")
561
+ report.append(f"High Priority: {result.summary.get('high_issues', 0)}")
562
+ report.append(f"Medium Priority: {result.summary.get('medium_issues', 0)}")
563
+ report.append(f"Low Priority: {result.summary.get('low_issues', 0)}")
564
+ report.append(f"Breaking Changes: {result.summary.get('breaking_changes', 0)}")
565
+ report.append(
566
+ f"Automated Fixes Available: {result.summary.get('automated_fixes', 0)}"
567
+ )
568
+ report.append("")
569
+
570
+ # Critical issues first
571
+ critical_issues = [
572
+ i for i in result.issues if i.severity == IssueSeverity.CRITICAL
573
+ ]
574
+ if critical_issues:
575
+ report.append("CRITICAL ISSUES (Must Fix)")
576
+ report.append("-" * 30)
577
+ for issue in critical_issues:
578
+ report.append(f"• {issue.description}")
579
+ report.append(f" File: {issue.file_path}:{issue.line_number}")
580
+ report.append(f" Code: {issue.code_snippet}")
581
+ report.append(f" Fix: {issue.recommendation}")
582
+ report.append("")
583
+
584
+ # Enterprise opportunities
585
+ if result.enterprise_opportunities:
586
+ report.append("ENTERPRISE UPGRADE OPPORTUNITIES")
587
+ report.append("-" * 35)
588
+ for opportunity in result.enterprise_opportunities:
589
+ report.append(f"• {opportunity}")
590
+ report.append("")
591
+
592
+ return "\n".join(report)
593
+
594
+ def _generate_markdown_report(self, result: AnalysisResult) -> str:
595
+ """Generate markdown format report."""
596
+ report = []
597
+ report.append("# LocalRuntime Migration Compatibility Report")
598
+ report.append("")
599
+
600
+ # Summary table
601
+ report.append("## Summary")
602
+ report.append("")
603
+ report.append("| Metric | Value |")
604
+ report.append("|--------|-------|")
605
+ report.append(f"| Files Analyzed | {result.total_files_analyzed} |")
606
+ report.append(f"| Total Issues | {result.summary.get('total_issues', 0)} |")
607
+ report.append(
608
+ f"| Migration Complexity | {result.migration_complexity.title()} |"
609
+ )
610
+ report.append(f"| Estimated Effort | {result.estimated_effort_days} days |")
611
+ report.append("")
612
+
613
+ # Issue breakdown
614
+ report.append("## Issue Breakdown")
615
+ report.append("")
616
+ report.append("| Severity | Count |")
617
+ report.append("|----------|-------|")
618
+ report.append(f"| Critical | {result.summary.get('critical_issues', 0)} |")
619
+ report.append(f"| High | {result.summary.get('high_issues', 0)} |")
620
+ report.append(f"| Medium | {result.summary.get('medium_issues', 0)} |")
621
+ report.append(f"| Low | {result.summary.get('low_issues', 0)} |")
622
+ report.append("")
623
+
624
+ # Detailed issues
625
+ if result.issues:
626
+ report.append("## Detailed Issues")
627
+ report.append("")
628
+
629
+ for severity in [
630
+ IssueSeverity.CRITICAL,
631
+ IssueSeverity.HIGH,
632
+ IssueSeverity.MEDIUM,
633
+ IssueSeverity.LOW,
634
+ ]:
635
+ severity_issues = [i for i in result.issues if i.severity == severity]
636
+ if severity_issues:
637
+ report.append(f"### {severity.value.title()} Issues")
638
+ report.append("")
639
+
640
+ for issue in severity_issues:
641
+ report.append(f"**{issue.description}**")
642
+ report.append("")
643
+ report.append(
644
+ f"- **File:** `{issue.file_path}:{issue.line_number}`"
645
+ )
646
+ if issue.code_snippet:
647
+ report.append(f"- **Code:** `{issue.code_snippet}`")
648
+ report.append(f"- **Recommendation:** {issue.recommendation}")
649
+ report.append(f"- **Effort:** {issue.migration_effort}")
650
+ if issue.automated_fix:
651
+ report.append("- **Automated Fix:** Available")
652
+ report.append("")
653
+
654
+ # Enterprise opportunities
655
+ if result.enterprise_opportunities:
656
+ report.append("## Enterprise Upgrade Opportunities")
657
+ report.append("")
658
+ for opportunity in result.enterprise_opportunities:
659
+ report.append(f"- {opportunity}")
660
+ report.append("")
661
+
662
+ return "\n".join(report)