runbooks-1.0.1-py3-none-any.whl → runbooks-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. runbooks/cloudops/models.py +20 -14
  2. runbooks/common/aws_pricing_api.py +276 -44
  3. runbooks/common/dry_run_examples.py +587 -0
  4. runbooks/common/dry_run_framework.py +520 -0
  5. runbooks/common/memory_optimization.py +533 -0
  6. runbooks/common/performance_optimization_engine.py +1153 -0
  7. runbooks/common/profile_utils.py +10 -3
  8. runbooks/common/sre_performance_suite.py +574 -0
  9. runbooks/finops/business_case_config.py +314 -0
  10. runbooks/finops/cost_processor.py +19 -4
  11. runbooks/finops/ebs_cost_optimizer.py +1 -1
  12. runbooks/finops/embedded_mcp_validator.py +642 -36
  13. runbooks/finops/executive_export.py +789 -0
  14. runbooks/finops/finops_scenarios.py +34 -27
  15. runbooks/finops/notebook_utils.py +1 -1
  16. runbooks/finops/schemas.py +73 -58
  17. runbooks/finops/single_dashboard.py +20 -4
  18. runbooks/finops/vpc_cleanup_exporter.py +2 -1
  19. runbooks/inventory/models/account.py +5 -3
  20. runbooks/inventory/models/inventory.py +1 -1
  21. runbooks/inventory/models/resource.py +5 -3
  22. runbooks/inventory/organizations_discovery.py +89 -5
  23. runbooks/main.py +182 -61
  24. runbooks/operate/vpc_operations.py +60 -31
  25. runbooks/remediation/workspaces_list.py +2 -2
  26. runbooks/vpc/config.py +17 -8
  27. runbooks/vpc/heatmap_engine.py +425 -53
  28. runbooks/vpc/performance_optimized_analyzer.py +546 -0
  29. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/METADATA +1 -1
  30. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/RECORD +34 -26
  31. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/WHEEL +0 -0
  32. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/entry_points.txt +0 -0
  33. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/licenses/LICENSE +0 -0
  34. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/top_level.txt +0 -0
runbooks/common/profile_utils.py
@@ -20,7 +20,7 @@ Version: 1.0.0 - Universal Compatibility

 import os
 import time
-from typing import Dict, Optional
+from typing import Dict, Optional, Union, List, Tuple

 import boto3

@@ -32,7 +32,7 @@ _cache_timestamp = None
 _cache_ttl = 300  # 5 minutes cache TTL


-def get_profile_for_operation(operation_type: str, user_specified_profile: Optional[str] = None) -> str:
+def get_profile_for_operation(operation_type: str, user_specified_profile: Optional[Union[str, Tuple[str, ...], List[str]]] = None) -> str:
     """
     Universal AWS profile selection that works with ANY AWS setup.

@@ -45,7 +45,7 @@ def get_profile_for_operation(operation_type: str, user_specified_profile: Optio

     Args:
         operation_type: Type of operation (informational only, not used for profile selection)
-        user_specified_profile: Profile specified by user via --profile parameter
+        user_specified_profile: Profile specified by user via --profile parameter (handles both str and tuple)

     Returns:
         str: Profile name to use for the operation
@@ -53,6 +53,13 @@ def get_profile_for_operation(operation_type: str, user_specified_profile: Optio

     Raises:
         SystemExit: If user-specified profile not found in AWS config
     """
+    # SAFETY NET: Handle tuple profiles (Click multiple=True parameter issue)
+    # This prevents errors like: Profile '('profile-name',)' not found
+    if isinstance(user_specified_profile, (tuple, list)) and user_specified_profile:
+        user_specified_profile = user_specified_profile[0]  # Take first profile from tuple/list
+    elif isinstance(user_specified_profile, (tuple, list)) and not user_specified_profile:
+        user_specified_profile = None  # Empty tuple/list becomes None
+
     global _profile_cache, _cache_timestamp

     # Check cache first to reduce duplicate calls (performance optimization)
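The added branch simply collapses the tuple that Click produces for `multiple=True` options into a single profile name before the lookup runs. A minimal standalone sketch of that normalization (the `normalize_profile` helper is hypothetical, shown only to illustrate the behavior of the safety net above):

    from typing import List, Optional, Tuple, Union

    def normalize_profile(profile: Optional[Union[str, Tuple[str, ...], List[str]]]) -> Optional[str]:
        """Mirror of the safety net: collapse Click's multiple=True tuples/lists to one name."""
        if isinstance(profile, (tuple, list)):
            return profile[0] if profile else None  # first entry wins; empty sequence -> None
        return profile

    assert normalize_profile(("ops-profile",)) == "ops-profile"   # tuple from --profile with multiple=True
    assert normalize_profile(()) is None                          # empty tuple behaves like "no profile"
    assert normalize_profile("ops-profile") == "ops-profile"      # plain strings pass through unchanged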
runbooks/common/sre_performance_suite.py (new file)
@@ -0,0 +1,574 @@
+#!/usr/bin/env python3
+"""
+SRE Performance Optimization Suite - Comprehensive Performance Enhancement
+
+🎯 Enterprise SRE Automation Specialist Implementation
+Following proven systematic delegation patterns for production reliability optimization.
+
+This suite integrates all performance optimizations identified from PDCA analysis:
+
+CRITICAL PERFORMANCE BOTTLENECKS ADDRESSED:
+1. Organization Discovery Performance: 52.3s -> <30s target
+2. VPC Analysis Timeout Issues: Network operations optimization
+3. Memory Usage Optimization: Large-scale operation memory management
+4. Concurrent Processing: Multi-account parallel processing with rate limiting
+
+ENTERPRISE FEATURES:
+- Unified performance monitoring dashboard
+- Intelligent caching with TTL management
+- Connection pooling for AWS API calls
+- Memory-efficient batch processing
+- Progress indicators for long-running operations
+- Automatic retry with exponential backoff
+- Performance degradation detection and alerting
+- Comprehensive metrics collection and reporting
+"""
+
+import asyncio
+import logging
+from contextlib import asynccontextmanager, contextmanager
+from dataclasses import dataclass, field
+from datetime import datetime, timedelta, timezone
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+import time
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
+from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeElapsedColumn
+from rich.layout import Layout
+from rich.live import Live
+
+from runbooks.common.rich_utils import (
+    console,
+    print_header,
+    print_success,
+    print_warning,
+    print_error,
+    create_table,
+    STATUS_INDICATORS
+)
+
+from runbooks.common.performance_optimization_engine import (
+    PerformanceOptimizationEngine,
+    OptimizationMetrics,
+    get_optimization_engine
+)
+
+from runbooks.common.memory_optimization import (
+    MemoryOptimizer,
+    get_memory_optimizer
+)
+
+from runbooks.common.performance_monitor import (
+    PerformanceBenchmark,
+    get_performance_benchmark
+)
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class SREPerformanceMetrics:
+    """Comprehensive SRE performance metrics"""
+    operation_name: str
+    start_time: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+    end_time: Optional[datetime] = None
+    total_duration_seconds: float = 0.0
+    target_duration_seconds: float = 30.0
+
+    # Performance optimization metrics
+    optimization_metrics: Optional[OptimizationMetrics] = None
+    memory_peak_mb: float = 0.0
+    memory_saved_mb: float = 0.0
+
+    # Infrastructure metrics
+    aws_api_calls: int = 0
+    cache_hits: int = 0
+    cache_misses: int = 0
+    parallel_workers_used: int = 0
+
+    # Success metrics
+    success: bool = False
+    error_message: Optional[str] = None
+    performance_grade: str = "F"
+    optimizations_applied: List[str] = field(default_factory=list)
+
+    def calculate_performance_improvement(self) -> float:
+        """Calculate performance improvement percentage"""
+        if self.target_duration_seconds <= 0 or self.total_duration_seconds <= 0:
+            return 0.0
+        return max(0, (self.target_duration_seconds - self.total_duration_seconds) / self.target_duration_seconds * 100)
+
+    def get_cache_efficiency(self) -> float:
+        """Calculate cache efficiency percentage"""
+        total = self.cache_hits + self.cache_misses
+        return (self.cache_hits / total * 100) if total > 0 else 0.0
+
+
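To make the grading arithmetic concrete: `calculate_performance_improvement` measures how far under target an operation finished, and `get_cache_efficiency` is a plain hit ratio. A standalone walk-through of the same formulas with sample numbers (values are illustrative only):

    # Sample figures: a 30s target met in 22.5s, with 42 cache hits and 14 misses.
    target_s, actual_s = 30.0, 22.5
    hits, misses = 42, 14

    improvement = max(0, (target_s - actual_s) / target_s * 100)   # 25.0 -> 25% faster than target
    cache_efficiency = hits / (hits + misses) * 100                # 75.0 -> 75% of lookups served from cache

    # optimized_operation() below grades on the same improvement value:
    # >= 20% -> "A", >= 10% -> "B", finished within target -> "C", otherwise "D".
    print(f"improvement={improvement:.1f}% cache={cache_efficiency:.1f}%")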
+class SREPerformanceSuite:
+    """
+    Comprehensive SRE performance optimization suite
+
+    Integrates all performance optimization components:
+    - Performance optimization engine for AWS API optimization
+    - Memory optimization for large-scale operations
+    - Performance monitoring and benchmarking
+    - Real-time performance dashboard
+    - Automated performance reporting
+    """
+
+    def __init__(self,
+                 max_workers: int = 20,
+                 memory_limit_mb: int = 2048,
+                 cache_ttl_minutes: int = 30,
+                 performance_target_seconds: float = 30.0):
+        """
+        Initialize comprehensive SRE performance suite
+
+        Args:
+            max_workers: Maximum concurrent workers for parallel operations
+            memory_limit_mb: Memory usage limit in MB
+            cache_ttl_minutes: Cache TTL in minutes
+            performance_target_seconds: Default performance target in seconds
+        """
+        self.max_workers = max_workers
+        self.memory_limit_mb = memory_limit_mb
+        self.cache_ttl_minutes = cache_ttl_minutes
+        self.performance_target_seconds = performance_target_seconds
+
+        # Initialize optimization components
+        self.optimization_engine = get_optimization_engine(
+            max_workers=max_workers,
+            cache_ttl_minutes=cache_ttl_minutes,
+            memory_limit_mb=memory_limit_mb
+        )
+
+        self.memory_optimizer = get_memory_optimizer(
+            warning_threshold_mb=memory_limit_mb * 0.7,
+            critical_threshold_mb=memory_limit_mb * 0.9
+        )
+
+        # Performance tracking
+        self.performance_metrics: List[SREPerformanceMetrics] = []
+        self.current_operation: Optional[SREPerformanceMetrics] = None
+
+    @contextmanager
+    def optimized_operation(self,
+                            operation_name: str,
+                            target_seconds: Optional[float] = None,
+                            enable_memory_monitoring: bool = True,
+                            enable_caching: bool = True):
+        """
+        Context manager for comprehensive SRE-optimized operations
+
+        Integrates:
+        - Performance optimization engine
+        - Memory optimization and monitoring
+        - Performance benchmarking
+        - Real-time metrics collection
+        """
+        target = target_seconds or self.performance_target_seconds
+
+        # Initialize comprehensive metrics
+        metrics = SREPerformanceMetrics(
+            operation_name=operation_name,
+            target_duration_seconds=target
+        )
+        self.current_operation = metrics
+
+        # Start all optimization components
+        with self.optimization_engine.optimize_operation(operation_name, target) as opt_metrics:
+            with self.memory_optimizer.optimize_memory_usage(operation_name, enable_memory_monitoring) as mem_metrics:
+
+                # Start performance benchmark
+                benchmark = get_performance_benchmark("sre_suite")
+
+                with benchmark.measure_operation(operation_name, show_progress=True) as perf_metrics:
+                    try:
+                        console.log(f"[cyan]🚀 SRE-optimized operation: {operation_name} (target: {target}s)[/cyan]")
+
+                        yield metrics
+
+                        # Operation succeeded - collect metrics
+                        metrics.end_time = datetime.now(timezone.utc)
+                        metrics.total_duration_seconds = (metrics.end_time - metrics.start_time).total_seconds()
+                        metrics.success = True
+
+                        # Collect optimization metrics
+                        metrics.optimization_metrics = opt_metrics
+                        metrics.memory_peak_mb = mem_metrics.memory_peak_mb
+                        metrics.memory_saved_mb = mem_metrics.memory_saved_mb
+                        metrics.optimizations_applied = list(set(
+                            opt_metrics.optimization_applied + mem_metrics.optimization_techniques_applied
+                        ))
+
+                        # Calculate performance grade
+                        improvement = metrics.calculate_performance_improvement()
+                        if improvement >= 20:
+                            metrics.performance_grade = "A"
+                        elif improvement >= 10:
+                            metrics.performance_grade = "B"
+                        elif metrics.total_duration_seconds <= target:
+                            metrics.performance_grade = "C"
+                        else:
+                            metrics.performance_grade = "D"
+
+                        self._log_comprehensive_results(metrics)
+
+                    except Exception as e:
+                        # Handle operation failure
+                        metrics.end_time = datetime.now(timezone.utc)
+                        metrics.total_duration_seconds = (metrics.end_time - metrics.start_time).total_seconds()
+                        metrics.success = False
+                        metrics.error_message = str(e)
+                        metrics.performance_grade = "F"
+
+                        print_error(f"SRE-optimized operation failed: {operation_name}", e)
+                        raise
+
+                    finally:
+                        # Store metrics and cleanup
+                        self.performance_metrics.append(metrics)
+                        self.current_operation = None
+
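A usage sketch for the context manager, assuming the runbooks optimization modules imported above are importable in the current environment; the operation body is a placeholder `time.sleep` rather than real AWS work:

    import time

    from runbooks.common.sre_performance_suite import SREPerformanceSuite

    suite = SREPerformanceSuite(max_workers=10, memory_limit_mb=1024)

    # The context manager yields the SREPerformanceMetrics record it is populating;
    # duration, grade, and applied optimizations are filled in when the block exits.
    with suite.optimized_operation("example_inventory_scan", target_seconds=5.0) as metrics:
        time.sleep(0.5)            # stand-in for the real workload
        metrics.aws_api_calls = 3  # callers may annotate the record as they go

    print(metrics.performance_grade, f"{metrics.total_duration_seconds:.1f}s")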
+    async def optimize_organization_discovery(self,
+                                              management_profile: str,
+                                              target_seconds: float = 30.0) -> Dict[str, Any]:
+        """
+        Optimize organization discovery with comprehensive SRE patterns
+
+        Addresses: Organization Discovery Performance (52.3s -> <30s target)
+        """
+        with self.optimized_operation("organization_discovery_optimization", target_seconds):
+
+            # Use optimized discovery function from performance engine
+            optimized_discovery = self.optimization_engine.optimize_organization_discovery(
+                management_profile=management_profile,
+                use_parallel_processing=True,
+                batch_size=20
+            )
+
+            # Execute optimized discovery
+            result = optimized_discovery()
+
+            # Update metrics
+            if self.current_operation:
+                self.current_operation.aws_api_calls = result.get('api_calls', 0)
+                self.current_operation.cache_hits = self.optimization_engine.cache.hits
+                self.current_operation.cache_misses = self.optimization_engine.cache.misses
+
+            return {
+                'discovery_result': result,
+                'performance_metrics': self.current_operation,
+                'optimization_summary': {
+                    'target_achieved': self.current_operation.total_duration_seconds <= target_seconds,
+                    'performance_improvement': self.current_operation.calculate_performance_improvement(),
+                    'optimizations_applied': self.current_operation.optimizations_applied
+                }
+            }
+
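Since `optimize_organization_discovery` is a coroutine, a caller drives it with asyncio. A hedged sketch (the profile name is a placeholder, and the call assumes the performance-engine dependencies resolve at runtime):

    import asyncio

    from runbooks.common.sre_performance_suite import get_sre_performance_suite

    async def main() -> None:
        suite = get_sre_performance_suite(max_workers=20)
        result = await suite.optimize_organization_discovery(
            management_profile="management-profile-placeholder",  # hypothetical profile name
            target_seconds=30.0,
        )
        summary = result["optimization_summary"]
        print(summary["target_achieved"], summary["performance_improvement"])

    if __name__ == "__main__":
        asyncio.run(main())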
+    async def optimize_vpc_analysis(self,
+                                    operational_profile: str,
+                                    regions: Optional[List[str]] = None,
+                                    target_seconds: float = 180.0) -> Dict[str, Any]:
+        """
+        Optimize VPC analysis with comprehensive SRE patterns
+
+        Addresses: VPC Analysis Timeout Issues
+        """
+        with self.optimized_operation("vpc_analysis_optimization", target_seconds):
+
+            # Import and use the optimized VPC analyzer
+            from runbooks.vpc.performance_optimized_analyzer import create_optimized_vpc_analyzer
+
+            analyzer = create_optimized_vpc_analyzer(
+                operational_profile=operational_profile,
+                max_workers=min(self.max_workers, 15)  # Limit workers for VPC analysis
+            )
+
+            # Execute optimized global VPC analysis
+            result = await analyzer.analyze_vpcs_globally(
+                regions=regions,
+                include_detailed_analysis=True
+            )
+
+            # Update metrics
+            if self.current_operation:
+                analysis_summary = result.get('analysis_summary', {})
+                perf_metrics = result.get('performance_metrics', {})
+
+                self.current_operation.aws_api_calls = perf_metrics.get('total_api_calls', 0)
+                self.current_operation.parallel_workers_used = self.max_workers
+
+            return {
+                'vpc_analysis_result': result,
+                'performance_metrics': self.current_operation,
+                'optimization_summary': {
+                    'target_achieved': self.current_operation.total_duration_seconds <= target_seconds,
+                    'regions_analyzed': result.get('analysis_summary', {}).get('total_regions_analyzed', 0),
+                    'vpcs_discovered': result.get('analysis_summary', {}).get('total_vpcs_discovered', 0),
+                    'performance_grade': result.get('analysis_summary', {}).get('performance_grade', 'N/A')
+                }
+            }
+
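The VPC path follows the same shape but reports region and VPC counts in its summary. A brief sketch, again with placeholder profile and region values:

    import asyncio

    from runbooks.common.sre_performance_suite import get_sre_performance_suite

    async def scan_vpcs() -> None:
        suite = get_sre_performance_suite()
        result = await suite.optimize_vpc_analysis(
            operational_profile="operations-profile-placeholder",  # hypothetical profile name
            regions=["us-east-1", "eu-west-1"],                    # optional; None defers region selection to the analyzer
            target_seconds=180.0,
        )
        summary = result["optimization_summary"]
        print(summary["regions_analyzed"], summary["vpcs_discovered"], summary["performance_grade"])

    asyncio.run(scan_vpcs())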
+    def create_performance_dashboard(self) -> None:
+        """Create comprehensive SRE performance dashboard"""
+        print_header("SRE Performance Optimization Dashboard", "Enterprise Performance Suite")
+
+        if not self.performance_metrics:
+            console.print("[yellow]No performance metrics available yet[/yellow]")
+            return
+
+        # Performance summary table
+        self._create_performance_summary_table()
+
+        # Optimization details table
+        self._create_optimization_details_table()
+
+        # System resource status
+        self._create_resource_status_panel()
+
+        # Performance recommendations
+        self._create_performance_recommendations()
+
+    def _create_performance_summary_table(self):
+        """Create performance summary table"""
+        table = create_table(
+            title="SRE Performance Summary",
+            columns=[
+                {"name": "Operation", "style": "cyan", "justify": "left"},
+                {"name": "Duration", "style": "white", "justify": "right"},
+                {"name": "Target", "style": "white", "justify": "right"},
+                {"name": "Grade", "style": "white", "justify": "center"},
+                {"name": "Improvement", "style": "green", "justify": "right"},
+                {"name": "Memory (MB)", "style": "blue", "justify": "right"},
+                {"name": "API Calls", "style": "yellow", "justify": "right"},
+                {"name": "Status", "style": "white", "justify": "center"}
+            ]
+        )
+
+        for metrics in self.performance_metrics:
+            improvement = metrics.calculate_performance_improvement()
+            status_icon = STATUS_INDICATORS['success'] if metrics.success else STATUS_INDICATORS['error']
+            status_color = 'green' if metrics.success else 'red'
+
+            # Grade color coding
+            grade_colors = {'A': 'green', 'B': 'green', 'C': 'yellow', 'D': 'red', 'F': 'red'}
+            grade_color = grade_colors.get(metrics.performance_grade, 'white')
+
+            table.add_row(
+                metrics.operation_name,
+                f"{metrics.total_duration_seconds:.1f}s",
+                f"{metrics.target_duration_seconds:.1f}s",
+                f"[{grade_color}]{metrics.performance_grade}[/{grade_color}]",
+                f"+{improvement:.1f}%" if improvement > 0 else f"{improvement:.1f}%",
+                f"{metrics.memory_peak_mb:.1f}",
+                str(metrics.aws_api_calls),
+                f"[{status_color}]{status_icon}[/{status_color}]"
+            )
+
+        console.print(table)
+
+    def _create_optimization_details_table(self):
+        """Create optimization details table"""
+        table = create_table(
+            title="Optimization Techniques Applied",
+            columns=[
+                {"name": "Operation", "style": "cyan", "justify": "left"},
+                {"name": "Cache Efficiency", "style": "blue", "justify": "right"},
+                {"name": "Memory Saved", "style": "green", "justify": "right"},
+                {"name": "Workers Used", "style": "yellow", "justify": "right"},
+                {"name": "Optimizations", "style": "dim", "justify": "left", "max_width": 40}
+            ]
+        )
+
+        for metrics in self.performance_metrics:
+            cache_efficiency = metrics.get_cache_efficiency()
+            memory_saved = f"+{metrics.memory_saved_mb:.1f}MB" if metrics.memory_saved_mb > 0 else "0MB"
+
+            table.add_row(
+                metrics.operation_name,
+                f"{cache_efficiency:.1f}%",
+                memory_saved,
+                str(metrics.parallel_workers_used),
+                ", ".join(metrics.optimizations_applied[:3]) + ("..." if len(metrics.optimizations_applied) > 3 else "")
+            )
+
+        console.print(table)
+
+    def _create_resource_status_panel(self):
+        """Create system resource status panel"""
+        # Get current resource status
+        memory_report = self.memory_optimizer.get_memory_usage_report()
+        cache_stats = self.optimization_engine.cache.get_stats()
+
+        # Status colors
+        memory_color = {
+            'good': 'green',
+            'moderate': 'yellow',
+            'warning': 'yellow',
+            'critical': 'red'
+        }.get(memory_report.get('memory_status', 'good'), 'white')
+
+        status_text = f"""
+[bold cyan]💾 Memory Status:[/bold cyan] [{memory_color}]{memory_report['memory_status'].upper()}[/{memory_color}] ({memory_report['current_memory_mb']:.1f}MB / {memory_report['critical_threshold_mb']:.0f}MB)
+
+[bold blue]🗄️ Cache Performance:[/bold blue] {cache_stats['hit_rate']:.1f}% hit rate ({cache_stats['hits']} hits, {cache_stats['misses']} misses)
+
+[bold yellow]🔧 System Resources:[/bold yellow] {cache_stats['size']}/{cache_stats['max_size']} cache entries, {memory_report['active_objects']:,} active objects
+"""
+
+        console.print(Panel(
+            status_text.strip(),
+            title="[bold]System Resource Status[/bold]",
+            border_style=memory_color
+        ))
+
+    def _create_performance_recommendations(self):
+        """Create performance optimization recommendations"""
+        recommendations = []
+
+        # Analyze recent performance metrics
+        if self.performance_metrics:
+            recent_metrics = self.performance_metrics[-5:]  # Last 5 operations
+
+            # Check for performance issues
+            slow_operations = [m for m in recent_metrics if m.total_duration_seconds > m.target_duration_seconds]
+            if slow_operations:
+                recommendations.append(f"🐌 {len(slow_operations)} operations exceeded target duration")
+
+            # Check memory usage
+            high_memory_ops = [m for m in recent_metrics if m.memory_peak_mb > self.memory_limit_mb * 0.8]
+            if high_memory_ops:
+                recommendations.append(f"🧠 {len(high_memory_ops)} operations had high memory usage")
+
+            # Check cache efficiency
+            low_cache_ops = [m for m in recent_metrics if m.get_cache_efficiency() < 50]
+            if low_cache_ops:
+                recommendations.append(f"📦 {len(low_cache_ops)} operations had low cache efficiency")
+
+        # System-level recommendations
+        memory_report = self.memory_optimizer.get_memory_usage_report()
+        recommendations.extend(memory_report.get('optimization_recommendations', []))
+
+        if recommendations:
+            recommendations_text = "\n".join(f"• {rec}" for rec in recommendations[:8])
+            console.print(Panel(
+                recommendations_text,
+                title="[bold yellow]Performance Recommendations[/bold yellow]",
+                border_style="yellow"
+            ))
+        else:
+            console.print(Panel(
+                "[green]✅ System performance is optimal - no recommendations at this time[/green]",
+                title="[bold green]Performance Status[/bold green]",
+                border_style="green"
+            ))
+
+    def _log_comprehensive_results(self, metrics: SREPerformanceMetrics):
+        """Log comprehensive SRE optimization results"""
+        improvement = metrics.calculate_performance_improvement()
+        cache_efficiency = metrics.get_cache_efficiency()
+
+        if metrics.success:
+            if improvement > 0:
+                print_success(
+                    f"SRE optimization completed: {metrics.operation_name} "
+                    f"({metrics.total_duration_seconds:.1f}s, {improvement:+.1f}% improvement, Grade: {metrics.performance_grade})"
+                )
+            else:
+                console.log(
+                    f"[green]SRE operation completed: {metrics.operation_name} "
+                    f"({metrics.total_duration_seconds:.1f}s, Grade: {metrics.performance_grade})[/green]"
+                )
+
+        # Log optimization details
+        if metrics.optimizations_applied:
+            console.log(f"[dim]🔧 Optimizations: {', '.join(metrics.optimizations_applied)}[/dim]")
+
+        if cache_efficiency > 0:
+            console.log(f"[dim]📦 Cache: {cache_efficiency:.1f}% efficiency ({metrics.cache_hits} hits)[/dim]")
+
+        if metrics.memory_saved_mb > 0:
+            console.log(f"[dim]🧠 Memory: {metrics.memory_saved_mb:.1f}MB saved (peak: {metrics.memory_peak_mb:.1f}MB)[/dim]")
+
+    def get_performance_summary(self) -> Dict[str, Any]:
+        """Get comprehensive performance summary"""
+        if not self.performance_metrics:
+            return {"status": "no_data", "message": "No performance metrics available"}
+
+        # Calculate aggregate statistics
+        successful_ops = [m for m in self.performance_metrics if m.success]
+        total_ops = len(self.performance_metrics)
+
+        avg_duration = sum(m.total_duration_seconds for m in successful_ops) / len(successful_ops) if successful_ops else 0
+        avg_improvement = sum(m.calculate_performance_improvement() for m in successful_ops) / len(successful_ops) if successful_ops else 0
+        success_rate = len(successful_ops) / total_ops if total_ops > 0 else 0
+
+        # Performance grade distribution
+        grade_counts = {}
+        for metrics in self.performance_metrics:
+            grade_counts[metrics.performance_grade] = grade_counts.get(metrics.performance_grade, 0) + 1
+
+        return {
+            "status": "active",
+            "total_operations": total_ops,
+            "successful_operations": len(successful_ops),
+            "success_rate": success_rate,
+            "average_duration_seconds": avg_duration,
+            "average_improvement_percent": avg_improvement,
+            "performance_grade_distribution": grade_counts,
+            "system_resources": {
+                "memory_status": self.memory_optimizer.get_memory_usage_report(),
+                "cache_status": self.optimization_engine.cache.get_stats()
+            },
+            "recent_operations": [
+                {
+                    "name": m.operation_name,
+                    "duration": m.total_duration_seconds,
+                    "grade": m.performance_grade,
+                    "success": m.success
+                } for m in self.performance_metrics[-5:]
+            ]
+        }
+
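Callers that want machine-readable output rather than the Rich dashboard can consume `get_performance_summary()` directly; a short sketch of reading the aggregate fields defined above:

    from runbooks.common.sre_performance_suite import get_sre_performance_suite

    suite = get_sre_performance_suite()
    summary = suite.get_performance_summary()

    if summary["status"] == "no_data":
        print(summary["message"])
    else:
        print(f"{summary['successful_operations']}/{summary['total_operations']} ops succeeded "
              f"({summary['success_rate']:.0%}), avg {summary['average_duration_seconds']:.1f}s")
        for op in summary["recent_operations"]:
            print(f"  {op['name']}: grade {op['grade']} in {op['duration']:.1f}s")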
+    def clear_performance_data(self):
+        """Clear all performance tracking data"""
+        self.performance_metrics.clear()
+        self.optimization_engine.clear_caches()
+        self.memory_optimizer.clear_optimization_data()
+        print_success("SRE performance data cleared")
+
+
+# Global SRE performance suite instance
+_sre_performance_suite: Optional[SREPerformanceSuite] = None
+
+
+def get_sre_performance_suite(max_workers: int = 20,
+                              memory_limit_mb: int = 2048,
+                              cache_ttl_minutes: int = 30) -> SREPerformanceSuite:
+    """Get or create global SRE performance suite instance"""
+    global _sre_performance_suite
+    if _sre_performance_suite is None:
+        _sre_performance_suite = SREPerformanceSuite(
+            max_workers=max_workers,
+            memory_limit_mb=memory_limit_mb,
+            cache_ttl_minutes=cache_ttl_minutes
+        )
+    return _sre_performance_suite
+
+
+def create_sre_performance_dashboard():
+    """Create comprehensive SRE performance dashboard"""
+    suite = get_sre_performance_suite()
+    suite.create_performance_dashboard()
+
+
+# Export public interface
+__all__ = [
+    "SREPerformanceSuite",
+    "SREPerformanceMetrics",
+    "get_sre_performance_suite",
+    "create_sre_performance_dashboard"
+]
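Module-level access goes through the lazy singleton, so repeated calls share one suite; a closing usage sketch tying the pieces together (assuming the package is installed):

    from runbooks.common.sre_performance_suite import (
        create_sre_performance_dashboard,
        get_sre_performance_suite,
    )

    suite_a = get_sre_performance_suite(max_workers=20)
    suite_b = get_sre_performance_suite()   # same instance; later kwargs are ignored once it exists
    assert suite_a is suite_b

    create_sre_performance_dashboard()      # renders the tables/panels for any recorded operations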