runbooks-1.0.3-py3-none-any.whl → runbooks-1.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. runbooks/__init__.py +10 -5
  2. runbooks/__init__.py.backup +134 -0
  3. runbooks/__init___optimized.py +110 -0
  4. runbooks/cloudops/base.py +56 -3
  5. runbooks/cloudops/cost_optimizer.py +496 -42
  6. runbooks/common/aws_pricing.py +236 -80
  7. runbooks/common/business_logic.py +485 -0
  8. runbooks/common/cli_decorators.py +219 -0
  9. runbooks/common/error_handling.py +424 -0
  10. runbooks/common/lazy_loader.py +186 -0
  11. runbooks/common/module_cli_base.py +378 -0
  12. runbooks/common/performance_monitoring.py +512 -0
  13. runbooks/common/profile_utils.py +133 -6
  14. runbooks/enterprise/logging.py +30 -2
  15. runbooks/enterprise/validation.py +177 -0
  16. runbooks/finops/README.md +311 -236
  17. runbooks/finops/aws_client.py +1 -1
  18. runbooks/finops/business_case_config.py +723 -19
  19. runbooks/finops/cli.py +136 -0
  20. runbooks/finops/commvault_ec2_analysis.py +25 -9
  21. runbooks/finops/config.py +272 -0
  22. runbooks/finops/dashboard_runner.py +136 -23
  23. runbooks/finops/ebs_cost_optimizer.py +39 -40
  24. runbooks/finops/enhanced_trend_visualization.py +7 -2
  25. runbooks/finops/enterprise_wrappers.py +45 -18
  26. runbooks/finops/finops_dashboard.py +50 -25
  27. runbooks/finops/finops_scenarios.py +22 -7
  28. runbooks/finops/helpers.py +115 -2
  29. runbooks/finops/multi_dashboard.py +7 -5
  30. runbooks/finops/optimizer.py +97 -6
  31. runbooks/finops/scenario_cli_integration.py +247 -0
  32. runbooks/finops/scenarios.py +12 -1
  33. runbooks/finops/unlimited_scenarios.py +393 -0
  34. runbooks/finops/validation_framework.py +19 -7
  35. runbooks/finops/workspaces_analyzer.py +1 -5
  36. runbooks/inventory/mcp_inventory_validator.py +2 -1
  37. runbooks/main.py +132 -94
  38. runbooks/main_final.py +358 -0
  39. runbooks/main_minimal.py +84 -0
  40. runbooks/main_optimized.py +493 -0
  41. runbooks/main_ultra_minimal.py +47 -0
  42. {runbooks-1.0.3.dist-info → runbooks-1.1.0.dist-info}/METADATA +1 -1
  43. {runbooks-1.0.3.dist-info → runbooks-1.1.0.dist-info}/RECORD +47 -31
  44. {runbooks-1.0.3.dist-info → runbooks-1.1.0.dist-info}/WHEEL +0 -0
  45. {runbooks-1.0.3.dist-info → runbooks-1.1.0.dist-info}/entry_points.txt +0 -0
  46. {runbooks-1.0.3.dist-info → runbooks-1.1.0.dist-info}/licenses/LICENSE +0 -0
  47. {runbooks-1.0.3.dist-info → runbooks-1.1.0.dist-info}/top_level.txt +0 -0
runbooks/common/performance_monitoring.py (new file)
@@ -0,0 +1,512 @@
+ #!/usr/bin/env python3
+ """
+ Performance Monitoring Decorators for CloudOps Runbooks - Enterprise Metrics
+
+ Provides comprehensive performance tracking and optimization guidance across
+ all runbooks modules with enterprise-grade monitoring capabilities.
+
+ Features:
+ - Execution time monitoring with module-specific targets
+ - Memory usage tracking and optimization recommendations
+ - API call rate monitoring for AWS operations
+ - Business value correlation with performance metrics
+ - Executive reporting with performance dashboards
+
+ Author: CloudOps Runbooks Team
+ Version: 1.0.0 - Enterprise Performance Monitoring
+ """
+
+ import time
+ import tracemalloc
+ from functools import wraps
+ from typing import Dict, Any, Optional, Callable, List
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ import json
+
+ from runbooks.common.rich_utils import (
+     console, print_success, print_warning, print_info, print_error,
+     create_table, create_progress_bar, STATUS_INDICATORS
+ )
+
+
+ @dataclass
+ class PerformanceMetrics:
+     """Enterprise performance metrics tracking."""
+     operation_name: str
+     module_name: str
+     start_time: float
+     end_time: float
+     execution_time: float = 0.0
+     memory_peak_mb: float = 0.0
+     memory_current_mb: float = 0.0
+     api_calls_count: int = 0
+     success: bool = True
+     error_message: Optional[str] = None
+     target_seconds: int = 30
+     business_value: float = 0.0
+     resources_processed: int = 0
+
+     def __post_init__(self):
+         """Calculate derived metrics."""
+         self.execution_time = self.end_time - self.start_time
+         self.performance_ratio = self.execution_time / self.target_seconds
+         self.efficiency_score = min(100, (self.target_seconds / max(self.execution_time, 0.1)) * 100)
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert metrics to dictionary for export."""
+         return {
+             "operation": self.operation_name,
+             "module": self.module_name,
+             "timestamp": datetime.fromtimestamp(self.start_time).isoformat(),
+             "execution_time_seconds": round(self.execution_time, 2),
+             "target_seconds": self.target_seconds,
+             "performance_ratio": round(self.performance_ratio, 2),
+             "efficiency_score": round(self.efficiency_score, 1),
+             "memory_peak_mb": round(self.memory_peak_mb, 2),
+             "memory_current_mb": round(self.memory_current_mb, 2),
+             "api_calls": self.api_calls_count,
+             "success": self.success,
+             "business_value": self.business_value,
+             "resources_processed": self.resources_processed
+         }
+
+
+ @dataclass
+ class ModulePerformanceTargets:
+     """Module-specific performance targets for enterprise operations."""
+     finops: int = 15  # FinOps cost analysis operations
+     inventory: int = 45  # Multi-account discovery operations
+     operate: int = 15  # Resource operations with safety validation
+     security: int = 45  # Comprehensive security assessments
+     cfat: int = 60  # Cloud foundations assessments
+     vpc: int = 30  # Network analysis with cost integration
+     remediation: int = 15  # Automated security remediation
+     sre: int = 30  # Site reliability engineering operations
+
+     def get_target(self, module_name: str) -> int:
+         """Get performance target for module."""
+         return getattr(self, module_name.lower(), 30)  # Default 30s
+
+
+ # Global performance tracking
+ _performance_targets = ModulePerformanceTargets()
+ _performance_history: List[PerformanceMetrics] = []
+ _api_call_counter = 0
+
+
+ def track_api_call():
+     """Increment API call counter for performance monitoring."""
+     global _api_call_counter
+     _api_call_counter += 1
+
+
+ def reset_api_counter():
+     """Reset API call counter."""
+     global _api_call_counter
+     _api_call_counter = 0
+
+
+ def monitor_performance(module_name: str = "runbooks",
+                         operation_name: Optional[str] = None,
+                         target_seconds: Optional[int] = None,
+                         track_memory: bool = True):
+     """
+     Decorator for comprehensive performance monitoring.
+
+     Monitors execution time, memory usage, and provides optimization
+     recommendations when operations exceed enterprise targets.
+
+     Args:
+         module_name: Name of the runbooks module
+         operation_name: Specific operation being monitored
+         target_seconds: Custom target time (uses module default if None)
+         track_memory: Enable memory usage tracking
+
+     Usage:
+         @monitor_performance(module_name="finops", operation_name="cost_analysis")
+         def analyze_costs(**kwargs):
+             # Your operation code here
+     """
+     def decorator(f: Callable) -> Callable:
+         @wraps(f)
+         def wrapper(*args, **kwargs):
+             global _api_call_counter, _performance_history
+
+             # Determine operation name and target
+             op_name = operation_name or f.__name__
+             target = target_seconds or _performance_targets.get_target(module_name)
+
+             # Initialize metrics
+             metrics = PerformanceMetrics(
+                 operation_name=op_name,
+                 module_name=module_name,
+                 start_time=time.time(),
+                 end_time=0.0,
+                 target_seconds=target
+             )
+
+             # Start memory tracking if enabled
+             if track_memory:
+                 tracemalloc.start()
+                 start_memory = tracemalloc.get_traced_memory()[0]
+             else:
+                 start_memory = 0
+
+             # Reset API counter
+             reset_api_counter()
+
+             try:
+                 # Execute the function
+                 result = f(*args, **kwargs)
+
+                 # Mark as successful
+                 metrics.success = True
+                 metrics.end_time = time.time()
+
+                 # Extract business metrics if available
+                 if isinstance(result, dict):
+                     metrics.business_value = result.get('annual_savings', 0.0)
+                     metrics.resources_processed = result.get('resources_count', 0)
+
+                 # Capture performance data
+                 metrics.api_calls_count = _api_call_counter
+
+                 if track_memory:
+                     current_memory, peak_memory = tracemalloc.get_traced_memory()
+                     metrics.memory_current_mb = (current_memory - start_memory) / 1024 / 1024
+                     metrics.memory_peak_mb = peak_memory / 1024 / 1024
+                     tracemalloc.stop()
+
+                 # Performance feedback
+                 _provide_performance_feedback(metrics)
+
+                 # Store metrics for analysis
+                 _performance_history.append(metrics)
+
+                 return result
+
+             except Exception as e:
+                 # Handle errors
+                 metrics.success = False
+                 metrics.error_message = str(e)
+                 metrics.end_time = time.time()
+
+                 if track_memory and tracemalloc.is_tracing():
+                     current_memory, peak_memory = tracemalloc.get_traced_memory()
+                     metrics.memory_current_mb = (current_memory - start_memory) / 1024 / 1024
+                     metrics.memory_peak_mb = peak_memory / 1024 / 1024
+                     tracemalloc.stop()
+
+                 # Store failed metrics
+                 _performance_history.append(metrics)
+
+                 print_error(f"❌ Operation failed after {metrics.execution_time:.1f}s: {str(e)}")
+                 raise
+
+         return wrapper
+     return decorator
+
+
+ def _provide_performance_feedback(metrics: PerformanceMetrics):
+     """
+     Provide performance feedback and optimization recommendations.
+
+     Args:
+         metrics: Performance metrics from operation
+     """
+     execution_time = metrics.execution_time
+     target = metrics.target_seconds
+
+     if execution_time <= target:
+         # Performance target met
+         print_success(
+             f"⚡ Performance: {execution_time:.1f}s "
+             f"(target: <{target}s) - {metrics.efficiency_score:.1f}% efficient"
+         )
+
+         # Celebrate exceptional performance
+         if execution_time <= target * 0.5:
+             print_success("🏆 Exceptional performance - well below target!")
+
+     else:
+         # Performance target exceeded
+         print_warning(
+             f"⚠️ Performance: {execution_time:.1f}s "
+             f"(exceeded {target}s target by {execution_time - target:.1f}s)"
+         )
+
+         # Provide optimization recommendations
+         _provide_optimization_recommendations(metrics)
+
+
+ def _provide_optimization_recommendations(metrics: PerformanceMetrics):
+     """
+     Provide specific optimization recommendations based on performance data.
+
+     Args:
+         metrics: Performance metrics showing degradation
+     """
+     print_info("🔧 Performance optimization suggestions:")
+
+     # Time-based recommendations
+     if metrics.execution_time > metrics.target_seconds * 2:
+         print_info(f" • Consider using --parallel for {metrics.operation_name}")
+         print_info(" • Try a different AWS region for better API performance")
+
+     # Memory-based recommendations
+     if metrics.memory_peak_mb > 200:  # 200MB threshold
+         print_info(f" • High memory usage: {metrics.memory_peak_mb:.1f}MB")
+         print_info(" • Consider processing resources in smaller batches")
+
+     # API call recommendations
+     if metrics.api_calls_count > 100:
+         print_info(f" • High API call volume: {metrics.api_calls_count} calls")
+         print_info(" • Consider implementing result caching")
+         print_info(" • Check for API throttling issues")
+
+     # Module-specific recommendations
+     module_recommendations = {
+         "finops": [
+             "Use more specific date ranges to reduce Cost Explorer data",
+             "Consider account filtering for large organizations"
+         ],
+         "inventory": [
+             "Use service-specific discovery instead of full scan",
+             "Implement parallel account processing"
+         ],
+         "security": [
+             "Focus on high-priority security checks first",
+             "Use incremental scanning for large environments"
+         ]
+     }
+
+     if metrics.module_name in module_recommendations:
+         for rec in module_recommendations[metrics.module_name]:
+             print_info(f" • {rec}")
+
+
+ def benchmark_operation(module_name: str, operation_name: str,
+                         target_calls: int = 10):
+     """
+     Decorator for benchmarking operation performance over multiple runs.
+
+     Args:
+         module_name: Module being benchmarked
+         operation_name: Operation being benchmarked
+         target_calls: Number of benchmark runs
+
+     Usage:
+         @benchmark_operation(module_name="finops", operation_name="cost_analysis")
+         def analyze_costs(**kwargs):
+             # Your operation code here
+     """
+     def decorator(f: Callable) -> Callable:
+         @wraps(f)
+         def wrapper(*args, **kwargs):
+             print_info(f"🏁 Starting benchmark: {target_calls} runs of {operation_name}")
+
+             benchmark_results = []
+
+             for run in range(target_calls):
+                 print_info(f" Run {run + 1}/{target_calls}")
+
+                 # Execute with monitoring
+                 monitored_func = monitor_performance(
+                     module_name=module_name,
+                     operation_name=f"{operation_name}_benchmark_{run+1}"
+                 )(f)
+
+                 result = monitored_func(*args, **kwargs)
+
+                 # Collect metrics from last run
+                 if _performance_history:
+                     benchmark_results.append(_performance_history[-1])
+
+             # Analyze benchmark results
+             _analyze_benchmark_results(benchmark_results, operation_name)
+
+             return result
+
+         return wrapper
+     return decorator
+
+
+ def _analyze_benchmark_results(results: List[PerformanceMetrics], operation_name: str):
+     """
+     Analyze and report benchmark results.
+
+     Args:
+         results: List of performance metrics from benchmark runs
+         operation_name: Name of operation benchmarked
+     """
+     if not results:
+         return
+
+     execution_times = [r.execution_time for r in results if r.success]
+
+     if not execution_times:
+         print_error("❌ No successful benchmark runs to analyze")
+         return
+
+     # Calculate statistics
+     avg_time = sum(execution_times) / len(execution_times)
+     min_time = min(execution_times)
+     max_time = max(execution_times)
+
+     # Create benchmark summary table
+     table = create_table(
+         title=f"📊 Benchmark Results: {operation_name}",
+         columns=[
+             {"name": "Metric", "style": "cyan"},
+             {"name": "Value", "style": "green"},
+             {"name": "Assessment", "style": "yellow"}
+         ]
+     )
+
+     target = results[0].target_seconds
+
+     table.add_row("Average Time", f"{avg_time:.2f}s",
+                   "✅ Good" if avg_time <= target else "⚠️ Needs optimization")
+     table.add_row("Best Time", f"{min_time:.2f}s",
+                   "🏆 Excellent" if min_time <= target * 0.5 else "✅ Good")
+     table.add_row("Worst Time", f"{max_time:.2f}s",
+                   "⚠️ Investigate" if max_time > target * 1.5 else "✅ Acceptable")
+     table.add_row("Success Rate", f"{len(execution_times)}/{len(results)}",
+                   "✅ Perfect" if len(execution_times) == len(results) else "⚠️ Some failures")
+     table.add_row("Consistency", f"±{(max_time - min_time):.2f}s",
+                   "✅ Consistent" if (max_time - min_time) <= target * 0.2 else "⚠️ Variable")
+
+     console.print(table)
+
+
+ def get_performance_report(module_name: Optional[str] = None,
+                            last_n_operations: int = 10) -> Dict[str, Any]:
+     """
+     Generate performance report for operations.
+
+     Args:
+         module_name: Filter by specific module (None for all)
+         last_n_operations: Number of recent operations to include
+
+     Returns:
+         Performance report dictionary
+     """
+     # Filter operations
+     filtered_operations = _performance_history
+     if module_name:
+         filtered_operations = [op for op in filtered_operations
+                                if op.module_name == module_name]
+
+     # Get recent operations
+     recent_operations = filtered_operations[-last_n_operations:]
+
+     if not recent_operations:
+         return {"message": "No performance data available"}
+
+     # Calculate summary statistics
+     successful_ops = [op for op in recent_operations if op.success]
+     failed_ops = [op for op in recent_operations if not op.success]
+
+     avg_time = sum(op.execution_time for op in successful_ops) / len(successful_ops) if successful_ops else 0
+     avg_efficiency = sum(op.efficiency_score for op in successful_ops) / len(successful_ops) if successful_ops else 0
+
+     report = {
+         "summary": {
+             "total_operations": len(recent_operations),
+             "successful_operations": len(successful_ops),
+             "failed_operations": len(failed_ops),
+             "success_rate_percent": (len(successful_ops) / len(recent_operations)) * 100,
+             "average_execution_time": round(avg_time, 2),
+             "average_efficiency_score": round(avg_efficiency, 1)
+         },
+         "operations": [op.to_dict() for op in recent_operations],
+         "recommendations": _generate_performance_recommendations(recent_operations)
+     }
+
+     return report
+
+
+ def _generate_performance_recommendations(operations: List[PerformanceMetrics]) -> List[str]:
+     """Generate performance recommendations based on operation history."""
+     recommendations = []
+
+     if not operations:
+         return recommendations
+
+     # Analyze patterns
+     slow_operations = [op for op in operations
+                        if op.success and op.performance_ratio > 1.5]
+     high_memory_operations = [op for op in operations
+                               if op.memory_peak_mb > 150]
+     high_api_operations = [op for op in operations
+                            if op.api_calls_count > 50]
+
+     if slow_operations:
+         recommendations.append(
+             f"⚠️ {len(slow_operations)} operations exceeded target by >50% - consider optimization"
+         )
+
+     if high_memory_operations:
+         recommendations.append(
+             f"🧠 {len(high_memory_operations)} operations used >150MB memory - consider batch processing"
+         )
+
+     if high_api_operations:
+         recommendations.append(
+             f"🔄 {len(high_api_operations)} operations made >50 API calls - consider caching"
+         )
+
+     # Success rate recommendations
+     failed_ops = [op for op in operations if not op.success]
+     if len(failed_ops) > len(operations) * 0.1:  # >10% failure rate
+         recommendations.append(
+             "❌ High failure rate detected - review error handling and retry logic"
+         )
+
+     return recommendations
+
+
+ def clear_performance_history():
+     """Clear performance history for fresh tracking."""
+     global _performance_history
+     _performance_history.clear()
+     print_info("Performance history cleared")
+
+
+ def export_performance_data(output_path: str = "performance_report.json") -> bool:
+     """
+     Export performance data to JSON file.
+
+     Args:
+         output_path: Path for output file
+
+     Returns:
+         True if export successful
+     """
+     try:
+         report = get_performance_report()
+
+         with open(output_path, 'w') as f:
+             json.dump(report, f, indent=2)
+
+         print_success(f"Performance data exported to {output_path}")
+         return True
+
+     except Exception as e:
+         print_error(f"Failed to export performance data: {str(e)}")
+         return False
+
+
+ # Export public interface
+ __all__ = [
+     "PerformanceMetrics",
+     "ModulePerformanceTargets",
+     "monitor_performance",
+     "benchmark_operation",
+     "track_api_call",
+     "reset_api_counter",
+     "get_performance_report",
+     "clear_performance_history",
+     "export_performance_data"
+ ]
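
A minimal usage sketch of the new decorators, with a hypothetical finops function and illustrative savings figures, assuming the file above is importable as runbooks.common.performance_monitoring:

    from runbooks.common.performance_monitoring import (
        monitor_performance, track_api_call,
        get_performance_report, export_performance_data,
    )

    @monitor_performance(module_name="finops", operation_name="ebs_cost_analysis")
    def analyze_ebs_costs(profile=None, **kwargs):
        # Hypothetical analysis body; each AWS call would invoke track_api_call()
        track_api_call()
        # These keys are what the wrapper reads into business_value / resources_processed
        return {"annual_savings": 12500.0, "resources_count": 42}

    analyze_ebs_costs(profile="default")
    report = get_performance_report(module_name="finops", last_n_operations=5)
    export_performance_data("finops_performance.json")

Because the wrapper resets the API counter before calling the function, only calls made inside the decorated operation are counted against its metrics.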
runbooks/common/profile_utils.py
@@ -4,7 +4,7 @@ Universal AWS Profile Management for CloudOps Runbooks Platform

  This module provides truly universal AWS profile management that works with ANY AWS setup:
  - Single account setups
- - Multi-account setups
+ - Multi-account setups
  - Any profile naming convention
  - No specific environment variable requirements

@@ -13,18 +13,21 @@ Features:
  - Works with ANY AWS profile names (not just specific test profiles)
  - No hardcoded environment variable assumptions
  - Simple, reliable profile selection for all users
+ - Enhanced profile validation with enterprise error handling

  Author: CloudOps Runbooks Team
- Version: 1.0.0 - Universal Compatibility
+ Version: 1.1.0 - Enhanced Profile Validation
  """

  import os
  import time
- from typing import Dict, Optional, Union, List, Tuple
+ from functools import wraps
+ from typing import Dict, Optional, Union, List, Tuple, Callable

  import boto3
+ from botocore.exceptions import ProfileNotFound, NoCredentialsError

- from runbooks.common.rich_utils import console
+ from runbooks.common.rich_utils import console, print_error, print_success, print_info, print_warning

  # Profile cache to reduce duplicate calls (performance optimization)
  _profile_cache = {}
@@ -267,14 +270,138 @@ def get_available_profiles_for_validation() -> list:
      return ['default']  # Fallback to default profile


- # Export all public functions
+ def validate_profile_access_decorator(operation_type: str = "general"):
+     """
+     Decorator to validate profile has required permissions before executing operation.
+
+     Enhances existing profile management with enterprise validation capabilities.
+
+     Args:
+         operation_type: Type of operation for context and recommendations
+
+     Usage:
+         @validate_profile_access_decorator(operation_type="finops")
+         def my_cost_analysis(profile=None, **kwargs):
+             # Your operation code here
+     """
+     def decorator(f: Callable) -> Callable:
+         @wraps(f)
+         def wrapper(*args, **kwargs):
+             # Extract profile from kwargs
+             profile = kwargs.get('profile')
+
+             # Get the profile that would be used
+             selected_profile = get_profile_for_operation(operation_type, profile)
+
+             # Validate profile access
+             try:
+                 session = boto3.Session(profile_name=selected_profile)
+                 sts = session.client('sts')
+                 identity = sts.get_caller_identity()
+
+                 print_success(f"Profile validation successful: {selected_profile}")
+                 print_info(f"Account: {identity.get('Account', 'Unknown')}")
+
+                 # Store validated profile in kwargs for operation
+                 kwargs['_validated_profile'] = selected_profile
+                 kwargs['_account_id'] = identity.get('Account')
+
+             except ProfileNotFound:
+                 print_error(f"AWS profile not found: {selected_profile}")
+                 print_info("Available profiles:")
+                 for p in boto3.Session().available_profiles:
+                     print_info(f" • {p}")
+                 raise SystemExit(1)
+
+             except NoCredentialsError:
+                 print_error("No AWS credentials configured")
+                 print_info("Configure credentials with: [bold green]aws configure[/]")
+                 print_info("Or use SSO login: [bold green]aws sso login --profile your-profile[/]")
+                 raise SystemExit(1)
+
+             except Exception as e:
+                 print_error(f"Profile validation failed: {selected_profile}")
+                 print_warning(f"Error: {str(e)}")
+
+                 # Provide operation-specific recommendations
+                 if operation_type == "finops" and "Access" in str(e):
+                     print_info("Cost operations may require billing permissions")
+                     print_info("Try: [bold green]--profile BILLING_PROFILE[/]")
+                 elif operation_type == "inventory" and "Access" in str(e):
+                     print_info("Inventory operations may require organizations permissions")
+                     print_info("Try: [bold green]--profile MANAGEMENT_PROFILE[/]")
+                 elif operation_type == "operate" and "Access" in str(e):
+                     print_info("Resource operations may require operational permissions")
+                     print_info("Try: [bold green]--profile CENTRALISED_OPS_PROFILE[/]")
+
+                 raise SystemExit(1)
+
+             return f(*args, **kwargs)
+         return wrapper
+     return decorator
+
+
+ def quick_profile_check(profile: Optional[str] = None, quiet: bool = False) -> bool:
+     """
+     Quick profile accessibility check without raising exceptions.
+
+     Args:
+         profile: Profile to check (None for auto-detection)
+         quiet: Suppress output messages
+
+     Returns:
+         True if profile is accessible, False otherwise
+     """
+     try:
+         selected_profile = get_profile_for_operation("general", profile)
+         session = boto3.Session(profile_name=selected_profile)
+         sts = session.client('sts')
+         sts.get_caller_identity()
+
+         if not quiet:
+             print_success(f"Profile {selected_profile} is accessible")
+         return True
+
+     except Exception as e:
+         if not quiet:
+             print_warning(f"Profile accessibility check failed: {str(e)}")
+         return False
+
+
+ def get_profile_recommendations(operation_type: str) -> List[str]:
+     """
+     Get profile recommendations for specific operation types.
+
+     Args:
+         operation_type: Type of operation (finops, inventory, operate, security)
+
+     Returns:
+         List of recommended profile names (environment variable names)
+     """
+     recommendations = {
+         "finops": ["BILLING_PROFILE", "MANAGEMENT_PROFILE"],
+         "cost": ["BILLING_PROFILE", "MANAGEMENT_PROFILE"],
+         "inventory": ["MANAGEMENT_PROFILE", "CENTRALISED_OPS_PROFILE"],
+         "operate": ["CENTRALISED_OPS_PROFILE", "MANAGEMENT_PROFILE"],
+         "security": ["MANAGEMENT_PROFILE", "SECURITY_PROFILE"],
+         "cfat": ["MANAGEMENT_PROFILE"],
+         "vpc": ["CENTRALISED_OPS_PROFILE", "MANAGEMENT_PROFILE"]
+     }
+
+     return recommendations.get(operation_type, ["AWS_PROFILE", "MANAGEMENT_PROFILE"])
+
+
+ # Export all public functions including new validation enhancements
  __all__ = [
      "get_profile_for_operation",
      "resolve_profile_for_operation_silent",
-     "create_cost_session",
+     "create_cost_session",
      "create_management_session",
      "create_operational_session",
      "get_current_profile_info",
      "validate_profile_access",
      "get_available_profiles_for_validation",
+     "validate_profile_access_decorator",
+     "quick_profile_check",
+     "get_profile_recommendations",
  ]
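
Taken together, the profile_utils additions let a command verify credentials before it does any work and fall back to guidance when validation fails. A minimal sketch, assuming these functions ship in runbooks.common.profile_utils and using a hypothetical profile name and operation:

    from runbooks.common.profile_utils import (
        validate_profile_access_decorator, quick_profile_check, get_profile_recommendations,
    )

    @validate_profile_access_decorator(operation_type="finops")
    def run_cost_report(profile=None, **kwargs):
        # The decorator injects _validated_profile and _account_id into kwargs
        return {"profile": kwargs.get("_validated_profile"), "account": kwargs.get("_account_id")}

    # "my-billing-profile" is a placeholder for whatever profile exists locally
    if quick_profile_check(profile="my-billing-profile", quiet=True):
        run_cost_report(profile="my-billing-profile")
    else:
        # Environment-variable names the module suggests for finops work
        print(get_profile_recommendations("finops"))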