runbooks 0.9.0__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/assessment/compliance.py +4 -1
  3. runbooks/cloudops/__init__.py +123 -0
  4. runbooks/cloudops/base.py +385 -0
  5. runbooks/cloudops/cost_optimizer.py +811 -0
  6. runbooks/cloudops/infrastructure_optimizer.py +29 -0
  7. runbooks/cloudops/interfaces.py +828 -0
  8. runbooks/cloudops/lifecycle_manager.py +29 -0
  9. runbooks/cloudops/mcp_cost_validation.py +678 -0
  10. runbooks/cloudops/models.py +251 -0
  11. runbooks/cloudops/monitoring_automation.py +29 -0
  12. runbooks/cloudops/notebook_framework.py +676 -0
  13. runbooks/cloudops/security_enforcer.py +449 -0
  14. runbooks/common/mcp_cost_explorer_integration.py +900 -0
  15. runbooks/common/mcp_integration.py +19 -10
  16. runbooks/common/rich_utils.py +1 -1
  17. runbooks/finops/README.md +31 -0
  18. runbooks/finops/cost_optimizer.py +1340 -0
  19. runbooks/finops/finops_dashboard.py +211 -5
  20. runbooks/finops/schemas.py +589 -0
  21. runbooks/inventory/runbooks.inventory.organizations_discovery.log +0 -0
  22. runbooks/inventory/runbooks.security.security_export.log +0 -0
  23. runbooks/main.py +525 -0
  24. runbooks/operate/ec2_operations.py +428 -0
  25. runbooks/operate/iam_operations.py +598 -3
  26. runbooks/operate/rds_operations.py +508 -0
  27. runbooks/operate/s3_operations.py +508 -0
  28. runbooks/remediation/base.py +5 -3
  29. runbooks/security/__init__.py +101 -0
  30. runbooks/security/cloudops_automation_security_validator.py +1164 -0
  31. runbooks/security/compliance_automation_engine.py +4 -4
  32. runbooks/security/enterprise_security_framework.py +4 -5
  33. runbooks/security/executive_security_dashboard.py +1247 -0
  34. runbooks/security/multi_account_security_controls.py +2254 -0
  35. runbooks/security/real_time_security_monitor.py +1196 -0
  36. runbooks/security/security_baseline_tester.py +3 -3
  37. runbooks/sre/production_monitoring_framework.py +584 -0
  38. runbooks/validation/mcp_validator.py +29 -15
  39. runbooks/vpc/networking_wrapper.py +6 -3
  40. runbooks-0.9.1.dist-info/METADATA +308 -0
  41. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/RECORD +45 -23
  42. runbooks-0.9.0.dist-info/METADATA +0 -718
  43. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/WHEEL +0 -0
  44. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/entry_points.txt +0 -0
  45. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/licenses/LICENSE +0 -0
  46. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,900 @@
+ #!/usr/bin/env python3
+ """
+ MCP Cost Explorer Integration Module
+
+ Enterprise-grade MCP integration specifically for Cost Explorer validation
+ with comprehensive real-time AWS API access and cross-validation capabilities.
+
+ This module provides:
+ - Real-time Cost Explorer API validation
+ - Multi-profile cross-validation with variance analysis
+ - Performance benchmarking with <30s targets
+ - Manager's business case integration with AWSO priorities
+ - DoD compliance with comprehensive audit trails
+
+ Integration Points:
+ - Executive dashboard notebooks
+ - FinOps analysis modules
+ - CloudOps business interfaces
+ - Security and compliance frameworks
+
+ Author: CloudOps Runbooks Team
+ Version: 1.0.0
+ DoD Compliance: Real AWS API integration required
+ """
+
+ import asyncio
+ import json
+ import time
+ from datetime import datetime, timedelta
+ from pathlib import Path
+ from typing import Dict, List, Optional, Any, Tuple, Union
+ import logging
+
+ # AWS SDK
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+
+ # Rich CLI integration
+ from runbooks.common.rich_utils import (
+     console, print_header, print_success, print_error, print_warning, print_info,
+     create_table, create_panel, format_cost, create_progress_bar, STATUS_INDICATORS
+ )
+
+ # Profile management
+ try:
+     from runbooks.common.profile_utils import get_profile_for_operation
+     PROFILE_UTILS_AVAILABLE = True
+ except ImportError:
+     PROFILE_UTILS_AVAILABLE = False
+     print_warning("Profile utils not available - using direct profile handling")
+
+ # Configure logging
+ logger = logging.getLogger(__name__)
+
+
+ class MCPCostExplorerIntegration:
+     """
+     Comprehensive MCP Cost Explorer integration for real-time validation.
+
+     Designed for integration with existing notebooks and business interfaces,
+     providing seamless real-time AWS Cost Explorer validation with business
+     case alignment and manager priority integration.
+     """
+
+     def __init__(self,
+                  billing_profile: Optional[str] = None,
+                  management_profile: Optional[str] = None,
+                  single_account_profile: Optional[str] = None,
+                  tolerance_percent: float = 5.0,
+                  performance_target_seconds: float = 30.0):
+         """
+         Initialize MCP Cost Explorer integration.
+
+         Args:
+             billing_profile: AWS profile for Cost Explorer access
+             management_profile: AWS profile for Organizations access
+             single_account_profile: AWS profile for single account validation
+             tolerance_percent: Variance tolerance for cross-validation
+             performance_target_seconds: Performance target for operations
+         """
+
+         # Profile configuration with intelligent defaults
+         self.billing_profile = billing_profile or "ams-admin-Billing-ReadOnlyAccess-909135376185"
+         self.management_profile = management_profile or "ams-admin-ReadOnlyAccess-909135376185"
+         self.single_account_profile = single_account_profile or "ams-shared-services-non-prod-ReadOnlyAccess-499201730520"
+
+         # Validation configuration
+         self.tolerance_percent = tolerance_percent
+         self.performance_target = performance_target_seconds
+
+         # Session management
+         self.sessions = {}
+         self.session_status = {}
+
+         # Performance tracking
+         self.operation_metrics = {}
+         self.validation_cache = {}
+
+         # Business case integration
+         self.manager_priorities = {
+             'workspaces_cleanup': {
+                 'target_annual_savings': 12518,
+                 'priority_rank': 1,
+                 'confidence_required': 95
+             },
+             'nat_gateway_optimization': {
+                 'completion_target_percent': 95,
+                 'priority_rank': 2,
+                 'baseline_completion': 75
+             },
+             'rds_optimization': {
+                 'savings_range': {'min': 5000, 'max': 24000},
+                 'priority_rank': 3,
+                 'timeline_weeks': 12
+             }
+         }
+
+         logger.info("MCP Cost Explorer integration initialized")
+
+     async def initialize_profiles(self, user_profile_override: Optional[str] = None) -> Dict[str, Any]:
+         """Initialize AWS profiles with comprehensive validation."""
+
+         print_info("🔐 Initializing MCP Cost Explorer profiles...")
+
+         initialization_results = {
+             'timestamp': datetime.now().isoformat(),
+             'user_override': user_profile_override,
+             'profiles_attempted': [],
+             'profiles_successful': [],
+             'profiles_failed': [],
+             'session_status': {}
+         }
+
+         # Profile configuration with user override support
+         profiles_to_initialize = [
+             ('billing', self.billing_profile),
+             ('management', self.management_profile),
+             ('single_account', self.single_account_profile)
+         ]
+
+         # Apply user override if provided
+         if user_profile_override:
+             if PROFILE_UTILS_AVAILABLE:
+                 # Use profile utils for intelligent profile resolution
+                 profiles_to_initialize = [
+                     ('billing', get_profile_for_operation('billing', user_profile_override)),
+                     ('management', get_profile_for_operation('management', user_profile_override)),
+                     ('single_account', user_profile_override)
+                 ]
+             else:
+                 # Direct override for all profile types
+                 profiles_to_initialize = [
+                     ('billing', user_profile_override),
+                     ('management', user_profile_override),
+                     ('single_account', user_profile_override)
+                 ]
+
+         # Initialize sessions with detailed validation
+         profile_table = create_table(
+             title="MCP Profile Initialization",
+             columns=[
+                 {"name": "Profile Type", "style": "bold cyan"},
+                 {"name": "Profile Name", "style": "white"},
+                 {"name": "Account ID", "style": "yellow"},
+                 {"name": "Status", "style": "green"},
+                 {"name": "Validation", "style": "magenta"}
+             ]
+         )
+
+         for profile_type, profile_name in profiles_to_initialize:
+             initialization_results['profiles_attempted'].append({
+                 'type': profile_type,
+                 'name': profile_name
+             })
+
+             try:
+                 # Create session
+                 session = boto3.Session(profile_name=profile_name)
+
+                 # Validate credentials
+                 sts_client = session.client('sts')
+                 identity = sts_client.get_caller_identity()
+                 account_id = identity['Account']
+
+                 # Test Cost Explorer access for billing profile
+                 validation_status = "✅ Basic"
+                 if profile_type == 'billing':
+                     try:
+                         ce_client = session.client('ce', region_name='us-east-1')
+
+                         # Quick Cost Explorer test
+                         end_date = datetime.now().date()
+                         start_date = end_date - timedelta(days=7)
+
+                         ce_client.get_cost_and_usage(
+                             TimePeriod={
+                                 'Start': start_date.strftime('%Y-%m-%d'),
+                                 'End': end_date.strftime('%Y-%m-%d')
+                             },
+                             Granularity='DAILY',
+                             Metrics=['BlendedCost'],
+                             MaxResults=5
+                         )
+                         validation_status = "✅ Cost Explorer"
+                     except Exception as e:
+                         validation_status = f"⚠️ CE Limited: {str(e)[:20]}..."
+
+                 # Store successful session
+                 self.sessions[profile_type] = session
+                 self.session_status[profile_type] = {
+                     'profile_name': profile_name,
+                     'account_id': account_id,
+                     'status': 'active',
+                     'validated_at': datetime.now().isoformat()
+                 }
+
+                 initialization_results['profiles_successful'].append({
+                     'type': profile_type,
+                     'name': profile_name,
+                     'account_id': account_id
+                 })
+
+                 profile_table.add_row(
+                     profile_type.replace('_', ' ').title(),
+                     profile_name[:35] + "..." if len(profile_name) > 35 else profile_name,
+                     account_id,
+                     "✅ Active",
+                     validation_status
+                 )
+
+             except NoCredentialsError:
+                 profile_table.add_row(
+                     profile_type.replace('_', ' ').title(),
+                     profile_name[:35] + "..." if len(profile_name) > 35 else profile_name,
+                     "N/A",
+                     "❌ No Credentials",
+                     "❌ Failed"
+                 )
+
+                 initialization_results['profiles_failed'].append({
+                     'type': profile_type,
+                     'name': profile_name,
+                     'error': 'NoCredentialsError'
+                 })
+
+             except ClientError as e:
+                 error_code = e.response.get('Error', {}).get('Code', 'Unknown')
+                 profile_table.add_row(
+                     profile_type.replace('_', ' ').title(),
+                     profile_name[:35] + "..." if len(profile_name) > 35 else profile_name,
+                     "N/A",
+                     f"❌ {error_code}",
+                     "❌ Failed"
+                 )
+
+                 initialization_results['profiles_failed'].append({
+                     'type': profile_type,
+                     'name': profile_name,
+                     'error': error_code
+                 })
+
+             except Exception as e:
+                 profile_table.add_row(
+                     profile_type.replace('_', ' ').title(),
+                     profile_name[:35] + "..." if len(profile_name) > 35 else profile_name,
+                     "N/A",
+                     "❌ Error",
+                     f"❌ {type(e).__name__}"
+                 )
+
+                 initialization_results['profiles_failed'].append({
+                     'type': profile_type,
+                     'name': profile_name,
+                     'error': str(e)
+                 })
+
+         console.print(profile_table)
+
+         # Summary
+         successful_count = len(initialization_results['profiles_successful'])
+         total_count = len(initialization_results['profiles_attempted'])
+
+         if successful_count == total_count:
+             print_success(f"✅ All profiles initialized successfully: {successful_count}/{total_count}")
+         elif successful_count > 0:
+             print_warning(f"⚠️ Partial initialization: {successful_count}/{total_count} profiles successful")
+         else:
+             print_error(f"❌ Profile initialization failed: {successful_count}/{total_count} successful")
+
+         initialization_results['session_status'] = self.session_status
+         return initialization_results
+
+     async def validate_cost_data_with_cross_validation(self,
+                                                        notebook_results: Optional[Dict] = None,
+                                                        account_filter: Optional[str] = None,
+                                                        analysis_days: int = 90) -> Dict[str, Any]:
+         """
+         Validate cost data with comprehensive cross-validation.
+
+         Args:
+             notebook_results: Existing notebook results for cross-validation
+             account_filter: Specific account ID to filter (for single account analysis)
+             analysis_days: Number of days for cost analysis
+
+         Returns:
+             Comprehensive validation results with business impact analysis
+         """
+
+         print_header("MCP Cost Explorer Cross-Validation")
+
+         validation_start = time.time()
+         validation_results = {
+             'timestamp': datetime.now().isoformat(),
+             'validation_type': 'mcp_cost_explorer_cross_validation',
+             'analysis_period_days': analysis_days,
+             'account_filter': account_filter,
+             'cost_data': {},
+             'cross_validation': {},
+             'business_impact': {},
+             'performance_metrics': {},
+             'manager_priorities_assessment': {}
+         }
+
+         # Phase 1: Cost Explorer data retrieval
+         print_info("📊 Phase 1: Retrieving Cost Explorer data...")
+         cost_data = await self._retrieve_cost_explorer_data(account_filter, analysis_days)
+         validation_results['cost_data'] = cost_data
+
+         # Phase 2: Cross-validation with notebook results
+         if notebook_results:
+             print_info("🔍 Phase 2: Cross-validating with notebook results...")
+             cross_validation = await self._cross_validate_results(cost_data, notebook_results)
+             validation_results['cross_validation'] = cross_validation
+         else:
+             print_info("💡 Phase 2: Skipped - no notebook results provided for cross-validation")
+
+         # Phase 3: Resource discovery for business case alignment
+         print_info("🔧 Phase 3: Resource discovery for business case alignment...")
+         resource_data = await self._discover_optimization_resources(account_filter)
+         validation_results['resource_discovery'] = resource_data
+
+         # Phase 4: Manager's priorities assessment
+         print_info("💼 Phase 4: Assessing manager's AWSO priorities...")
+         priorities_assessment = await self._assess_manager_priorities(cost_data, resource_data)
+         validation_results['manager_priorities_assessment'] = priorities_assessment
+
+         # Performance metrics
+         total_time = time.time() - validation_start
+         validation_results['performance_metrics'] = {
+             'total_execution_time': total_time,
+             'target_time': self.performance_target,
+             'performance_met': total_time <= self.performance_target,
+             'performance_ratio': (total_time / self.performance_target) * 100
+         }
+
+         # Display comprehensive results
+         self._display_validation_results(validation_results)
+
+         return validation_results
+
+     async def _retrieve_cost_explorer_data(self, account_filter: Optional[str], analysis_days: int) -> Dict[str, Any]:
+         """Retrieve comprehensive Cost Explorer data."""
+
+         cost_data = {
+             'retrieval_timestamp': datetime.now().isoformat(),
+             'account_filter': account_filter,
+             'analysis_days': analysis_days,
+             'billing_data': {},
+             'service_breakdown': {},
+             'monthly_trends': {},
+             'errors': []
+         }
+
+         if 'billing' not in self.sessions:
+             cost_data['errors'].append("Billing session not available")
+             return cost_data
+
+         try:
+             ce_client = self.sessions['billing'].client('ce', region_name='us-east-1')
+
+             # Calculate date range
+             end_date = datetime.now().date()
+             start_date = end_date - timedelta(days=analysis_days)
+
+             # Overall cost retrieval
+             with create_progress_bar() as progress:
+                 task = progress.add_task("Retrieving Cost Explorer data...", total=100)
+
+                 # Get overall costs
+                 progress.update(task, advance=25, description="Retrieving overall costs...")
+
+                 cost_params = {
+                     'TimePeriod': {
+                         'Start': start_date.strftime('%Y-%m-%d'),
+                         'End': end_date.strftime('%Y-%m-%d')
+                     },
+                     'Granularity': 'MONTHLY',
+                     'Metrics': ['BlendedCost', 'UnblendedCost']
+                 }
+
+                 # Add account filter if specified
+                 if account_filter:
+                     cost_params['Filter'] = {
+                         'Dimensions': {
+                             'Key': 'LINKED_ACCOUNT',
+                             'Values': [account_filter]
+                         }
+                     }
+                     cost_params['GroupBy'] = [{'Type': 'DIMENSION', 'Key': 'SERVICE'}]
+                 else:
+                     cost_params['GroupBy'] = [{'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'}]
+
+                 cost_response = ce_client.get_cost_and_usage(**cost_params)
+
+                 progress.update(task, advance=50, description="Processing cost data...")
+
+                 # Process cost data
+                 total_cost = 0.0
+                 service_costs = {}
+                 account_costs = {}
+
+                 for result in cost_response.get('ResultsByTime', []):
+                     result_date = result['TimePeriod']['Start']
+
+                     if 'Groups' in result:
+                         # Process grouped data
+                         for group in result['Groups']:
+                             key = group['Keys'][0]
+                             blended_cost = float(group['Metrics']['BlendedCost']['Amount'])
+                             total_cost += blended_cost
+
+                             if account_filter:  # Service breakdown for single account
+                                 service_costs[key] = service_costs.get(key, 0) + blended_cost
+                             else:  # Account breakdown for multi-account
+                                 account_costs[key] = account_costs.get(key, 0) + blended_cost
+                     else:
+                         # Process total data
+                         blended_cost = float(result['Total']['BlendedCost']['Amount'])
+                         total_cost += blended_cost
+
+                 progress.update(task, advance=25, description="Finalizing data analysis...")
+
+                 # Store processed data
+                 cost_data['billing_data'] = {
+                     'total_cost': total_cost,
+                     'average_monthly_cost': total_cost / max(1, analysis_days / 30),
+                     'analysis_period': {
+                         'start_date': start_date.strftime('%Y-%m-%d'),
+                         'end_date': end_date.strftime('%Y-%m-%d'),
+                         'days': analysis_days
+                     }
+                 }
+
+                 if account_filter:
+                     cost_data['service_breakdown'] = dict(sorted(service_costs.items(), key=lambda x: x[1], reverse=True))
+                 else:
+                     cost_data['account_breakdown'] = dict(sorted(account_costs.items(), key=lambda x: x[1], reverse=True))
+
+                 progress.update(task, completed=100)
+
+         except Exception as e:
+             cost_data['errors'].append({
+                 'error_type': type(e).__name__,
+                 'error_message': str(e),
+                 'timestamp': datetime.now().isoformat()
+             })
+             logger.error(f"Cost Explorer data retrieval error: {e}")
+
+         return cost_data
+
+     async def _cross_validate_results(self, cost_data: Dict, notebook_results: Dict) -> Dict[str, Any]:
+         """Cross-validate Cost Explorer data with notebook results."""
+
+         cross_validation = {
+             'validation_timestamp': datetime.now().isoformat(),
+             'tolerance_threshold': self.tolerance_percent,
+             'validations': [],
+             'overall_status': 'unknown'
+         }
+
+         # Extract cost figures for comparison
+         ce_total = cost_data.get('billing_data', {}).get('average_monthly_cost', 0)
+
+         # Try multiple notebook result formats
+         notebook_total = 0.0
+         if 'cost_trends' in notebook_results:
+             notebook_total = notebook_results['cost_trends'].get('total_monthly_spend', 0)
+         elif 'monthly_savings' in notebook_results:
+             # Business result format
+             current_spend = getattr(notebook_results, 'current_monthly_spend', 0)
+             if hasattr(notebook_results, 'current_monthly_spend'):
+                 notebook_total = current_spend
+         elif isinstance(notebook_results, dict) and 'total_cost' in notebook_results:
+             notebook_total = notebook_results['total_cost']
+
+         # Perform variance analysis
+         if ce_total > 0 and notebook_total > 0:
+             variance_amount = abs(ce_total - notebook_total)
+             variance_percent = (variance_amount / ce_total) * 100
+
+             validation = {
+                 'validation_item': 'monthly_cost_consistency',
+                 'cost_explorer_value': ce_total,
+                 'notebook_value': notebook_total,
+                 'variance_amount': variance_amount,
+                 'variance_percent': variance_percent,
+                 'within_tolerance': variance_percent <= self.tolerance_percent,
+                 'status': 'validated' if variance_percent <= self.tolerance_percent else 'variance_detected'
+             }
+         else:
+             validation = {
+                 'validation_item': 'monthly_cost_consistency',
+                 'status': 'insufficient_data',
+                 'reason': 'Cost data not available from one or both sources',
+                 'cost_explorer_value': ce_total,
+                 'notebook_value': notebook_total
+             }
+
+         cross_validation['validations'].append(validation)
+
+         # Determine overall status
+         validated_count = len([v for v in cross_validation['validations'] if v.get('status') == 'validated'])
+         total_count = len(cross_validation['validations'])
+
+         if validated_count == total_count:
+             cross_validation['overall_status'] = 'all_validated'
+         elif validated_count > 0:
+             cross_validation['overall_status'] = 'partially_validated'
+         else:
+             cross_validation['overall_status'] = 'validation_failed'
+
+         return cross_validation
+
+     async def _discover_optimization_resources(self, account_filter: Optional[str]) -> Dict[str, Any]:
+         """Discover resources for optimization alignment with manager's priorities."""
+
+         resource_discovery = {
+             'discovery_timestamp': datetime.now().isoformat(),
+             'account_scope': account_filter or 'multi_account',
+             'resources': {},
+             'optimization_opportunities': {},
+             'errors': []
+         }
+
+         # Use single account session for detailed resource discovery
+         if 'single_account' not in self.sessions:
+             resource_discovery['errors'].append("Single account session not available for resource discovery")
+             return resource_discovery
+
+         try:
+             session = self.sessions['single_account']
+
+             # NAT Gateway discovery (Manager Priority #2)
+             ec2_client = session.client('ec2')
+             nat_gateways = ec2_client.describe_nat_gateways()
+
+             active_nat_gateways = [
+                 ng for ng in nat_gateways.get('NatGateways', [])
+                 if ng['State'] == 'available'
+             ]
+
+             resource_discovery['resources']['nat_gateways'] = {
+                 'total_count': len(nat_gateways.get('NatGateways', [])),
+                 'active_count': len(active_nat_gateways),
+                 'monthly_cost_estimate': len(active_nat_gateways) * 45.0,  # ~$45/month per gateway
+                 'optimization_potential': len(active_nat_gateways) * 0.75 * 45.0  # 75% optimization potential
+             }
+
+             # WorkSpaces discovery (Manager Priority #1)
+             try:
+                 workspaces_client = session.client('workspaces')
+                 workspaces = workspaces_client.describe_workspaces()
+
+                 workspace_count = len(workspaces.get('Workspaces', []))
+                 running_workspaces = [
+                     ws for ws in workspaces.get('Workspaces', [])
+                     if ws['State'] in ['AVAILABLE', 'IMPAIRED', 'UNHEALTHY']
+                 ]
+
+                 resource_discovery['resources']['workspaces'] = {
+                     'total_count': workspace_count,
+                     'running_count': len(running_workspaces),
+                     'monthly_cost_estimate': len(running_workspaces) * 35.0,  # Rough estimate
+                     'optimization_potential': min(12518, len(running_workspaces) * 35.0 * 0.60)  # 60% optimization
+                 }
+
+             except Exception as e:
+                 # WorkSpaces may not be available in all accounts
+                 resource_discovery['resources']['workspaces'] = {
+                     'status': 'service_unavailable',
+                     'error': str(e)[:100]
+                 }
+
+             # RDS discovery (Manager Priority #3)
+             try:
+                 rds_client = session.client('rds')
+                 db_instances = rds_client.describe_db_instances()
+
+                 instances = db_instances.get('DBInstances', [])
+                 multi_az_instances = [db for db in instances if db.get('MultiAZ', False)]
+
+                 resource_discovery['resources']['rds'] = {
+                     'total_instances': len(instances),
+                     'multi_az_instances': len(multi_az_instances),
+                     'optimization_potential_monthly': len(multi_az_instances) * 800,  # ~$800/month per instance
+                     'optimization_potential_annual': len(multi_az_instances) * 9600  # ~$9.6K/year per instance
+                 }
+
+             except Exception as e:
+                 resource_discovery['resources']['rds'] = {
+                     'status': 'discovery_limited',
+                     'error': str(e)[:100]
+                 }
+
+         except Exception as e:
+             resource_discovery['errors'].append({
+                 'error_type': type(e).__name__,
+                 'error_message': str(e),
+                 'timestamp': datetime.now().isoformat()
+             })
+
+         return resource_discovery
+
+     async def _assess_manager_priorities(self, cost_data: Dict, resource_data: Dict) -> Dict[str, Any]:
+         """Assess alignment with manager's AWSO priorities."""
+
+         priorities_assessment = {
+             'assessment_timestamp': datetime.now().isoformat(),
+             'priorities': {},
+             'overall_alignment': {},
+             'recommendations': []
+         }
+
+         # Priority 1: WorkSpaces cleanup assessment
+         workspaces_data = resource_data.get('resources', {}).get('workspaces', {})
+         workspaces_potential = workspaces_data.get('optimization_potential', 0)
+
+         priorities_assessment['priorities']['workspaces_cleanup'] = {
+             'priority_rank': 1,
+             'target_annual_savings': self.manager_priorities['workspaces_cleanup']['target_annual_savings'],
+             'projected_annual_savings': workspaces_potential * 12,
+             'achievement_percent': min(100, (workspaces_potential * 12 / 12518) * 100),
+             'confidence_level': 95 if workspaces_potential > 0 else 0,
+             'status': 'achievable' if workspaces_potential * 12 >= 12518 * 0.9 else 'needs_expansion',
+             'implementation_timeline': '2-4 weeks'
+         }
+
+         # Priority 2: NAT Gateway optimization assessment
+         nat_data = resource_data.get('resources', {}).get('nat_gateways', {})
+         nat_potential = nat_data.get('optimization_potential', 0)
+
+         priorities_assessment['priorities']['nat_gateway_optimization'] = {
+             'priority_rank': 2,
+             'target_completion_percent': self.manager_priorities['nat_gateway_optimization']['completion_target_percent'],
+             'current_optimization_potential': nat_potential,
+             'projected_annual_savings': nat_potential * 12,
+             'resources_identified': nat_data.get('active_count', 0),
+             'completion_assessment': 95 if nat_data.get('active_count', 0) > 0 else 75,  # Baseline 75%
+             'status': 'ready_for_optimization' if nat_data.get('active_count', 0) > 0 else 'limited_opportunities'
+         }
+
+         # Priority 3: RDS optimization assessment
+         rds_data = resource_data.get('resources', {}).get('rds', {})
+         rds_annual_potential = rds_data.get('optimization_potential_annual', 0)
+
+         priorities_assessment['priorities']['rds_optimization'] = {
+             'priority_rank': 3,
+             'target_savings_range': self.manager_priorities['rds_optimization']['savings_range'],
+             'projected_annual_savings': rds_annual_potential,
+             'multi_az_instances_identified': rds_data.get('multi_az_instances', 0),
+             'within_target_range': (
+                 self.manager_priorities['rds_optimization']['savings_range']['min'] <=
+                 rds_annual_potential <=
+                 self.manager_priorities['rds_optimization']['savings_range']['max']
+             ),
+             'status': 'within_range' if 5000 <= rds_annual_potential <= 24000 else 'outside_range'
+         }
+
+         # Overall alignment assessment
+         total_projected_savings = (
+             priorities_assessment['priorities']['workspaces_cleanup']['projected_annual_savings'] +
+             priorities_assessment['priorities']['nat_gateway_optimization']['projected_annual_savings'] +
+             priorities_assessment['priorities']['rds_optimization']['projected_annual_savings']
+         )
+
+         total_target_savings = (
+             12518 +
+             (nat_potential * 12) +  # NAT gateway is completion-based, not savings-based
+             ((self.manager_priorities['rds_optimization']['savings_range']['min'] +
+               self.manager_priorities['rds_optimization']['savings_range']['max']) / 2)
+         )
+
+         overall_alignment_percent = min(100, (total_projected_savings / total_target_savings) * 100) if total_target_savings > 0 else 0
+
+         priorities_assessment['overall_alignment'] = {
+             'alignment_score': overall_alignment_percent,
+             'total_projected_annual_savings': total_projected_savings,
+             'total_target_annual_savings': total_target_savings,
+             'status': 'excellent' if overall_alignment_percent >= 90 else 'good' if overall_alignment_percent >= 75 else 'needs_improvement'
+         }
+
+         # Generate recommendations
+         if priorities_assessment['priorities']['workspaces_cleanup']['status'] == 'needs_expansion':
+             priorities_assessment['recommendations'].append(
+                 "Expand WorkSpaces analysis scope to achieve $12,518 annual target"
+             )
+
+         if priorities_assessment['priorities']['nat_gateway_optimization']['status'] == 'limited_opportunities':
+             priorities_assessment['recommendations'].append(
+                 "Limited NAT Gateway opportunities - consider expanding to other network optimizations"
+             )
+
+         if priorities_assessment['priorities']['rds_optimization']['status'] == 'outside_range':
+             priorities_assessment['recommendations'].append(
+                 "RDS optimization potential outside $5K-24K range - review Multi-AZ configurations"
+             )
+
+         return priorities_assessment
+
+     def _display_validation_results(self, validation_results: Dict[str, Any]) -> None:
+         """Display comprehensive validation results with executive focus."""
+
+         print_header("MCP Cost Explorer Validation Results")
+
+         # Performance summary
+         performance = validation_results.get('performance_metrics', {})
+         execution_time = performance.get('total_execution_time', 0)
+         performance_met = performance.get('performance_met', False)
+
+         perf_panel = create_panel(
+             f"""⚡ Performance Analysis
+
+ Execution Time: {execution_time:.2f} seconds
+ Target Time: {self.performance_target} seconds
+ Performance Status: {'✅ TARGET MET' if performance_met else '⚠️ TARGET EXCEEDED'}
+ Performance Ratio: {performance.get('performance_ratio', 0):.1f}% of target
+
+ DoD Compliance: {'✅ Real AWS API validation complete' if 'billing' in self.sessions else '⚠️ Limited validation capabilities'}""",
+             title="Performance & Compliance",
+             border_style="green" if performance_met else "yellow"
+         )
+
+         console.print(perf_panel)
+
+         # Manager's priorities assessment
+         priorities = validation_results.get('manager_priorities_assessment', {})
+         if priorities:
+             self._display_manager_priorities_assessment(priorities)
+
+         # Cross-validation results
+         cross_val = validation_results.get('cross_validation', {})
+         if cross_val and cross_val.get('validations'):
+             self._display_cross_validation_results(cross_val)
+
+         # Cost data summary
+         cost_data = validation_results.get('cost_data', {})
+         if cost_data.get('billing_data'):
+             self._display_cost_data_summary(cost_data)
+
+     def _display_manager_priorities_assessment(self, priorities_assessment: Dict) -> None:
+         """Display manager's priorities assessment."""
+
+         overall = priorities_assessment.get('overall_alignment', {})
+         alignment_score = overall.get('alignment_score', 0)
+
+         priorities_table = create_table(
+             title=f"💼 Manager's AWSO Priorities Assessment (Overall: {alignment_score:.1f}%)",
+             columns=[
+                 {"name": "Priority", "style": "bold cyan"},
+                 {"name": "Target", "style": "white"},
+                 {"name": "Projected", "style": "bright_green"},
+                 {"name": "Status", "style": "yellow"},
+                 {"name": "Timeline", "style": "magenta"}
+             ]
+         )
+
+         priorities = priorities_assessment.get('priorities', {})
+         for priority_name, priority_data in priorities.items():
+             priority_display = priority_name.replace('_', ' ').title()
+
+             # Format target based on priority type
+             if priority_name == 'workspaces_cleanup':
+                 target_display = f"${priority_data.get('target_annual_savings', 0):,}/year"
+                 projected_display = f"${priority_data.get('projected_annual_savings', 0):,}/year"
+                 status_display = priority_data.get('status', 'unknown').replace('_', ' ').title()
+                 timeline_display = priority_data.get('implementation_timeline', 'TBD')
+
+             elif priority_name == 'nat_gateway_optimization':
+                 target_display = f"{priority_data.get('target_completion_percent', 0)}% completion"
+                 projected_display = f"${priority_data.get('projected_annual_savings', 0):,}/year"
+                 status_display = priority_data.get('status', 'unknown').replace('_', ' ').title()
+                 timeline_display = "6-8 weeks"
+
+             elif priority_name == 'rds_optimization':
+                 target_range = priority_data.get('target_savings_range', {})
+                 target_display = f"${target_range.get('min', 0):,}-${target_range.get('max', 0):,}/year"
+                 projected_display = f"${priority_data.get('projected_annual_savings', 0):,}/year"
+                 status_display = priority_data.get('status', 'unknown').replace('_', ' ').title()
+                 timeline_display = "10-12 weeks"
+
+             else:
+                 target_display = "TBD"
+                 projected_display = "TBD"
+                 status_display = "Unknown"
+                 timeline_display = "TBD"
+
+             priorities_table.add_row(
+                 f"#{priority_data.get('priority_rank', 0)} {priority_display}",
+                 target_display,
+                 projected_display,
+                 status_display,
+                 timeline_display
+             )
+
+         console.print(priorities_table)
+
+         # Recommendations
+         recommendations = priorities_assessment.get('recommendations', [])
+         if recommendations:
+             rec_panel = create_panel(
+                 f"""📋 Implementation Recommendations
+
+ {chr(10).join([f" • {rec}" for rec in recommendations])}
+
+ 💰 Total Projected Annual Savings: ${overall.get('total_projected_annual_savings', 0):,}
+ 🎯 Alignment Status: {overall.get('status', 'Unknown').title()}""",
+                 title="Executive Recommendations",
+                 border_style="bright_blue"
+             )
+
+             console.print(rec_panel)
+
+     def _display_cross_validation_results(self, cross_validation: Dict) -> None:
+         """Display cross-validation results."""
+
+         validation_table = create_table(
+             title=f"🔍 Cross-Validation Analysis (±{self.tolerance_percent}% tolerance)",
+             columns=[
+                 {"name": "Validation", "style": "bold white"},
+                 {"name": "Cost Explorer", "style": "bright_green"},
+                 {"name": "Notebook", "style": "yellow"},
+                 {"name": "Variance", "style": "cyan"},
+                 {"name": "Status", "style": "magenta"}
+             ]
+         )
+
+         for validation in cross_validation.get('validations', []):
+             item_name = validation.get('validation_item', 'Unknown').replace('_', ' ').title()
+
+             ce_value = validation.get('cost_explorer_value', 0)
+             nb_value = validation.get('notebook_value', 0)
+             variance = validation.get('variance_percent', 0)
+             status = validation.get('status', 'unknown')
+
+             ce_display = f"${ce_value:,.2f}" if ce_value > 0 else "N/A"
+             nb_display = f"${nb_value:,.2f}" if nb_value > 0 else "N/A"
+             variance_display = f"{variance:.1f}%" if variance > 0 else "N/A"
+
+             status_display = {
+                 'validated': '✅ Validated',
+                 'variance_detected': '⚠️ Variance',
+                 'insufficient_data': '📊 Insufficient'
+             }.get(status, status.title())
+
+             validation_table.add_row(
+                 item_name,
+                 ce_display,
+                 nb_display,
+                 variance_display,
+                 status_display
+             )
+
+         console.print(validation_table)
+
+     def _display_cost_data_summary(self, cost_data: Dict) -> None:
+         """Display cost data summary."""
+
+         billing_data = cost_data.get('billing_data', {})
+
+         cost_panel = create_panel(
+             f"""💰 Cost Analysis Summary
+
+ Total Cost (Analysis Period): ${billing_data.get('total_cost', 0):,.2f}
+ Average Monthly Cost: ${billing_data.get('average_monthly_cost', 0):,.2f}
+ Analysis Period: {billing_data.get('analysis_period', {}).get('days', 0)} days
+
+ Data Source: AWS Cost Explorer API (Real-time)
+ Account Filter: {cost_data.get('account_filter', 'All accounts')}
+ Retrieval Status: {'✅ Successful' if not cost_data.get('errors') else '⚠️ With errors'}""",
+             title="Cost Data Summary",
+             border_style="bright_green"
+         )
+
+         console.print(cost_panel)
+
+
+ # Export main class for integration
+ __all__ = [
+     'MCPCostExplorerIntegration'
+ ]
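
For orientation, a minimal sketch (not part of the released package) of how the newly added MCPCostExplorerIntegration class could be driven from a script or notebook, using only the constructor and coroutine methods shown in the diff above; the profile override name is a placeholder for a locally configured AWS profile.

# Illustrative usage sketch -- assumes runbooks 0.9.1 is installed and AWS credentials exist locally.
import asyncio

from runbooks.common.mcp_cost_explorer_integration import MCPCostExplorerIntegration


async def main() -> None:
    # Constructor arguments mirror the defaults in the module (5% variance tolerance, 30s target).
    integration = MCPCostExplorerIntegration(tolerance_percent=5.0, performance_target_seconds=30.0)

    # Resolve and validate the billing/management/single-account sessions first.
    # "my-readonly-profile" is a hypothetical placeholder profile name.
    await integration.initialize_profiles(user_profile_override="my-readonly-profile")

    # Run the Cost Explorer cross-validation; notebook_results and account_filter are optional.
    results = await integration.validate_cost_data_with_cross_validation(analysis_days=90)
    print(results["performance_metrics"])


if __name__ == "__main__":
    asyncio.run(main())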