runbooks-1.1.4-py3-none-any.whl → runbooks-1.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +31 -2
- runbooks/__init___optimized.py +18 -4
- runbooks/_platform/__init__.py +1 -5
- runbooks/_platform/core/runbooks_wrapper.py +141 -138
- runbooks/aws2/accuracy_validator.py +812 -0
- runbooks/base.py +7 -0
- runbooks/cfat/assessment/compliance.py +1 -1
- runbooks/cfat/assessment/runner.py +1 -0
- runbooks/cfat/cloud_foundations_assessment.py +227 -239
- runbooks/cli/__init__.py +1 -1
- runbooks/cli/commands/cfat.py +64 -23
- runbooks/cli/commands/finops.py +1005 -54
- runbooks/cli/commands/inventory.py +135 -91
- runbooks/cli/commands/operate.py +9 -36
- runbooks/cli/commands/security.py +42 -18
- runbooks/cli/commands/validation.py +432 -18
- runbooks/cli/commands/vpc.py +81 -17
- runbooks/cli/registry.py +22 -10
- runbooks/cloudops/__init__.py +20 -27
- runbooks/cloudops/base.py +96 -107
- runbooks/cloudops/cost_optimizer.py +544 -542
- runbooks/cloudops/infrastructure_optimizer.py +5 -4
- runbooks/cloudops/interfaces.py +224 -225
- runbooks/cloudops/lifecycle_manager.py +5 -4
- runbooks/cloudops/mcp_cost_validation.py +252 -235
- runbooks/cloudops/models.py +78 -53
- runbooks/cloudops/monitoring_automation.py +5 -4
- runbooks/cloudops/notebook_framework.py +177 -213
- runbooks/cloudops/security_enforcer.py +125 -159
- runbooks/common/accuracy_validator.py +17 -12
- runbooks/common/aws_pricing.py +349 -326
- runbooks/common/aws_pricing_api.py +211 -212
- runbooks/common/aws_profile_manager.py +40 -36
- runbooks/common/aws_utils.py +74 -79
- runbooks/common/business_logic.py +126 -104
- runbooks/common/cli_decorators.py +36 -60
- runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
- runbooks/common/cross_account_manager.py +197 -204
- runbooks/common/date_utils.py +27 -39
- runbooks/common/decorators.py +29 -19
- runbooks/common/dry_run_examples.py +173 -208
- runbooks/common/dry_run_framework.py +157 -155
- runbooks/common/enhanced_exception_handler.py +15 -4
- runbooks/common/enhanced_logging_example.py +50 -64
- runbooks/common/enhanced_logging_integration_example.py +65 -37
- runbooks/common/env_utils.py +16 -16
- runbooks/common/error_handling.py +40 -38
- runbooks/common/lazy_loader.py +41 -23
- runbooks/common/logging_integration_helper.py +79 -86
- runbooks/common/mcp_cost_explorer_integration.py +476 -493
- runbooks/common/mcp_integration.py +99 -79
- runbooks/common/memory_optimization.py +140 -118
- runbooks/common/module_cli_base.py +37 -58
- runbooks/common/organizations_client.py +175 -193
- runbooks/common/patterns.py +23 -25
- runbooks/common/performance_monitoring.py +67 -71
- runbooks/common/performance_optimization_engine.py +283 -274
- runbooks/common/profile_utils.py +111 -37
- runbooks/common/rich_utils.py +315 -141
- runbooks/common/sre_performance_suite.py +177 -186
- runbooks/enterprise/__init__.py +1 -1
- runbooks/enterprise/logging.py +144 -106
- runbooks/enterprise/security.py +187 -204
- runbooks/enterprise/validation.py +43 -56
- runbooks/finops/__init__.py +26 -30
- runbooks/finops/account_resolver.py +1 -1
- runbooks/finops/advanced_optimization_engine.py +980 -0
- runbooks/finops/automation_core.py +268 -231
- runbooks/finops/business_case_config.py +184 -179
- runbooks/finops/cli.py +660 -139
- runbooks/finops/commvault_ec2_analysis.py +157 -164
- runbooks/finops/compute_cost_optimizer.py +336 -320
- runbooks/finops/config.py +20 -20
- runbooks/finops/cost_optimizer.py +484 -618
- runbooks/finops/cost_processor.py +332 -214
- runbooks/finops/dashboard_runner.py +1006 -172
- runbooks/finops/ebs_cost_optimizer.py +991 -657
- runbooks/finops/elastic_ip_optimizer.py +317 -257
- runbooks/finops/enhanced_mcp_integration.py +340 -0
- runbooks/finops/enhanced_progress.py +32 -29
- runbooks/finops/enhanced_trend_visualization.py +3 -2
- runbooks/finops/enterprise_wrappers.py +223 -285
- runbooks/finops/executive_export.py +203 -160
- runbooks/finops/helpers.py +130 -288
- runbooks/finops/iam_guidance.py +1 -1
- runbooks/finops/infrastructure/__init__.py +80 -0
- runbooks/finops/infrastructure/commands.py +506 -0
- runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
- runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
- runbooks/finops/markdown_exporter.py +337 -174
- runbooks/finops/mcp_validator.py +1952 -0
- runbooks/finops/nat_gateway_optimizer.py +1512 -481
- runbooks/finops/network_cost_optimizer.py +657 -587
- runbooks/finops/notebook_utils.py +226 -188
- runbooks/finops/optimization_engine.py +1136 -0
- runbooks/finops/optimizer.py +19 -23
- runbooks/finops/rds_snapshot_optimizer.py +367 -411
- runbooks/finops/reservation_optimizer.py +427 -363
- runbooks/finops/scenario_cli_integration.py +64 -65
- runbooks/finops/scenarios.py +1277 -438
- runbooks/finops/schemas.py +218 -182
- runbooks/finops/snapshot_manager.py +2289 -0
- runbooks/finops/types.py +3 -3
- runbooks/finops/validation_framework.py +259 -265
- runbooks/finops/vpc_cleanup_exporter.py +189 -144
- runbooks/finops/vpc_cleanup_optimizer.py +591 -573
- runbooks/finops/workspaces_analyzer.py +171 -182
- runbooks/integration/__init__.py +89 -0
- runbooks/integration/mcp_integration.py +1920 -0
- runbooks/inventory/CLAUDE.md +816 -0
- runbooks/inventory/__init__.py +2 -2
- runbooks/inventory/aws_decorators.py +2 -3
- runbooks/inventory/check_cloudtrail_compliance.py +2 -4
- runbooks/inventory/check_controltower_readiness.py +152 -151
- runbooks/inventory/check_landingzone_readiness.py +85 -84
- runbooks/inventory/cloud_foundations_integration.py +144 -149
- runbooks/inventory/collectors/aws_comprehensive.py +1 -1
- runbooks/inventory/collectors/aws_networking.py +109 -99
- runbooks/inventory/collectors/base.py +4 -0
- runbooks/inventory/core/collector.py +495 -313
- runbooks/inventory/core/formatter.py +11 -0
- runbooks/inventory/draw_org_structure.py +8 -9
- runbooks/inventory/drift_detection_cli.py +69 -96
- runbooks/inventory/ec2_vpc_utils.py +2 -2
- runbooks/inventory/find_cfn_drift_detection.py +5 -7
- runbooks/inventory/find_cfn_orphaned_stacks.py +7 -9
- runbooks/inventory/find_cfn_stackset_drift.py +5 -6
- runbooks/inventory/find_ec2_security_groups.py +48 -42
- runbooks/inventory/find_landingzone_versions.py +4 -6
- runbooks/inventory/find_vpc_flow_logs.py +7 -9
- runbooks/inventory/inventory_mcp_cli.py +48 -46
- runbooks/inventory/inventory_modules.py +103 -91
- runbooks/inventory/list_cfn_stacks.py +9 -10
- runbooks/inventory/list_cfn_stackset_operation_results.py +1 -3
- runbooks/inventory/list_cfn_stackset_operations.py +79 -57
- runbooks/inventory/list_cfn_stacksets.py +8 -10
- runbooks/inventory/list_config_recorders_delivery_channels.py +49 -39
- runbooks/inventory/list_ds_directories.py +65 -53
- runbooks/inventory/list_ec2_availability_zones.py +2 -4
- runbooks/inventory/list_ec2_ebs_volumes.py +32 -35
- runbooks/inventory/list_ec2_instances.py +23 -28
- runbooks/inventory/list_ecs_clusters_and_tasks.py +26 -34
- runbooks/inventory/list_elbs_load_balancers.py +22 -20
- runbooks/inventory/list_enis_network_interfaces.py +26 -33
- runbooks/inventory/list_guardduty_detectors.py +2 -4
- runbooks/inventory/list_iam_policies.py +2 -4
- runbooks/inventory/list_iam_roles.py +5 -7
- runbooks/inventory/list_iam_saml_providers.py +4 -6
- runbooks/inventory/list_lambda_functions.py +38 -38
- runbooks/inventory/list_org_accounts.py +6 -8
- runbooks/inventory/list_org_accounts_users.py +55 -44
- runbooks/inventory/list_rds_db_instances.py +31 -33
- runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
- runbooks/inventory/list_route53_hosted_zones.py +3 -5
- runbooks/inventory/list_servicecatalog_provisioned_products.py +37 -41
- runbooks/inventory/list_sns_topics.py +2 -4
- runbooks/inventory/list_ssm_parameters.py +4 -7
- runbooks/inventory/list_vpc_subnets.py +2 -4
- runbooks/inventory/list_vpcs.py +7 -10
- runbooks/inventory/mcp_inventory_validator.py +554 -468
- runbooks/inventory/mcp_vpc_validator.py +359 -442
- runbooks/inventory/organizations_discovery.py +63 -55
- runbooks/inventory/recover_cfn_stack_ids.py +7 -8
- runbooks/inventory/requirements.txt +0 -1
- runbooks/inventory/rich_inventory_display.py +35 -34
- runbooks/inventory/run_on_multi_accounts.py +3 -5
- runbooks/inventory/unified_validation_engine.py +281 -253
- runbooks/inventory/verify_ec2_security_groups.py +1 -1
- runbooks/inventory/vpc_analyzer.py +735 -697
- runbooks/inventory/vpc_architecture_validator.py +293 -348
- runbooks/inventory/vpc_dependency_analyzer.py +384 -380
- runbooks/inventory/vpc_flow_analyzer.py +1 -1
- runbooks/main.py +49 -34
- runbooks/main_final.py +91 -60
- runbooks/main_minimal.py +22 -10
- runbooks/main_optimized.py +131 -100
- runbooks/main_ultra_minimal.py +7 -2
- runbooks/mcp/__init__.py +36 -0
- runbooks/mcp/integration.py +679 -0
- runbooks/monitoring/performance_monitor.py +9 -4
- runbooks/operate/dynamodb_operations.py +3 -1
- runbooks/operate/ec2_operations.py +145 -137
- runbooks/operate/iam_operations.py +146 -152
- runbooks/operate/networking_cost_heatmap.py +29 -8
- runbooks/operate/rds_operations.py +223 -254
- runbooks/operate/s3_operations.py +107 -118
- runbooks/operate/vpc_operations.py +646 -616
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/commons.py +10 -7
- runbooks/remediation/commvault_ec2_analysis.py +70 -66
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
- runbooks/remediation/multi_account.py +24 -21
- runbooks/remediation/rds_snapshot_list.py +86 -60
- runbooks/remediation/remediation_cli.py +92 -146
- runbooks/remediation/universal_account_discovery.py +83 -79
- runbooks/remediation/workspaces_list.py +46 -41
- runbooks/security/__init__.py +19 -0
- runbooks/security/assessment_runner.py +1150 -0
- runbooks/security/baseline_checker.py +812 -0
- runbooks/security/cloudops_automation_security_validator.py +509 -535
- runbooks/security/compliance_automation_engine.py +17 -17
- runbooks/security/config/__init__.py +2 -2
- runbooks/security/config/compliance_config.py +50 -50
- runbooks/security/config_template_generator.py +63 -76
- runbooks/security/enterprise_security_framework.py +1 -1
- runbooks/security/executive_security_dashboard.py +519 -508
- runbooks/security/multi_account_security_controls.py +959 -1210
- runbooks/security/real_time_security_monitor.py +422 -444
- runbooks/security/security_baseline_tester.py +1 -1
- runbooks/security/security_cli.py +143 -112
- runbooks/security/test_2way_validation.py +439 -0
- runbooks/security/two_way_validation_framework.py +852 -0
- runbooks/sre/production_monitoring_framework.py +167 -177
- runbooks/tdd/__init__.py +15 -0
- runbooks/tdd/cli.py +1071 -0
- runbooks/utils/__init__.py +14 -17
- runbooks/utils/logger.py +7 -2
- runbooks/utils/version_validator.py +50 -47
- runbooks/validation/__init__.py +6 -6
- runbooks/validation/cli.py +9 -3
- runbooks/validation/comprehensive_2way_validator.py +745 -704
- runbooks/validation/mcp_validator.py +906 -228
- runbooks/validation/terraform_citations_validator.py +104 -115
- runbooks/validation/terraform_drift_detector.py +461 -454
- runbooks/vpc/README.md +617 -0
- runbooks/vpc/__init__.py +8 -1
- runbooks/vpc/analyzer.py +577 -0
- runbooks/vpc/cleanup_wrapper.py +476 -413
- runbooks/vpc/cli_cloudtrail_commands.py +339 -0
- runbooks/vpc/cli_mcp_validation_commands.py +480 -0
- runbooks/vpc/cloudtrail_audit_integration.py +717 -0
- runbooks/vpc/config.py +92 -97
- runbooks/vpc/cost_engine.py +411 -148
- runbooks/vpc/cost_explorer_integration.py +553 -0
- runbooks/vpc/cross_account_session.py +101 -106
- runbooks/vpc/enhanced_mcp_validation.py +917 -0
- runbooks/vpc/eni_gate_validator.py +961 -0
- runbooks/vpc/heatmap_engine.py +185 -160
- runbooks/vpc/mcp_no_eni_validator.py +680 -639
- runbooks/vpc/nat_gateway_optimizer.py +358 -0
- runbooks/vpc/networking_wrapper.py +15 -8
- runbooks/vpc/pdca_remediation_planner.py +528 -0
- runbooks/vpc/performance_optimized_analyzer.py +219 -231
- runbooks/vpc/runbooks_adapter.py +1167 -241
- runbooks/vpc/tdd_red_phase_stubs.py +601 -0
- runbooks/vpc/test_data_loader.py +358 -0
- runbooks/vpc/tests/conftest.py +314 -4
- runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
- runbooks/vpc/tests/test_cost_engine.py +0 -2
- runbooks/vpc/topology_generator.py +326 -0
- runbooks/vpc/unified_scenarios.py +1297 -1124
- runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
- runbooks-1.1.6.dist-info/METADATA +327 -0
- runbooks-1.1.6.dist-info/RECORD +489 -0
- runbooks/finops/README.md +0 -414
- runbooks/finops/accuracy_cross_validator.py +0 -647
- runbooks/finops/business_cases.py +0 -950
- runbooks/finops/dashboard_router.py +0 -922
- runbooks/finops/ebs_optimizer.py +0 -973
- runbooks/finops/embedded_mcp_validator.py +0 -1629
- runbooks/finops/enhanced_dashboard_runner.py +0 -527
- runbooks/finops/finops_dashboard.py +0 -584
- runbooks/finops/finops_scenarios.py +0 -1218
- runbooks/finops/legacy_migration.py +0 -730
- runbooks/finops/multi_dashboard.py +0 -1519
- runbooks/finops/single_dashboard.py +0 -1113
- runbooks/finops/unlimited_scenarios.py +0 -393
- runbooks-1.1.4.dist-info/METADATA +0 -800
- runbooks-1.1.4.dist-info/RECORD +0 -468
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/WHEEL +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/entry_points.txt +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/top_level.txt +0 -0
runbooks/common/mcp_cost_explorer_integration.py (+476 -493)

@@ -37,13 +37,23 @@ from botocore.exceptions import ClientError, NoCredentialsError
 
 # Rich CLI integration
 from runbooks.common.rich_utils import (
-    console,
-    … (old-side import list not recoverable from the rendered diff)
+    console,
+    print_header,
+    print_success,
+    print_error,
+    print_warning,
+    print_info,
+    create_table,
+    create_panel,
+    format_cost,
+    create_progress_bar,
+    STATUS_INDICATORS,
 )
 
 # Profile management
 try:
     from runbooks.common.profile_utils import get_profile_for_operation
+
     PROFILE_UTILS_AVAILABLE = True
 except ImportError:
     PROFILE_UTILS_AVAILABLE = False

@@ -56,21 +66,23 @@ logger = logging.getLogger(__name__)
@@ -78,84 +90,81 @@ class MCPCostExplorerIntegration:
These hunks reflow MCPCostExplorerIntegration.__init__ onto one parameter per line and rewrite its configuration with double-quoted keys (new side):

    def __init__(
        self,
        billing_profile: Optional[str] = None,
        management_profile: Optional[str] = None,
        single_account_profile: Optional[str] = None,
        tolerance_percent: float = 5.0,
        performance_target_seconds: float = 30.0,
    ):
        ...
        self.manager_priorities = {
            "workspaces_cleanup": {"target_annual_savings": 12518, "priority_rank": 1, "confidence_required": 95},
            "nat_gateway_optimization": {
                "completion_target_percent": 95,
                "priority_rank": 2,
                "baseline_completion": 75,
            },
            "rds_optimization": {
                "savings_range": {"min": 5000, "max": 24000},
                "priority_rank": 3,
                "timeline_weeks": 12,
            },
        }

@@ -164,606 +173,588 @@ class MCPCostExplorerIntegration:
The bulk of the module — initialize_profiles(), validate_cost_data_with_cross_validation(), _retrieve_cost_explorer_data(), _cross_validate_results(), _discover_optimization_resources(), and _assess_manager_priorities() — is reformatted in the same style: single-quoted strings become double-quoted, dict and call arguments gain trailing commas, and multi-line literals are reflowed. The recoverable new-side logic is unchanged: Cost Explorer is queried through get_cost_and_usage with MONTHLY granularity and BlendedCost/UnblendedCost metrics (filtered by LINKED_ACCOUNT and grouped by SERVICE for a single account, otherwise grouped by LINKED_ACCOUNT); cross-validation compares the Cost Explorer average monthly figure against notebook results within tolerance_percent; and resource discovery keeps the same cost assumptions (NAT gateways at ~$45/month with 75% optimization potential, WorkSpaces at ~$35/month with 60% optimization capped at the $12,518/year target, and Multi-AZ RDS at ~$800/month, ~$9.6K/year, per instance).

@@ -771,69 +762,69 @@
@@ -841,61 +832,53 @@
The Rich display helpers (_display_manager_priorities_assessment, _display_cross_validation_results, _display_cost_data_summary) receive the same quote-style and trailing-comma changes, and the module export collapses to a single line:

-__all__ = [
-    'MCPCostExplorerIntegration'
-]
+__all__ = ["MCPCostExplorerIntegration"]