runbooks-1.1.4-py3-none-any.whl → runbooks-1.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +31 -2
- runbooks/__init___optimized.py +18 -4
- runbooks/_platform/__init__.py +1 -5
- runbooks/_platform/core/runbooks_wrapper.py +141 -138
- runbooks/aws2/accuracy_validator.py +812 -0
- runbooks/base.py +7 -0
- runbooks/cfat/assessment/compliance.py +1 -1
- runbooks/cfat/assessment/runner.py +1 -0
- runbooks/cfat/cloud_foundations_assessment.py +227 -239
- runbooks/cli/__init__.py +1 -1
- runbooks/cli/commands/cfat.py +64 -23
- runbooks/cli/commands/finops.py +1005 -54
- runbooks/cli/commands/inventory.py +138 -35
- runbooks/cli/commands/operate.py +9 -36
- runbooks/cli/commands/security.py +42 -18
- runbooks/cli/commands/validation.py +432 -18
- runbooks/cli/commands/vpc.py +81 -17
- runbooks/cli/registry.py +22 -10
- runbooks/cloudops/__init__.py +20 -27
- runbooks/cloudops/base.py +96 -107
- runbooks/cloudops/cost_optimizer.py +544 -542
- runbooks/cloudops/infrastructure_optimizer.py +5 -4
- runbooks/cloudops/interfaces.py +224 -225
- runbooks/cloudops/lifecycle_manager.py +5 -4
- runbooks/cloudops/mcp_cost_validation.py +252 -235
- runbooks/cloudops/models.py +78 -53
- runbooks/cloudops/monitoring_automation.py +5 -4
- runbooks/cloudops/notebook_framework.py +177 -213
- runbooks/cloudops/security_enforcer.py +125 -159
- runbooks/common/accuracy_validator.py +11 -0
- runbooks/common/aws_pricing.py +349 -326
- runbooks/common/aws_pricing_api.py +211 -212
- runbooks/common/aws_profile_manager.py +40 -36
- runbooks/common/aws_utils.py +74 -79
- runbooks/common/business_logic.py +126 -104
- runbooks/common/cli_decorators.py +36 -60
- runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
- runbooks/common/cross_account_manager.py +197 -204
- runbooks/common/date_utils.py +27 -39
- runbooks/common/decorators.py +29 -19
- runbooks/common/dry_run_examples.py +173 -208
- runbooks/common/dry_run_framework.py +157 -155
- runbooks/common/enhanced_exception_handler.py +15 -4
- runbooks/common/enhanced_logging_example.py +50 -64
- runbooks/common/enhanced_logging_integration_example.py +65 -37
- runbooks/common/env_utils.py +16 -16
- runbooks/common/error_handling.py +40 -38
- runbooks/common/lazy_loader.py +41 -23
- runbooks/common/logging_integration_helper.py +79 -86
- runbooks/common/mcp_cost_explorer_integration.py +476 -493
- runbooks/common/mcp_integration.py +63 -74
- runbooks/common/memory_optimization.py +140 -118
- runbooks/common/module_cli_base.py +37 -58
- runbooks/common/organizations_client.py +175 -193
- runbooks/common/patterns.py +23 -25
- runbooks/common/performance_monitoring.py +67 -71
- runbooks/common/performance_optimization_engine.py +283 -274
- runbooks/common/profile_utils.py +111 -37
- runbooks/common/rich_utils.py +201 -141
- runbooks/common/sre_performance_suite.py +177 -186
- runbooks/enterprise/__init__.py +1 -1
- runbooks/enterprise/logging.py +144 -106
- runbooks/enterprise/security.py +187 -204
- runbooks/enterprise/validation.py +43 -56
- runbooks/finops/__init__.py +26 -30
- runbooks/finops/account_resolver.py +1 -1
- runbooks/finops/advanced_optimization_engine.py +980 -0
- runbooks/finops/automation_core.py +268 -231
- runbooks/finops/business_case_config.py +184 -179
- runbooks/finops/cli.py +660 -139
- runbooks/finops/commvault_ec2_analysis.py +157 -164
- runbooks/finops/compute_cost_optimizer.py +336 -320
- runbooks/finops/config.py +20 -20
- runbooks/finops/cost_optimizer.py +484 -618
- runbooks/finops/cost_processor.py +332 -214
- runbooks/finops/dashboard_runner.py +1006 -172
- runbooks/finops/ebs_cost_optimizer.py +991 -657
- runbooks/finops/elastic_ip_optimizer.py +317 -257
- runbooks/finops/enhanced_mcp_integration.py +340 -0
- runbooks/finops/enhanced_progress.py +32 -29
- runbooks/finops/enhanced_trend_visualization.py +3 -2
- runbooks/finops/enterprise_wrappers.py +223 -285
- runbooks/finops/executive_export.py +203 -160
- runbooks/finops/helpers.py +130 -288
- runbooks/finops/iam_guidance.py +1 -1
- runbooks/finops/infrastructure/__init__.py +80 -0
- runbooks/finops/infrastructure/commands.py +506 -0
- runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
- runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
- runbooks/finops/markdown_exporter.py +337 -174
- runbooks/finops/mcp_validator.py +1952 -0
- runbooks/finops/nat_gateway_optimizer.py +1512 -481
- runbooks/finops/network_cost_optimizer.py +657 -587
- runbooks/finops/notebook_utils.py +226 -188
- runbooks/finops/optimization_engine.py +1136 -0
- runbooks/finops/optimizer.py +19 -23
- runbooks/finops/rds_snapshot_optimizer.py +367 -411
- runbooks/finops/reservation_optimizer.py +427 -363
- runbooks/finops/scenario_cli_integration.py +64 -65
- runbooks/finops/scenarios.py +1277 -438
- runbooks/finops/schemas.py +218 -182
- runbooks/finops/snapshot_manager.py +2289 -0
- runbooks/finops/types.py +3 -3
- runbooks/finops/validation_framework.py +259 -265
- runbooks/finops/vpc_cleanup_exporter.py +189 -144
- runbooks/finops/vpc_cleanup_optimizer.py +591 -573
- runbooks/finops/workspaces_analyzer.py +171 -182
- runbooks/integration/__init__.py +89 -0
- runbooks/integration/mcp_integration.py +1920 -0
- runbooks/inventory/CLAUDE.md +816 -0
- runbooks/inventory/__init__.py +2 -2
- runbooks/inventory/cloud_foundations_integration.py +144 -149
- runbooks/inventory/collectors/aws_comprehensive.py +1 -1
- runbooks/inventory/collectors/aws_networking.py +109 -99
- runbooks/inventory/collectors/base.py +4 -0
- runbooks/inventory/core/collector.py +495 -313
- runbooks/inventory/drift_detection_cli.py +69 -96
- runbooks/inventory/inventory_mcp_cli.py +48 -46
- runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
- runbooks/inventory/mcp_inventory_validator.py +549 -465
- runbooks/inventory/mcp_vpc_validator.py +359 -442
- runbooks/inventory/organizations_discovery.py +55 -51
- runbooks/inventory/rich_inventory_display.py +33 -32
- runbooks/inventory/unified_validation_engine.py +278 -251
- runbooks/inventory/vpc_analyzer.py +732 -695
- runbooks/inventory/vpc_architecture_validator.py +293 -348
- runbooks/inventory/vpc_dependency_analyzer.py +382 -378
- runbooks/inventory/vpc_flow_analyzer.py +1 -1
- runbooks/main.py +49 -34
- runbooks/main_final.py +91 -60
- runbooks/main_minimal.py +22 -10
- runbooks/main_optimized.py +131 -100
- runbooks/main_ultra_minimal.py +7 -2
- runbooks/mcp/__init__.py +36 -0
- runbooks/mcp/integration.py +679 -0
- runbooks/monitoring/performance_monitor.py +9 -4
- runbooks/operate/dynamodb_operations.py +3 -1
- runbooks/operate/ec2_operations.py +145 -137
- runbooks/operate/iam_operations.py +146 -152
- runbooks/operate/networking_cost_heatmap.py +29 -8
- runbooks/operate/rds_operations.py +223 -254
- runbooks/operate/s3_operations.py +107 -118
- runbooks/operate/vpc_operations.py +646 -616
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/commons.py +10 -7
- runbooks/remediation/commvault_ec2_analysis.py +70 -66
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
- runbooks/remediation/multi_account.py +24 -21
- runbooks/remediation/rds_snapshot_list.py +86 -60
- runbooks/remediation/remediation_cli.py +92 -146
- runbooks/remediation/universal_account_discovery.py +83 -79
- runbooks/remediation/workspaces_list.py +46 -41
- runbooks/security/__init__.py +19 -0
- runbooks/security/assessment_runner.py +1150 -0
- runbooks/security/baseline_checker.py +812 -0
- runbooks/security/cloudops_automation_security_validator.py +509 -535
- runbooks/security/compliance_automation_engine.py +17 -17
- runbooks/security/config/__init__.py +2 -2
- runbooks/security/config/compliance_config.py +50 -50
- runbooks/security/config_template_generator.py +63 -76
- runbooks/security/enterprise_security_framework.py +1 -1
- runbooks/security/executive_security_dashboard.py +519 -508
- runbooks/security/multi_account_security_controls.py +959 -1210
- runbooks/security/real_time_security_monitor.py +422 -444
- runbooks/security/security_baseline_tester.py +1 -1
- runbooks/security/security_cli.py +143 -112
- runbooks/security/test_2way_validation.py +439 -0
- runbooks/security/two_way_validation_framework.py +852 -0
- runbooks/sre/production_monitoring_framework.py +167 -177
- runbooks/tdd/__init__.py +15 -0
- runbooks/tdd/cli.py +1071 -0
- runbooks/utils/__init__.py +14 -17
- runbooks/utils/logger.py +7 -2
- runbooks/utils/version_validator.py +50 -47
- runbooks/validation/__init__.py +6 -6
- runbooks/validation/cli.py +9 -3
- runbooks/validation/comprehensive_2way_validator.py +745 -704
- runbooks/validation/mcp_validator.py +906 -228
- runbooks/validation/terraform_citations_validator.py +104 -115
- runbooks/validation/terraform_drift_detector.py +447 -451
- runbooks/vpc/README.md +617 -0
- runbooks/vpc/__init__.py +8 -1
- runbooks/vpc/analyzer.py +577 -0
- runbooks/vpc/cleanup_wrapper.py +476 -413
- runbooks/vpc/cli_cloudtrail_commands.py +339 -0
- runbooks/vpc/cli_mcp_validation_commands.py +480 -0
- runbooks/vpc/cloudtrail_audit_integration.py +717 -0
- runbooks/vpc/config.py +92 -97
- runbooks/vpc/cost_engine.py +411 -148
- runbooks/vpc/cost_explorer_integration.py +553 -0
- runbooks/vpc/cross_account_session.py +101 -106
- runbooks/vpc/enhanced_mcp_validation.py +917 -0
- runbooks/vpc/eni_gate_validator.py +961 -0
- runbooks/vpc/heatmap_engine.py +185 -160
- runbooks/vpc/mcp_no_eni_validator.py +680 -639
- runbooks/vpc/nat_gateway_optimizer.py +358 -0
- runbooks/vpc/networking_wrapper.py +15 -8
- runbooks/vpc/pdca_remediation_planner.py +528 -0
- runbooks/vpc/performance_optimized_analyzer.py +219 -231
- runbooks/vpc/runbooks_adapter.py +1167 -241
- runbooks/vpc/tdd_red_phase_stubs.py +601 -0
- runbooks/vpc/test_data_loader.py +358 -0
- runbooks/vpc/tests/conftest.py +314 -4
- runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
- runbooks/vpc/tests/test_cost_engine.py +0 -2
- runbooks/vpc/topology_generator.py +326 -0
- runbooks/vpc/unified_scenarios.py +1297 -1124
- runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
- runbooks-1.1.5.dist-info/METADATA +328 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/RECORD +214 -193
- runbooks/finops/README.md +0 -414
- runbooks/finops/accuracy_cross_validator.py +0 -647
- runbooks/finops/business_cases.py +0 -950
- runbooks/finops/dashboard_router.py +0 -922
- runbooks/finops/ebs_optimizer.py +0 -973
- runbooks/finops/embedded_mcp_validator.py +0 -1629
- runbooks/finops/enhanced_dashboard_runner.py +0 -527
- runbooks/finops/finops_dashboard.py +0 -584
- runbooks/finops/finops_scenarios.py +0 -1218
- runbooks/finops/legacy_migration.py +0 -730
- runbooks/finops/multi_dashboard.py +0 -1519
- runbooks/finops/single_dashboard.py +0 -1113
- runbooks/finops/unlimited_scenarios.py +0 -393
- runbooks-1.1.4.dist-info/METADATA +0 -800
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/WHEEL +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/entry_points.txt +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/top_level.txt +0 -0
--- runbooks/cloudops/mcp_cost_validation.py (1.1.4)
+++ runbooks/cloudops/mcp_cost_validation.py (1.1.5)
@@ -8,7 +8,7 @@ CLI interfaces for comprehensive DoD validation.
 
 Key Capabilities:
 - Real-time Cost Explorer MCP validation
-- Cross-validation between notebook estimates and AWS APIs
+- Cross-validation between notebook estimates and AWS APIs
 - Technical CLI interfaces for automation testing
 - Comprehensive DoD evidence generation
 - Performance benchmarking with >99.9% reliability targets
@@ -39,22 +39,36 @@ import boto3
 from botocore.exceptions import ClientError
 
 from runbooks.common.rich_utils import (
-    console,
-
+    console,
+    print_header,
+    print_success,
+    print_error,
+    print_warning,
+    print_info,
+    create_table,
+    create_progress_bar,
+    format_cost,
+    create_panel,
+    STATUS_INDICATORS,
 )
 
 # Import MCP integration framework
-from
-    MCPIntegrationManager,
-
+from runbooks.mcp import (
+    MCPIntegrationManager,
+    CrossValidationEngine,
+    MCPAWSClient,
+    create_mcp_manager_for_single_account,
+    create_mcp_manager_for_multi_account,
 )
 
 from .models import BusinessScenario, ExecutionMode, RiskLevel, CostOptimizationResult
 from .cost_optimizer import CostOptimizer
 
+
 @dataclass
 class MCPValidationResult:
     """Result structure for MCP validation operations."""
+
     scenario_name: str
     validation_timestamp: datetime
     mcp_enabled: bool
@@ -69,9 +83,11 @@ class MCPValidationResult:
     performance_metrics: Dict[str, float]
     evidence_files: Dict[str, str]
 
-
+
+@dataclass
 class TechnicalTestResult:
     """Technical test result for CLI validation."""
+
     test_name: str
     success: bool
     execution_time_ms: float
@@ -82,27 +98,28 @@ class TechnicalTestResult:
     performance_benchmark_met: bool
     evidence_generated: bool
 
+
 class MCPCostValidationEngine:
     """
     MCP-validated cost optimization engine for technical CLI and business notebook usage.
-
+
     Provides comprehensive cost optimization validation with real AWS Cost Explorer data,
     cross-validation capabilities, and DoD-compliant evidence generation.
     """
-
+
     def __init__(
         self,
         billing_profile: str,
         management_profile: str,
         tolerance_percent: float = 5.0,
-        performance_target_ms: float = 30000.0 # 30 second target
+        performance_target_ms: float = 30000.0,  # 30 second target
     ):
         """
         Initialize MCP cost validation engine.
-
+
         Args:
             billing_profile: AWS profile with Cost Explorer access
-            management_profile: AWS profile with Organizations access
+            management_profile: AWS profile with Organizations access
             tolerance_percent: Variance tolerance for cross-validation
             performance_target_ms: Performance target in milliseconds
         """
@@ -110,103 +127,94 @@ class MCPCostValidationEngine:
         self.management_profile = management_profile
         self.tolerance_percent = tolerance_percent
         self.performance_target_ms = performance_target_ms
-
+
         # Initialize MCP integration manager
         self.mcp_manager = MCPIntegrationManager(
-            billing_profile=billing_profile,
-            management_profile=management_profile,
-            tolerance_percent=tolerance_percent
+            billing_profile=billing_profile, management_profile=management_profile, tolerance_percent=tolerance_percent
         )
-
+
         # Performance tracking
         self.start_time = time.time()
         self.api_calls_made = 0
         self.cost_data_points = 0
-
+
         # Evidence collection
         self.evidence_dir = Path("mcp-validation-evidence")
         self.evidence_dir.mkdir(parents=True, exist_ok=True)
-
+
         print_header("MCP Cost Validation Engine", "1.0.0")
         print_info(f"🔍 Cross-validation tolerance: ±{tolerance_percent}%")
-        print_info(f"⚡ Performance target: <{performance_target_ms/1000:.1f}s")
+        print_info(f"⚡ Performance target: <{performance_target_ms / 1000:.1f}s")
         print_info(f"📊 Evidence collection: {self.evidence_dir}")
 
     async def validate_cost_optimization_scenario(
-        self,
-        scenario_name: str,
-        cost_optimizer_params: Dict[str, Any],
-        expected_savings_range: Tuple[float, float]
+        self, scenario_name: str, cost_optimizer_params: Dict[str, Any], expected_savings_range: Tuple[float, float]
     ) -> TechnicalTestResult:
         """
         Validate a complete cost optimization scenario with MCP cross-validation.
-
+
         Args:
             scenario_name: Name of the cost optimization scenario
             cost_optimizer_params: Parameters for cost optimizer execution
             expected_savings_range: Expected savings range (min, max) for validation
-
+
         Returns:
             TechnicalTestResult with comprehensive validation results
         """
         test_start_time = time.time()
         print_info(f"🧪 Testing scenario: {scenario_name}")
-
+
         try:
             # Initialize cost optimizer
             cost_optimizer = CostOptimizer(
-                profile=cost_optimizer_params.get(
+                profile=cost_optimizer_params.get("profile", self.billing_profile),
                 dry_run=True,  # Always safe mode for validation
-                execution_mode=ExecutionMode.VALIDATE_ONLY
+                execution_mode=ExecutionMode.VALIDATE_ONLY,
             )
-
+
             # Execute cost optimization scenario
             if scenario_name.lower().startswith("nat_gateway"):
                 result = await cost_optimizer.optimize_nat_gateways(
-                    regions=cost_optimizer_params.get(
-                    idle_threshold_days=cost_optimizer_params.get(
-                    cost_threshold=cost_optimizer_params.get(
+                    regions=cost_optimizer_params.get("regions"),
+                    idle_threshold_days=cost_optimizer_params.get("idle_threshold_days", 7),
+                    cost_threshold=cost_optimizer_params.get("cost_threshold", 0.0),
                 )
             elif scenario_name.lower().startswith("ec2_idle"):
                 result = await cost_optimizer.optimize_idle_ec2_instances(
-                    regions=cost_optimizer_params.get(
-                    cpu_threshold=cost_optimizer_params.get(
-                    duration_hours=cost_optimizer_params.get(
-                    cost_threshold=cost_optimizer_params.get(
+                    regions=cost_optimizer_params.get("regions"),
+                    cpu_threshold=cost_optimizer_params.get("cpu_threshold", 5.0),
+                    duration_hours=cost_optimizer_params.get("duration_hours", 168),
+                    cost_threshold=cost_optimizer_params.get("cost_threshold", 10.0),
                 )
             elif scenario_name.lower().startswith("emergency_response"):
                 result = await cost_optimizer.emergency_cost_response(
-                    cost_spike_threshold=cost_optimizer_params.get(
-                    analysis_days=cost_optimizer_params.get(
+                    cost_spike_threshold=cost_optimizer_params.get("cost_spike_threshold", 25000.0),
+                    analysis_days=cost_optimizer_params.get("analysis_days", 7),
                 )
             else:
                 raise ValueError(f"Unknown scenario: {scenario_name}")
-
+
             # Extract cost optimization results
             notebook_total_cost = result.business_metrics.total_monthly_savings
             self.cost_data_points += len(result.resources_impacted)
-
+
             # Validate with MCP Cost Explorer
             mcp_validation = await self._cross_validate_with_mcp(
                 scenario_name=scenario_name,
-                notebook_result={
-                cost_optimizer_result=result
+                notebook_result={"cost_trends": {"total_monthly_spend": notebook_total_cost}},
+                cost_optimizer_result=result,
             )
-
+
             # Check if savings are within expected range
-            savings_in_range =
-
-            )
-
+            savings_in_range = expected_savings_range[0] <= notebook_total_cost <= expected_savings_range[1]
+
             # Calculate performance metrics
             execution_time_ms = (time.time() - test_start_time) * 1000
             performance_met = execution_time_ms <= self.performance_target_ms
-
+
             # Generate evidence
-            evidence_files = await self._generate_test_evidence(
-
-            )
-
+            evidence_files = await self._generate_test_evidence(scenario_name, result, mcp_validation)
+
             return TechnicalTestResult(
                 test_name=scenario_name,
                 success=result.success and savings_in_range and mcp_validation.variance_within_tolerance,
@@ -216,12 +224,12 @@ class MCPCostValidationEngine:
                 aws_api_calls=self.api_calls_made,
                 cost_data_points=self.cost_data_points,
                 performance_benchmark_met=performance_met,
-                evidence_generated=len(evidence_files) > 0
+                evidence_generated=len(evidence_files) > 0,
             )
-
+
         except Exception as e:
             execution_time_ms = (time.time() - test_start_time) * 1000
-
+
             return TechnicalTestResult(
                 test_name=scenario_name,
                 success=False,
@@ -231,56 +239,52 @@ class MCPCostValidationEngine:
                 aws_api_calls=self.api_calls_made,
                 cost_data_points=0,
                 performance_benchmark_met=execution_time_ms <= self.performance_target_ms,
-                evidence_generated=False
+                evidence_generated=False,
             )
 
     async def _cross_validate_with_mcp(
-        self,
-        scenario_name: str,
-        notebook_result: Dict[str, Any],
-        cost_optimizer_result: CostOptimizationResult
+        self, scenario_name: str, notebook_result: Dict[str, Any], cost_optimizer_result: CostOptimizationResult
     ) -> MCPValidationResult:
         """Cross-validate notebook results with MCP Cost Explorer data."""
         validation_start = time.time()
-
+
         print_info("🔍 Cross-validating with MCP Cost Explorer...")
-
+
         # Get MCP validation results
         validation_report = self.mcp_manager.validate_notebook_results(notebook_result)
-
+
         # Extract validation metrics
         cost_validations = [
-            v for v in validation_report.get(
-            if v.get('validation_type') == 'cost_data_cross_check'
+            v for v in validation_report.get("validations", []) if v.get("validation_type") == "cost_data_cross_check"
         ]
-
+
         if cost_validations:
             cost_validation = cost_validations[0]
-            variance_analysis = cost_validation.get(
-
-            notebook_total = variance_analysis.get(
-            mcp_total = variance_analysis.get(
-            variance_pct = variance_analysis.get(
+            variance_analysis = cost_validation.get("variance_analysis", {})
+
+            notebook_total = variance_analysis.get("notebook_total", 0.0)
+            mcp_total = variance_analysis.get("mcp_total", 0.0)
+            variance_pct = variance_analysis.get("variance_percent", 0.0)
             variance_within_tolerance = variance_pct <= self.tolerance_percent
-
+
         else:
             # No MCP validation available
-            notebook_total = notebook_result.get(
+            notebook_total = notebook_result.get("cost_trends", {}).get("total_monthly_spend", 0.0)
             mcp_total = 0.0
             variance_pct = 0.0
             variance_within_tolerance = False
-
+
         # Generate recommendations
-        recommendations = validation_report.get(
+        recommendations = validation_report.get("recommendations", [])
         if not recommendations:
             if variance_within_tolerance:
                 recommendations = ["✅ Data validated - proceed with confidence"]
             else:
                 recommendations = ["⚠️ Variance detected - investigate data sources"]
-
+
         # Performance metrics
         validation_time = time.time() - validation_start
-
+
         return MCPValidationResult(
             scenario_name=scenario_name,
             validation_timestamp=datetime.now(),
@@ -294,23 +298,20 @@ class MCPCostValidationEngine:
             tolerance_threshold=self.tolerance_percent,
             validation_recommendations=recommendations,
             performance_metrics={
-
-
-
+                "validation_time_seconds": validation_time,
+                "api_calls": 2,  # Estimate for Cost Explorer + Organizations
+                "data_freshness_minutes": 15,  # Cost Explorer data freshness
             },
-            evidence_files={}
+            evidence_files={},
         )
 
     async def _generate_test_evidence(
-        self,
-        scenario_name: str,
-        cost_result: CostOptimizationResult,
-        mcp_validation: MCPValidationResult
+        self, scenario_name: str, cost_result: CostOptimizationResult, mcp_validation: MCPValidationResult
     ) -> Dict[str, str]:
         """Generate comprehensive test evidence for DoD validation."""
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
         evidence_files = {}
-
+
         try:
             # Generate JSON evidence file
             evidence_data = {
@@ -322,119 +323,121 @@ class MCPCostValidationEngine:
                     "resources_analyzed": cost_result.resources_analyzed,
                     "resources_impacted": len(cost_result.resources_impacted),
                     "execution_mode": cost_result.execution_mode.value,
-                    "risk_level": cost_result.business_metrics.overall_risk_level.value
+                    "risk_level": cost_result.business_metrics.overall_risk_level.value,
                 },
                 "mcp_validation": asdict(mcp_validation),
                 "performance_metrics": {
                     "total_execution_time_seconds": time.time() - self.start_time,
                     "api_calls_made": self.api_calls_made,
-                    "cost_data_points": self.cost_data_points
+                    "cost_data_points": self.cost_data_points,
                 },
                 "dod_compliance": {
                     "real_aws_data_used": True,
                     "mcp_cross_validation": mcp_validation.mcp_enabled,
                     "variance_within_tolerance": mcp_validation.variance_within_tolerance,
                     "evidence_generated": True,
-                    "performance_target_met": mcp_validation.performance_metrics.get(
-                }
+                    "performance_target_met": mcp_validation.performance_metrics.get("validation_time_seconds", 0) < 30,
+                },
             }
-
+
             json_file = self.evidence_dir / f"{scenario_name}_{timestamp}.json"
-            with open(json_file,
+            with open(json_file, "w") as f:
                 json.dump(evidence_data, f, indent=2, default=str)
-            evidence_files[
-
+            evidence_files["json"] = str(json_file)
+
             # Generate CSV summary for business stakeholders
             csv_file = self.evidence_dir / f"{scenario_name}_summary_{timestamp}.csv"
-            with open(csv_file,
+            with open(csv_file, "w", newline="") as f:
                 writer = csv.writer(f)
-                writer.writerow(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                writer.writerow(
+                    [
+                        "Scenario",
+                        "Success",
+                        "Monthly Savings",
+                        "MCP Validated",
+                        "Variance %",
+                        "Performance Met",
+                        "Evidence Generated",
+                    ]
+                )
+                writer.writerow(
+                    [
+                        scenario_name,
+                        "YES" if cost_result.success else "NO",
+                        f"${cost_result.business_metrics.total_monthly_savings:,.2f}",
+                        "YES" if mcp_validation.mcp_enabled else "NO",
+                        f"{mcp_validation.variance_percentage:.2f}%",
+                        "YES" if mcp_validation.performance_metrics.get("validation_time_seconds", 0) < 30 else "NO",
+                        "YES",
+                    ]
+                )
+            evidence_files["csv"] = str(csv_file)
+
             print_success(f"📄 Evidence generated: {len(evidence_files)} files")
-
+
         except Exception as e:
             print_warning(f"Evidence generation encountered an issue: {str(e)}")
-            evidence_files[
-
+            evidence_files["error"] = str(e)
+
         return evidence_files
 
     async def run_comprehensive_cli_test_suite(self) -> List[TechnicalTestResult]:
         """
         Run comprehensive CLI test suite for technical users and DoD validation.
-
+
         Returns:
             List of TechnicalTestResult objects with detailed validation results
         """
         print_header("Comprehensive CLI Test Suite - Technical Validation")
-
+
         # Define test scenarios with business-realistic parameters
         test_scenarios = [
             {
-
-
-
-
-
-
+                "name": "nat_gateway_cost_optimization",
+                "params": {
+                    "profile": self.billing_profile,
+                    "regions": ["us-east-1", "us-west-2"],
+                    "idle_threshold_days": 7,
+                    "cost_threshold": 100.0,
                 },
-
+                "expected_savings_range": (0.0, 5000.0),  # 0-$5K/month realistic range
             },
             {
-
-
-
-
-
-
-
+                "name": "ec2_idle_instance_optimization",
+                "params": {
+                    "profile": self.billing_profile,
+                    "regions": ["us-east-1"],
+                    "cpu_threshold": 5.0,
+                    "duration_hours": 168,  # 7 days
+                    "cost_threshold": 50.0,
                 },
-
+                "expected_savings_range": (0.0, 10000.0),  # 0-$10K/month realistic range
             },
             {
-
-
-
-
-                'analysis_days': 7
-                },
-                'expected_savings_range': (5000.0, 15000.0) # measurable range/month emergency response
-            }
+                "name": "emergency_response_validation",
+                "params": {"profile": self.billing_profile, "cost_spike_threshold": 25000.0, "analysis_days": 7},
+                "expected_savings_range": (5000.0, 15000.0),  # measurable range/month emergency response
+            },
         ]
-
+
         test_results = []
-
+
         # Execute test scenarios with progress tracking
         with create_progress_bar() as progress:
-            task = progress.add_task(
-
-                total=len(test_scenarios)
-            )
-
+            task = progress.add_task("[cyan]Executing technical test scenarios...", total=len(test_scenarios))
+
             for scenario in test_scenarios:
                 print_info(f"🧪 Executing: {scenario['name']}")
-
+
                 result = await self.validate_cost_optimization_scenario(
-                    scenario_name=scenario[
-                    cost_optimizer_params=scenario[
-                    expected_savings_range=scenario[
+                    scenario_name=scenario["name"],
+                    cost_optimizer_params=scenario["params"],
+                    expected_savings_range=scenario["expected_savings_range"],
                 )
-
+
                 test_results.append(result)
                 progress.advance(task)
-
+
                 # Display individual test result
                 if result.success:
                     print_success(f"✅ {scenario['name']}: PASSED")
@@ -442,27 +445,27 @@ class MCPCostValidationEngine:
                     print_error(f"❌ {scenario['name']}: FAILED")
                     if result.error_message:
                         print_warning(f" Error: {result.error_message}")
-
+
         # Display comprehensive test summary
         self._display_test_suite_summary(test_results)
-
+
        return test_results
 
     def _display_test_suite_summary(self, test_results: List[TechnicalTestResult]) -> None:
         """Display comprehensive test suite summary with DoD validation metrics."""
-
+
         # Calculate aggregate metrics
         total_tests = len(test_results)
         passed_tests = sum(1 for r in test_results if r.success)
         failed_tests = total_tests - passed_tests
-
+
         total_execution_time = sum(r.execution_time_ms for r in test_results)
         avg_execution_time = total_execution_time / total_tests if total_tests > 0 else 0
-
+
         performance_met = sum(1 for r in test_results if r.performance_benchmark_met)
         mcp_validated = sum(1 for r in test_results if r.mcp_validation and r.mcp_validation.mcp_enabled)
         evidence_generated = sum(1 for r in test_results if r.evidence_generated)
-
+
         # Create summary table
         summary_table = create_table(
             title="Technical Test Suite Summary - DoD Validation",
@@ -472,38 +475,31 @@ class MCPCostValidationEngine:
                 {"name": "Execution (ms)", "style": "yellow"},
                 {"name": "MCP Validated", "style": "blue"},
                 {"name": "Performance", "style": "magenta"},
-                {"name": "Evidence", "style": "white"}
-            ]
+                {"name": "Evidence", "style": "white"},
+            ],
         )
-
+
         for result in test_results:
             status = "✅ PASS" if result.success else "❌ FAIL"
             mcp_status = "✅" if result.mcp_validation and result.mcp_validation.mcp_enabled else "❌"
             perf_status = "✅" if result.performance_benchmark_met else "❌"
             evidence_status = "✅" if result.evidence_generated else "❌"
-
+
             summary_table.add_row(
-                result.test_name,
-                status,
-                f"{result.execution_time_ms:.0f}ms",
-                mcp_status,
-                perf_status,
-                evidence_status
+                result.test_name, status, f"{result.execution_time_ms:.0f}ms", mcp_status, perf_status, evidence_status
             )
-
+
         console.print(summary_table)
-
+
         # Overall DoD compliance summary
-        dod_compliance_score = (
-
-        )
-
+        dod_compliance_score = (passed_tests / total_tests * 100) if total_tests > 0 else 0
+
         dod_panel = create_panel(
             f"""📊 DoD Validation Summary
 
✅ Test Results:
• Tests executed: {total_tests}
-• Tests passed: {passed_tests} ({passed_tests/total_tests*100:.1f}%)
+• Tests passed: {passed_tests} ({passed_tests / total_tests * 100:.1f}%)
• Tests failed: {failed_tests}
 
⚡ Performance Metrics:
@@ -513,20 +509,20 @@ class MCPCostValidationEngine:
 
🔍 MCP Validation:
• MCP cross-validation: {mcp_validated}/{total_tests}
-• Cost Explorer integration: {
-• Data accuracy validation: {
+• Cost Explorer integration: {"✅ Active" if mcp_validated > 0 else "❌ Inactive"}
+• Data accuracy validation: {"✅ Enabled" if mcp_validated > 0 else "❌ Disabled"}
 
📄 Evidence Generation:
• Evidence files created: {evidence_generated}/{total_tests}
-• DoD compliance documentation: {
+• DoD compliance documentation: {"✅ Complete" if evidence_generated == total_tests else "⚠️ Partial"}
 
🎯 Overall DoD Compliance Score: {dod_compliance_score:.1f}%""",
             title="DoD Validation Results",
-            border_style="green" if dod_compliance_score >= 90 else "yellow" if dod_compliance_score >= 70 else "red"
+            border_style="green" if dod_compliance_score >= 90 else "yellow" if dod_compliance_score >= 70 else "red",
         )
-
+
         console.print(dod_panel)
-
+
         # Success criteria evaluation
         if dod_compliance_score >= 90 and mcp_validated >= total_tests * 0.8:
             print_success("🎯 DoD VALIDATION COMPLETE - All criteria met")
@@ -542,7 +538,7 @@ class MCPCostValidationEngine:
         """Export comprehensive DoD validation report for technical documentation."""
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
         report_file = self.evidence_dir / f"dod_validation_report_{timestamp}.json"
-
+
         # Aggregate all validation data
         dod_report = {
             "validation_metadata": {
@@ -551,128 +547,149 @@ class MCPCostValidationEngine:
                 "billing_profile": self.billing_profile,
                 "management_profile": self.management_profile,
                 "tolerance_threshold": self.tolerance_percent,
-                "performance_target_ms": self.performance_target_ms
+                "performance_target_ms": self.performance_target_ms,
             },
             "test_execution_summary": {
                 "total_tests_executed": len(test_results),
                 "tests_passed": sum(1 for r in test_results if r.success),
                 "tests_failed": sum(1 for r in test_results if not r.success),
-                "overall_success_rate": (sum(1 for r in test_results if r.success) / len(test_results) * 100)
+                "overall_success_rate": (sum(1 for r in test_results if r.success) / len(test_results) * 100)
+                if test_results
+                else 0,
                 "total_execution_time_ms": sum(r.execution_time_ms for r in test_results),
-                "average_execution_time_ms": sum(r.execution_time_ms for r in test_results) / len(test_results)
+                "average_execution_time_ms": sum(r.execution_time_ms for r in test_results) / len(test_results)
+                if test_results
+                else 0,
             },
             "mcp_validation_metrics": {
-                "mcp_integrations_successful": sum(
-
-
-                "
+                "mcp_integrations_successful": sum(
+                    1 for r in test_results if r.mcp_validation and r.mcp_validation.mcp_enabled
+                ),
+                "cost_explorer_validations": sum(
+                    1 for r in test_results if r.mcp_validation and r.mcp_validation.cost_explorer_validated
+                ),
+                "variance_within_tolerance": sum(
+                    1 for r in test_results if r.mcp_validation and r.mcp_validation.variance_within_tolerance
+                ),
+                "average_variance_percentage": sum(
+                    r.mcp_validation.variance_percentage for r in test_results if r.mcp_validation
+                )
+                / len(test_results)
+                if test_results
+                else 0,
             },
             "performance_benchmarks": {
                 "performance_targets_met": sum(1 for r in test_results if r.performance_benchmark_met),
-                "performance_compliance_rate": (
+                "performance_compliance_rate": (
+                    sum(1 for r in test_results if r.performance_benchmark_met) / len(test_results) * 100
+                )
+                if test_results
+                else 0,
                 "aws_api_calls_total": sum(r.aws_api_calls for r in test_results),
-                "cost_data_points_analyzed": sum(r.cost_data_points for r in test_results)
+                "cost_data_points_analyzed": sum(r.cost_data_points for r in test_results),
             },
             "evidence_generation": {
                 "evidence_files_created": sum(1 for r in test_results if r.evidence_generated),
-                "evidence_generation_rate": (
-
+                "evidence_generation_rate": (
+                    sum(1 for r in test_results if r.evidence_generated) / len(test_results) * 100
+                )
+                if test_results
+                else 0,
+                "evidence_directory": str(self.evidence_dir),
             },
             "detailed_test_results": [asdict(result) for result in test_results],
             "dod_compliance_assessment": {
                 "requirements_met": {
-                    "real_aws_data_integration": sum(
-
-
-
-                    "
+                    "real_aws_data_integration": sum(
+                        1 for r in test_results if r.mcp_validation and r.mcp_validation.mcp_enabled
+                    )
+                    > 0,
+                    "cross_validation_enabled": sum(
+                        1 for r in test_results if r.mcp_validation and r.mcp_validation.cost_explorer_validated
+                    )
+                    > 0,
+                    "performance_targets_achieved": sum(1 for r in test_results if r.performance_benchmark_met)
+                    >= len(test_results) * 0.8,
+                    "evidence_documentation_complete": sum(1 for r in test_results if r.evidence_generated)
+                    == len(test_results),
+                    "error_handling_validated": sum(1 for r in test_results if r.error_message is not None)
+                    < len(test_results) * 0.2,
                 },
-                "overall_compliance_score": 0.0 # Will be calculated below
-            }
+                "overall_compliance_score": 0.0,  # Will be calculated below
+            },
         }
-
+
         # Calculate overall DoD compliance score
         requirements_met = dod_report["dod_compliance_assessment"]["requirements_met"]
         compliance_score = sum(requirements_met.values()) / len(requirements_met) * 100
         dod_report["dod_compliance_assessment"]["overall_compliance_score"] = compliance_score
-
+
         # Export report
         try:
-            with open(report_file,
+            with open(report_file, "w") as f:
                 json.dump(dod_report, f, indent=2, default=str)
-
+
             print_success(f"📊 DoD validation report exported: {report_file}")
             return str(report_file)
-
+
         except Exception as e:
             print_error(f"Failed to export DoD validation report: {str(e)}")
             return ""
 
+
# CLI command interface for technical users
async def main_cli():
     """Main CLI entry point for technical cost optimization validation."""
     import argparse
-
-    parser = argparse.ArgumentParser(
-        description="MCP-Validated Cost Optimization - Technical CLI Interface"
-    )
+
+    parser = argparse.ArgumentParser(description="MCP-Validated Cost Optimization - Technical CLI Interface")
     parser.add_argument(
-        "--billing-profile",
-        default="${BILLING_PROFILE}",
-        help="AWS billing profile with Cost Explorer access"
+        "--billing-profile", default="${BILLING_PROFILE}", help="AWS billing profile with Cost Explorer access"
     )
     parser.add_argument(
-        "--management-profile",
-        default="${MANAGEMENT_PROFILE}",
-        help="AWS management profile with Organizations access"
+        "--management-profile", default="${MANAGEMENT_PROFILE}", help="AWS management profile with Organizations access"
     )
     parser.add_argument(
-        "--tolerance-percent",
-        type=float,
-        default=5.0,
-        help="MCP cross-validation tolerance percentage (default: 5.0)"
+        "--tolerance-percent", type=float, default=5.0, help="MCP cross-validation tolerance percentage (default: 5.0)"
     )
     parser.add_argument(
-        "--performance-target-ms",
+        "--performance-target-ms",
         type=float,
         default=30000.0,
-        help="Performance target in milliseconds (default: 30000)"
+        help="Performance target in milliseconds (default: 30000)",
     )
-    parser.add_argument(
-
-        action="store_true",
-        help="Run complete DoD validation test suite"
-    )
-
+    parser.add_argument("--run-full-suite", action="store_true", help="Run complete DoD validation test suite")
+
     args = parser.parse_args()
-
+
     # Initialize MCP validation engine
     validation_engine = MCPCostValidationEngine(
         billing_profile=args.billing_profile,
         management_profile=args.management_profile,
         tolerance_percent=args.tolerance_percent,
-        performance_target_ms=args.performance_target_ms
+        performance_target_ms=args.performance_target_ms,
     )
-
+
     if args.run_full_suite:
         # Run comprehensive test suite
         test_results = await validation_engine.run_comprehensive_cli_test_suite()
-
+
         # Export DoD validation report
         report_file = await validation_engine.export_dod_validation_report(test_results)
-
+
         if report_file:
             print_success(f"✅ Comprehensive DoD validation complete: {report_file}")
         else:
             print_error("❌ DoD validation encountered issues")
-
+
     else:
         # Run individual scenario validation
         print_info("💡 Use --run-full-suite for comprehensive DoD validation")
         print_info("📖 Available scenarios:")
         print_info(" • nat_gateway_cost_optimization")
-        print_info(" • ec2_idle_instance_optimization")
+        print_info(" • ec2_idle_instance_optimization")
         print_info(" • emergency_response_validation")
 
+
if __name__ == "__main__":
-    asyncio.run(main_cli())
+    asyncio.run(main_cli())