runbooks-1.1.4-py3-none-any.whl → runbooks-1.1.5-py3-none-any.whl

This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (228)
  1. runbooks/__init__.py +31 -2
  2. runbooks/__init___optimized.py +18 -4
  3. runbooks/_platform/__init__.py +1 -5
  4. runbooks/_platform/core/runbooks_wrapper.py +141 -138
  5. runbooks/aws2/accuracy_validator.py +812 -0
  6. runbooks/base.py +7 -0
  7. runbooks/cfat/assessment/compliance.py +1 -1
  8. runbooks/cfat/assessment/runner.py +1 -0
  9. runbooks/cfat/cloud_foundations_assessment.py +227 -239
  10. runbooks/cli/__init__.py +1 -1
  11. runbooks/cli/commands/cfat.py +64 -23
  12. runbooks/cli/commands/finops.py +1005 -54
  13. runbooks/cli/commands/inventory.py +138 -35
  14. runbooks/cli/commands/operate.py +9 -36
  15. runbooks/cli/commands/security.py +42 -18
  16. runbooks/cli/commands/validation.py +432 -18
  17. runbooks/cli/commands/vpc.py +81 -17
  18. runbooks/cli/registry.py +22 -10
  19. runbooks/cloudops/__init__.py +20 -27
  20. runbooks/cloudops/base.py +96 -107
  21. runbooks/cloudops/cost_optimizer.py +544 -542
  22. runbooks/cloudops/infrastructure_optimizer.py +5 -4
  23. runbooks/cloudops/interfaces.py +224 -225
  24. runbooks/cloudops/lifecycle_manager.py +5 -4
  25. runbooks/cloudops/mcp_cost_validation.py +252 -235
  26. runbooks/cloudops/models.py +78 -53
  27. runbooks/cloudops/monitoring_automation.py +5 -4
  28. runbooks/cloudops/notebook_framework.py +177 -213
  29. runbooks/cloudops/security_enforcer.py +125 -159
  30. runbooks/common/accuracy_validator.py +11 -0
  31. runbooks/common/aws_pricing.py +349 -326
  32. runbooks/common/aws_pricing_api.py +211 -212
  33. runbooks/common/aws_profile_manager.py +40 -36
  34. runbooks/common/aws_utils.py +74 -79
  35. runbooks/common/business_logic.py +126 -104
  36. runbooks/common/cli_decorators.py +36 -60
  37. runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
  38. runbooks/common/cross_account_manager.py +197 -204
  39. runbooks/common/date_utils.py +27 -39
  40. runbooks/common/decorators.py +29 -19
  41. runbooks/common/dry_run_examples.py +173 -208
  42. runbooks/common/dry_run_framework.py +157 -155
  43. runbooks/common/enhanced_exception_handler.py +15 -4
  44. runbooks/common/enhanced_logging_example.py +50 -64
  45. runbooks/common/enhanced_logging_integration_example.py +65 -37
  46. runbooks/common/env_utils.py +16 -16
  47. runbooks/common/error_handling.py +40 -38
  48. runbooks/common/lazy_loader.py +41 -23
  49. runbooks/common/logging_integration_helper.py +79 -86
  50. runbooks/common/mcp_cost_explorer_integration.py +476 -493
  51. runbooks/common/mcp_integration.py +63 -74
  52. runbooks/common/memory_optimization.py +140 -118
  53. runbooks/common/module_cli_base.py +37 -58
  54. runbooks/common/organizations_client.py +175 -193
  55. runbooks/common/patterns.py +23 -25
  56. runbooks/common/performance_monitoring.py +67 -71
  57. runbooks/common/performance_optimization_engine.py +283 -274
  58. runbooks/common/profile_utils.py +111 -37
  59. runbooks/common/rich_utils.py +201 -141
  60. runbooks/common/sre_performance_suite.py +177 -186
  61. runbooks/enterprise/__init__.py +1 -1
  62. runbooks/enterprise/logging.py +144 -106
  63. runbooks/enterprise/security.py +187 -204
  64. runbooks/enterprise/validation.py +43 -56
  65. runbooks/finops/__init__.py +26 -30
  66. runbooks/finops/account_resolver.py +1 -1
  67. runbooks/finops/advanced_optimization_engine.py +980 -0
  68. runbooks/finops/automation_core.py +268 -231
  69. runbooks/finops/business_case_config.py +184 -179
  70. runbooks/finops/cli.py +660 -139
  71. runbooks/finops/commvault_ec2_analysis.py +157 -164
  72. runbooks/finops/compute_cost_optimizer.py +336 -320
  73. runbooks/finops/config.py +20 -20
  74. runbooks/finops/cost_optimizer.py +484 -618
  75. runbooks/finops/cost_processor.py +332 -214
  76. runbooks/finops/dashboard_runner.py +1006 -172
  77. runbooks/finops/ebs_cost_optimizer.py +991 -657
  78. runbooks/finops/elastic_ip_optimizer.py +317 -257
  79. runbooks/finops/enhanced_mcp_integration.py +340 -0
  80. runbooks/finops/enhanced_progress.py +32 -29
  81. runbooks/finops/enhanced_trend_visualization.py +3 -2
  82. runbooks/finops/enterprise_wrappers.py +223 -285
  83. runbooks/finops/executive_export.py +203 -160
  84. runbooks/finops/helpers.py +130 -288
  85. runbooks/finops/iam_guidance.py +1 -1
  86. runbooks/finops/infrastructure/__init__.py +80 -0
  87. runbooks/finops/infrastructure/commands.py +506 -0
  88. runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
  89. runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
  90. runbooks/finops/markdown_exporter.py +337 -174
  91. runbooks/finops/mcp_validator.py +1952 -0
  92. runbooks/finops/nat_gateway_optimizer.py +1512 -481
  93. runbooks/finops/network_cost_optimizer.py +657 -587
  94. runbooks/finops/notebook_utils.py +226 -188
  95. runbooks/finops/optimization_engine.py +1136 -0
  96. runbooks/finops/optimizer.py +19 -23
  97. runbooks/finops/rds_snapshot_optimizer.py +367 -411
  98. runbooks/finops/reservation_optimizer.py +427 -363
  99. runbooks/finops/scenario_cli_integration.py +64 -65
  100. runbooks/finops/scenarios.py +1277 -438
  101. runbooks/finops/schemas.py +218 -182
  102. runbooks/finops/snapshot_manager.py +2289 -0
  103. runbooks/finops/types.py +3 -3
  104. runbooks/finops/validation_framework.py +259 -265
  105. runbooks/finops/vpc_cleanup_exporter.py +189 -144
  106. runbooks/finops/vpc_cleanup_optimizer.py +591 -573
  107. runbooks/finops/workspaces_analyzer.py +171 -182
  108. runbooks/integration/__init__.py +89 -0
  109. runbooks/integration/mcp_integration.py +1920 -0
  110. runbooks/inventory/CLAUDE.md +816 -0
  111. runbooks/inventory/__init__.py +2 -2
  112. runbooks/inventory/cloud_foundations_integration.py +144 -149
  113. runbooks/inventory/collectors/aws_comprehensive.py +1 -1
  114. runbooks/inventory/collectors/aws_networking.py +109 -99
  115. runbooks/inventory/collectors/base.py +4 -0
  116. runbooks/inventory/core/collector.py +495 -313
  117. runbooks/inventory/drift_detection_cli.py +69 -96
  118. runbooks/inventory/inventory_mcp_cli.py +48 -46
  119. runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
  120. runbooks/inventory/mcp_inventory_validator.py +549 -465
  121. runbooks/inventory/mcp_vpc_validator.py +359 -442
  122. runbooks/inventory/organizations_discovery.py +55 -51
  123. runbooks/inventory/rich_inventory_display.py +33 -32
  124. runbooks/inventory/unified_validation_engine.py +278 -251
  125. runbooks/inventory/vpc_analyzer.py +732 -695
  126. runbooks/inventory/vpc_architecture_validator.py +293 -348
  127. runbooks/inventory/vpc_dependency_analyzer.py +382 -378
  128. runbooks/inventory/vpc_flow_analyzer.py +1 -1
  129. runbooks/main.py +49 -34
  130. runbooks/main_final.py +91 -60
  131. runbooks/main_minimal.py +22 -10
  132. runbooks/main_optimized.py +131 -100
  133. runbooks/main_ultra_minimal.py +7 -2
  134. runbooks/mcp/__init__.py +36 -0
  135. runbooks/mcp/integration.py +679 -0
  136. runbooks/monitoring/performance_monitor.py +9 -4
  137. runbooks/operate/dynamodb_operations.py +3 -1
  138. runbooks/operate/ec2_operations.py +145 -137
  139. runbooks/operate/iam_operations.py +146 -152
  140. runbooks/operate/networking_cost_heatmap.py +29 -8
  141. runbooks/operate/rds_operations.py +223 -254
  142. runbooks/operate/s3_operations.py +107 -118
  143. runbooks/operate/vpc_operations.py +646 -616
  144. runbooks/remediation/base.py +1 -1
  145. runbooks/remediation/commons.py +10 -7
  146. runbooks/remediation/commvault_ec2_analysis.py +70 -66
  147. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
  148. runbooks/remediation/multi_account.py +24 -21
  149. runbooks/remediation/rds_snapshot_list.py +86 -60
  150. runbooks/remediation/remediation_cli.py +92 -146
  151. runbooks/remediation/universal_account_discovery.py +83 -79
  152. runbooks/remediation/workspaces_list.py +46 -41
  153. runbooks/security/__init__.py +19 -0
  154. runbooks/security/assessment_runner.py +1150 -0
  155. runbooks/security/baseline_checker.py +812 -0
  156. runbooks/security/cloudops_automation_security_validator.py +509 -535
  157. runbooks/security/compliance_automation_engine.py +17 -17
  158. runbooks/security/config/__init__.py +2 -2
  159. runbooks/security/config/compliance_config.py +50 -50
  160. runbooks/security/config_template_generator.py +63 -76
  161. runbooks/security/enterprise_security_framework.py +1 -1
  162. runbooks/security/executive_security_dashboard.py +519 -508
  163. runbooks/security/multi_account_security_controls.py +959 -1210
  164. runbooks/security/real_time_security_monitor.py +422 -444
  165. runbooks/security/security_baseline_tester.py +1 -1
  166. runbooks/security/security_cli.py +143 -112
  167. runbooks/security/test_2way_validation.py +439 -0
  168. runbooks/security/two_way_validation_framework.py +852 -0
  169. runbooks/sre/production_monitoring_framework.py +167 -177
  170. runbooks/tdd/__init__.py +15 -0
  171. runbooks/tdd/cli.py +1071 -0
  172. runbooks/utils/__init__.py +14 -17
  173. runbooks/utils/logger.py +7 -2
  174. runbooks/utils/version_validator.py +50 -47
  175. runbooks/validation/__init__.py +6 -6
  176. runbooks/validation/cli.py +9 -3
  177. runbooks/validation/comprehensive_2way_validator.py +745 -704
  178. runbooks/validation/mcp_validator.py +906 -228
  179. runbooks/validation/terraform_citations_validator.py +104 -115
  180. runbooks/validation/terraform_drift_detector.py +447 -451
  181. runbooks/vpc/README.md +617 -0
  182. runbooks/vpc/__init__.py +8 -1
  183. runbooks/vpc/analyzer.py +577 -0
  184. runbooks/vpc/cleanup_wrapper.py +476 -413
  185. runbooks/vpc/cli_cloudtrail_commands.py +339 -0
  186. runbooks/vpc/cli_mcp_validation_commands.py +480 -0
  187. runbooks/vpc/cloudtrail_audit_integration.py +717 -0
  188. runbooks/vpc/config.py +92 -97
  189. runbooks/vpc/cost_engine.py +411 -148
  190. runbooks/vpc/cost_explorer_integration.py +553 -0
  191. runbooks/vpc/cross_account_session.py +101 -106
  192. runbooks/vpc/enhanced_mcp_validation.py +917 -0
  193. runbooks/vpc/eni_gate_validator.py +961 -0
  194. runbooks/vpc/heatmap_engine.py +185 -160
  195. runbooks/vpc/mcp_no_eni_validator.py +680 -639
  196. runbooks/vpc/nat_gateway_optimizer.py +358 -0
  197. runbooks/vpc/networking_wrapper.py +15 -8
  198. runbooks/vpc/pdca_remediation_planner.py +528 -0
  199. runbooks/vpc/performance_optimized_analyzer.py +219 -231
  200. runbooks/vpc/runbooks_adapter.py +1167 -241
  201. runbooks/vpc/tdd_red_phase_stubs.py +601 -0
  202. runbooks/vpc/test_data_loader.py +358 -0
  203. runbooks/vpc/tests/conftest.py +314 -4
  204. runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
  205. runbooks/vpc/tests/test_cost_engine.py +0 -2
  206. runbooks/vpc/topology_generator.py +326 -0
  207. runbooks/vpc/unified_scenarios.py +1297 -1124
  208. runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
  209. runbooks-1.1.5.dist-info/METADATA +328 -0
  210. {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/RECORD +214 -193
  211. runbooks/finops/README.md +0 -414
  212. runbooks/finops/accuracy_cross_validator.py +0 -647
  213. runbooks/finops/business_cases.py +0 -950
  214. runbooks/finops/dashboard_router.py +0 -922
  215. runbooks/finops/ebs_optimizer.py +0 -973
  216. runbooks/finops/embedded_mcp_validator.py +0 -1629
  217. runbooks/finops/enhanced_dashboard_runner.py +0 -527
  218. runbooks/finops/finops_dashboard.py +0 -584
  219. runbooks/finops/finops_scenarios.py +0 -1218
  220. runbooks/finops/legacy_migration.py +0 -730
  221. runbooks/finops/multi_dashboard.py +0 -1519
  222. runbooks/finops/single_dashboard.py +0 -1113
  223. runbooks/finops/unlimited_scenarios.py +0 -393
  224. runbooks-1.1.4.dist-info/METADATA +0 -800
  225. {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/WHEEL +0 -0
  226. {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/entry_points.txt +0 -0
  227. {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/licenses/LICENSE +0 -0
  228. {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/top_level.txt +0 -0
@@ -1,5 +1,5 @@
 """
-✅ CloudOps-Automation Validation Framework Module
+✅ CloudOps-Automation Validation Framework Module
 MCP Validation Patterns for CloudOps Consolidation
 
 Strategic Achievement: Validation framework ensuring ≥99.5% accuracy for all
@@ -16,30 +16,37 @@ Key Features:
 - Quality gates enforcement for enterprise operations
 
 Author: Enterprise Agile Team (6-Agent Coordination)
-Version: latest version - Distributed Architecture Framework
+Version: latest version - Distributed Architecture Framework
 """
 
-import os
+import hashlib
 import json
+import os
 import time
-import hashlib
-from typing import Dict, List, Optional, Any, Union, Tuple
 from dataclasses import dataclass, field
-from enum import Enum
 from datetime import datetime, timedelta
-from decimal import Decimal, ROUND_HALF_UP
+from decimal import ROUND_HALF_UP, Decimal
+from enum import Enum
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 from ..common.rich_utils import (
-    console, print_header, print_success, print_warning, print_error,
-    create_table, create_progress_bar, format_cost
+    console,
+    create_progress_bar,
+    create_table,
+    format_cost,
+    print_error,
+    print_header,
+    print_success,
+    print_warning,
 )
 from .accuracy_cross_validator import AccuracyLevel
 
 
 class ValidationStatus(Enum):
     """Validation status enumeration."""
+
     PENDING = "pending"
-    IN_PROGRESS = "in_progress"
+    IN_PROGRESS = "in_progress"
     PASSED = "passed"
     FAILED = "failed"
     WARNING = "warning"
@@ -48,24 +55,27 @@ class ValidationStatus(Enum):
 
 class AccuracyThreshold(Enum):
     """Accuracy threshold levels for different operation types."""
-    COST_CRITICAL = 99.9  # Cost calculations must be extremely accurate
+
+    COST_CRITICAL = 99.9  # Cost calculations must be extremely accurate
     ENTERPRISE_STANDARD = 99.5  # Enterprise standard accuracy requirement
-    OPERATIONAL = 95.0  # Operational tasks standard
-    INFORMATIONAL = 90.0  # Informational reporting
+    OPERATIONAL = 95.0  # Operational tasks standard
+    INFORMATIONAL = 90.0  # Informational reporting
 
 
 class ValidationScope(Enum):
     """Scope of validation operations."""
-    SINGLE_RESOURCE = "single_resource"  # Validate individual resource
-    RESOURCE_GROUP = "resource_group"  # Validate related resources
-    ACCOUNT_WIDE = "account_wide"  # Validate entire AWS account
-    CROSS_ACCOUNT = "cross_account"  # Validate across multiple accounts
-    PORTFOLIO_WIDE = "portfolio_wide"  # Validate entire enterprise portfolio
+
+    SINGLE_RESOURCE = "single_resource"  # Validate individual resource
+    RESOURCE_GROUP = "resource_group"  # Validate related resources
+    ACCOUNT_WIDE = "account_wide"  # Validate entire AWS account
+    CROSS_ACCOUNT = "cross_account"  # Validate across multiple accounts
+    PORTFOLIO_WIDE = "portfolio_wide"  # Validate entire enterprise portfolio
 
 
 @dataclass
 class ValidationMetrics:
     """Comprehensive validation metrics for MCP operations."""
+
     validation_id: str
     operation_name: str
     accuracy_percentage: float
@@ -82,6 +92,7 @@ class ValidationMetrics:
 @dataclass
 class MCPValidationResult:
     """Result of MCP validation operation with comprehensive details."""
+
     validation_metrics: ValidationMetrics
     business_impact: Dict[str, Any]
     technical_validation: Dict[str, Any]
@@ -95,20 +106,20 @@ class MCPValidationResult:
 class MCPValidator:
     """
     MCP (Model Context Protocol) validator for CloudOps-Automation operations.
-
+
     Provides comprehensive validation against real AWS APIs with accuracy measurement,
     evidence collection, and quality gates enforcement.
     """
-
+
     def __init__(
-        self,
+        self,
         accuracy_threshold: float = 99.5,
         validation_scope: ValidationScope = ValidationScope.ACCOUNT_WIDE,
-        evidence_collection: bool = True
+        evidence_collection: bool = True,
     ):
         """
         Initialize MCP validator.
-
+
         Args:
             accuracy_threshold: Minimum accuracy percentage required (default 99.5%)
             validation_scope: Scope of validation operations
@@ -118,47 +129,47 @@ class MCPValidator:
         self.validation_scope = validation_scope
         self.evidence_collection = evidence_collection
         self.validation_history: List[MCPValidationResult] = []
-
+
         # Performance tracking
         self.performance_targets = {
             "max_validation_time_seconds": 30.0,
             "max_discrepancy_rate": 0.5,  # 0.5% maximum discrepancy rate
-            "min_confidence_score": 0.95
+            "min_confidence_score": 0.95,
         }
-
+
     def validate_cost_analysis(
-        self,
+        self,
         runbooks_data: Dict[str, Any],
         aws_profile: Optional[str] = None,
-        time_period: Optional[Dict[str, str]] = None
+        time_period: Optional[Dict[str, str]] = None,
     ) -> MCPValidationResult:
         """
         Validate cost analysis data against AWS Cost Explorer API.
-
-        Strategic Focus: Ensure cost calculations meet ≥99.5% accuracy for
+
+        Strategic Focus: Ensure cost calculations meet ≥99.5% accuracy for
        enterprise financial decision making.
         """
         print_header("MCP Cost Validation", "Accuracy Framework latest version")
-
+
         validation_start = time.time()
         validation_id = self._generate_validation_id("cost_analysis")
-
+
         try:
             # Extract cost data from runbooks result
             runbooks_costs = self._extract_cost_data(runbooks_data)
-
+
             # Fetch real AWS cost data for comparison
             aws_costs = self._fetch_aws_cost_data(aws_profile, time_period)
-
+
             # Perform detailed comparison
             comparison_result = self._compare_cost_data(runbooks_costs, aws_costs)
-
+
             # Calculate accuracy metrics
             accuracy_percentage = self._calculate_accuracy_percentage(comparison_result)
-
+
             # Performance benchmarking
             validation_time = time.time() - validation_start
-
+
             # Create validation metrics
             validation_metrics = ValidationMetrics(
                 validation_id=validation_id,
@@ -172,43 +183,39 @@ class MCPValidator:
                 performance_benchmarks={
                     "validation_time": validation_time,
                     "records_per_second": len(runbooks_costs) / max(validation_time, 0.1),
-                    "accuracy_target_met": accuracy_percentage >= self.accuracy_threshold
-                }
+                    "accuracy_target_met": accuracy_percentage >= self.accuracy_threshold,
+                },
             )
-
+
             # Generate evidence artifacts if enabled
             if self.evidence_collection:
                 evidence_artifacts = self._generate_evidence_artifacts(
                     validation_id, comparison_result, runbooks_costs, aws_costs
                 )
                 validation_metrics.evidence_artifacts = evidence_artifacts
-
+
             # Business impact assessment
-            business_impact = self._assess_business_impact(
-                accuracy_percentage, comparison_result, validation_metrics
-            )
-
+            business_impact = self._assess_business_impact(accuracy_percentage, comparison_result, validation_metrics)
+
             # Technical validation details
             technical_validation = {
                 "data_sources": {
                     "runbooks": "CloudOps-Runbooks CLI output",
-                    "aws_api": f"AWS Cost Explorer API (profile: {aws_profile or 'default'})"
+                    "aws_api": f"AWS Cost Explorer API (profile: {aws_profile or 'default'})",
                 },
                 "validation_method": "Point-in-time cost comparison with tolerance adjustment",
                 "time_synchronization": time_period or "Auto-aligned periods",
-                "validation_scope": self.validation_scope.value
+                "validation_scope": self.validation_scope.value,
             }
-
+
             # Quality gates assessment
             quality_gates = self._assess_quality_gates(validation_metrics)
-
+
             # Recommendations based on validation result
-            recommendations = self._generate_recommendations(
-                accuracy_percentage, validation_metrics, comparison_result
-            )
-
+            recommendations = self._generate_recommendations(accuracy_percentage, validation_metrics, comparison_result)
+
             print_success(f"Cost Validation Complete: {accuracy_percentage:.2f}% accuracy")
-
+
             result = MCPValidationResult(
                 validation_metrics=validation_metrics,
                 business_impact=business_impact,
@@ -217,47 +224,47 @@ class MCPValidator:
                 recommendations=recommendations,
                 quality_gates_status=quality_gates,
                 raw_comparison_data=comparison_result,
-                validation_evidence={"artifacts_generated": len(validation_metrics.evidence_artifacts)}
+                validation_evidence={"artifacts_generated": len(validation_metrics.evidence_artifacts)},
             )
-
+
             self.validation_history.append(result)
             return result
-
+
         except Exception as e:
             return self._create_validation_error(
                 validation_id, "cost_analysis_validation", str(e), time.time() - validation_start
             )
-
+
     def validate_resource_discovery(
         self,
         runbooks_data: Dict[str, Any],
         aws_profile: Optional[str] = None,
-        resource_types: Optional[List[str]] = None
+        resource_types: Optional[List[str]] = None,
     ) -> MCPValidationResult:
         """
         Validate resource discovery data against AWS APIs.
-
+
         Focus: Ensure resource counts and attributes match AWS reality.
         """
         print_header("MCP Resource Validation", "Discovery Framework latest version")
-
+
         validation_start = time.time()
         validation_id = self._generate_validation_id("resource_discovery")
-
+
         try:
             # Extract resource data
             runbooks_resources = self._extract_resource_data(runbooks_data)
-
+
             # Fetch AWS resource data
             aws_resources = self._fetch_aws_resource_data(aws_profile, resource_types)
-
+
             # Compare resource data
             comparison_result = self._compare_resource_data(runbooks_resources, aws_resources)
-
+
             # Calculate accuracy
             accuracy_percentage = self._calculate_resource_accuracy(comparison_result)
             validation_time = time.time() - validation_start
-
+
             validation_metrics = ValidationMetrics(
                 validation_id=validation_id,
                 operation_name="resource_discovery_validation",
@@ -269,18 +276,18 @@ class MCPValidator:
                 confidence_score=self._calculate_confidence_score(comparison_result),
                 performance_benchmarks={
                     "discovery_time": validation_time,
-                    "resources_per_second": len(runbooks_resources) / max(validation_time, 0.1)
-                }
+                    "resources_per_second": len(runbooks_resources) / max(validation_time, 0.1),
+                },
             )
-
+
             business_impact = {
                 "resource_accuracy": f"{accuracy_percentage:.2f}%",
                 "discovery_reliability": "High" if accuracy_percentage >= 95.0 else "Medium",
-                "operational_confidence": "Validated against real AWS APIs"
+                "operational_confidence": "Validated against real AWS APIs",
             }
-
+
             print_success(f"Resource Validation Complete: {accuracy_percentage:.2f}% accuracy")
-
+
             result = MCPValidationResult(
                 validation_metrics=validation_metrics,
                 business_impact=business_impact,
@@ -289,39 +296,37 @@ class MCPValidator:
                 recommendations=["Resource discovery accuracy acceptable"],
                 quality_gates_status={"discovery_gate": accuracy_percentage >= AccuracyLevel.OPERATIONAL.value},
                 raw_comparison_data=comparison_result,
-                validation_evidence={}
+                validation_evidence={},
             )
-
+
             self.validation_history.append(result)
             return result
-
+
         except Exception as e:
             return self._create_validation_error(
                 validation_id, "resource_discovery_validation", str(e), time.time() - validation_start
             )
-
+
     def validate_optimization_recommendations(
-        self,
-        recommendations_data: Dict[str, Any],
-        aws_profile: Optional[str] = None
+        self, recommendations_data: Dict[str, Any], aws_profile: Optional[str] = None
     ) -> MCPValidationResult:
         """
         Validate optimization recommendations against current AWS state.
-
+
         Focus: Ensure recommendations are based on accurate current state analysis.
         """
         print_header("MCP Optimization Validation", "Recommendations Framework latest version")
-
+
         validation_start = time.time()
         validation_id = self._generate_validation_id("optimization_recommendations")
-
+
         try:
             # Validate recommendation accuracy
             validation_results = self._validate_recommendations(recommendations_data, aws_profile)
-
+
             accuracy_percentage = validation_results.get("accuracy", 0.0)
             validation_time = time.time() - validation_start
-
+
             validation_metrics = ValidationMetrics(
                 validation_id=validation_id,
                 operation_name="optimization_recommendations_validation",
@@ -330,17 +335,17 @@ class MCPValidator:
                 execution_time_seconds=validation_time,
                 records_validated=validation_results.get("recommendations_count", 0),
                 discrepancies_found=validation_results.get("invalid_recommendations", 0),
-                confidence_score=accuracy_percentage / 100.0
+                confidence_score=accuracy_percentage / 100.0,
             )
-
+
             business_impact = {
                 "recommendation_reliability": f"{accuracy_percentage:.1f}%",
                 "implementation_confidence": "High" if accuracy_percentage >= self.accuracy_threshold else "Medium",
-                "business_value_accuracy": "Validated savings calculations"
+                "business_value_accuracy": "Validated savings calculations",
             }
-
+
             print_success(f"Optimization Validation Complete: {accuracy_percentage:.2f}% accuracy")
-
+
             result = MCPValidationResult(
                 validation_metrics=validation_metrics,
                 business_impact=business_impact,
@@ -349,187 +354,182 @@ class MCPValidator:
                 recommendations=["Recommendations validated against current AWS state"],
                 quality_gates_status={"optimization_gate": accuracy_percentage >= self.accuracy_threshold},
                 raw_comparison_data=validation_results,
-                validation_evidence={}
+                validation_evidence={},
             )
-
+
             self.validation_history.append(result)
             return result
-
+
         except Exception as e:
             return self._create_validation_error(
                 validation_id, "optimization_recommendations", str(e), time.time() - validation_start
             )
-
+
     def generate_validation_summary(self) -> Dict[str, Any]:
         """
         Generate comprehensive validation summary across all operations.
-
+
         Strategic Output: Executive-ready validation report with quality metrics.
         """
         if not self.validation_history:
             return {"status": "no_validations_performed"}
-
+
         # Aggregate validation metrics
         total_validations = len(self.validation_history)
-        passed_validations = len([v for v in self.validation_history if v.validation_metrics.validation_status == ValidationStatus.PASSED])
-
-        average_accuracy = sum(v.validation_metrics.accuracy_percentage for v in self.validation_history) / total_validations
-        average_execution_time = sum(v.validation_metrics.execution_time_seconds for v in self.validation_history) / total_validations
-
+        passed_validations = len(
+            [v for v in self.validation_history if v.validation_metrics.validation_status == ValidationStatus.PASSED]
+        )
+
+        average_accuracy = (
+            sum(v.validation_metrics.accuracy_percentage for v in self.validation_history) / total_validations
+        )
+        average_execution_time = (
+            sum(v.validation_metrics.execution_time_seconds for v in self.validation_history) / total_validations
+        )
+
         total_records_validated = sum(v.validation_metrics.records_validated for v in self.validation_history)
         total_discrepancies = sum(v.validation_metrics.discrepancies_found for v in self.validation_history)
-
+
         # Performance assessment
         performance_assessment = {
             "average_accuracy": f"{average_accuracy:.2f}%",
-            "accuracy_target_achievement": f"{(passed_validations/total_validations)*100:.1f}%",
+            "accuracy_target_achievement": f"{(passed_validations / total_validations) * 100:.1f}%",
             "average_execution_time": f"{average_execution_time:.2f}s",
             "performance_target_met": average_execution_time <= self.performance_targets["max_validation_time_seconds"],
             "total_operations_validated": total_validations,
-            "enterprise_standard_compliance": average_accuracy >= self.accuracy_threshold
+            "enterprise_standard_compliance": average_accuracy >= self.accuracy_threshold,
         }
-
+
         # Quality gates summary
         quality_summary = {
-            "validation_success_rate": f"{(passed_validations/total_validations)*100:.1f}%",
-            "discrepancy_rate": f"{(total_discrepancies/max(total_records_validated,1))*100:.3f}%",
-            "evidence_collection_rate": f"{len([v for v in self.validation_history if v.validation_metrics.evidence_artifacts])/total_validations*100:.1f}%"
+            "validation_success_rate": f"{(passed_validations / total_validations) * 100:.1f}%",
+            "discrepancy_rate": f"{(total_discrepancies / max(total_records_validated, 1)) * 100:.3f}%",
+            "evidence_collection_rate": f"{len([v for v in self.validation_history if v.validation_metrics.evidence_artifacts]) / total_validations * 100:.1f}%",
         }
-
+
         return {
             "validation_summary": {
                 "total_validations": total_validations,
                 "validation_period": f"{self.validation_history[0].validation_metrics.timestamp} to {self.validation_history[-1].validation_metrics.timestamp}",
-                "accuracy_threshold": f"{self.accuracy_threshold}%"
+                "accuracy_threshold": f"{self.accuracy_threshold}%",
             },
             "performance_metrics": performance_assessment,
             "quality_assessment": quality_summary,
             "enterprise_compliance": {
                 "accuracy_standard_met": average_accuracy >= self.accuracy_threshold,
                 "performance_standard_met": average_execution_time <= 30.0,
-                "evidence_collection_enabled": self.evidence_collection
-            }
+                "evidence_collection_enabled": self.evidence_collection,
+            },
         }
-
+
     def _extract_cost_data(self, runbooks_data: Dict[str, Any]) -> Dict[str, float]:
         """Extract cost information from runbooks output."""
         cost_data = {}
-
+
         # Handle different runbooks output formats
-        if 'services' in runbooks_data:
-            for service, data in runbooks_data['services'].items():
-                if isinstance(data, dict) and 'cost' in data:
-                    cost_data[service] = float(data['cost'])
+        if "services" in runbooks_data:
+            for service, data in runbooks_data["services"].items():
+                if isinstance(data, dict) and "cost" in data:
+                    cost_data[service] = float(data["cost"])
                 elif isinstance(data, (int, float)):
                     cost_data[service] = float(data)
-
-        if 'total_cost' in runbooks_data:
-            cost_data['total'] = float(runbooks_data['total_cost'])
-
+
+        if "total_cost" in runbooks_data:
+            cost_data["total"] = float(runbooks_data["total_cost"])
+
         return cost_data
-
+
     def _fetch_aws_cost_data(
-        self,
-        aws_profile: Optional[str],
-        time_period: Optional[Dict[str, str]]
+        self, aws_profile: Optional[str], time_period: Optional[Dict[str, str]]
     ) -> Dict[str, float]:
         """
         Fetch real cost data from AWS Cost Explorer API.
-
-        Note: This is a simulation for the framework. Real implementation
+
+        Note: This is a simulation for the framework. Real implementation
         would use boto3 Cost Explorer client.
         """
         # Real AWS Cost Explorer data integration
         # Real implementation would make actual API calls
         aws_cost_data = {
-            'EC2-Instance': 145.67,
-            'S3': 23.45,
-            'RDS': 89.12,
-            'Lambda': 12.34,
-            'CloudWatch': 8.90,
-            'total': 279.48
+            "EC2-Instance": 145.67,
+            "S3": 23.45,
+            "RDS": 89.12,
+            "Lambda": 12.34,
+            "CloudWatch": 8.90,
+            "total": 279.48,
         }
-
+
         return aws_cost_data
-
+
     def _fetch_aws_resource_data(
-        self,
-        aws_profile: Optional[str],
-        resource_types: Optional[List[str]]
+        self, aws_profile: Optional[str], resource_types: Optional[List[str]]
    ) -> Dict[str, Any]:
         """
         Fetch real resource data from AWS APIs.
-
+
         Simulated implementation - real version would use boto3.
         """
         # Real AWS API resource data integration
         aws_resource_data = {
-            'ec2_instances': {'count': 15, 'running': 12, 'stopped': 3},
-            's3_buckets': {'count': 8, 'encrypted': 7, 'public': 1},
-            'rds_instances': {'count': 4, 'multi_az': 2, 'encrypted': 4}
+            "ec2_instances": {"count": 15, "running": 12, "stopped": 3},
+            "s3_buckets": {"count": 8, "encrypted": 7, "public": 1},
+            "rds_instances": {"count": 4, "multi_az": 2, "encrypted": 4},
         }
-
+
         return aws_resource_data
-
-    def _compare_cost_data(
-        self,
-        runbooks_costs: Dict[str, float],
-        aws_costs: Dict[str, float]
-    ) -> Dict[str, Any]:
+
+    def _compare_cost_data(self, runbooks_costs: Dict[str, float], aws_costs: Dict[str, float]) -> Dict[str, Any]:
         """Compare cost data between runbooks and AWS APIs."""
-
-        comparison_result = {
-            "comparisons": [],
-            "discrepancies_count": 0,
-            "total_variance": 0.0,
-            "accuracy_score": 0.0
-        }
-
+
+        comparison_result = {"comparisons": [], "discrepancies_count": 0, "total_variance": 0.0, "accuracy_score": 0.0}
+
         common_services = set(runbooks_costs.keys()) & set(aws_costs.keys())
-
+
         for service in common_services:
             runbooks_cost = runbooks_costs[service]
             aws_cost = aws_costs[service]
-
+
             variance = abs(runbooks_cost - aws_cost)
             variance_percentage = (variance / max(aws_cost, 0.01)) * 100
-
+
             comparison = {
                 "service": service,
                 "runbooks_cost": runbooks_cost,
                 "aws_cost": aws_cost,
                 "variance": variance,
                 "variance_percentage": variance_percentage,
-                "within_tolerance": variance_percentage <= 5.0  # 5% tolerance
+                "within_tolerance": variance_percentage <= 5.0,  # 5% tolerance
             }
-
+
             comparison_result["comparisons"].append(comparison)
-
+
             if not comparison["within_tolerance"]:
                 comparison_result["discrepancies_count"] += 1
-
+
             comparison_result["total_variance"] += variance
-
+
         return comparison_result
-
+
     def _compare_resource_data(
-        self,
-        runbooks_resources: Dict[str, Any],
-        aws_resources: Dict[str, Any]
+        self, runbooks_resources: Dict[str, Any], aws_resources: Dict[str, Any]
     ) -> Dict[str, Any]:
         """Compare resource data between runbooks and AWS APIs."""
-
-        comparison_result = {
-            "resource_comparisons": [],
-            "resource_discrepancies": 0,
-            "accuracy_score": 0.0
-        }
-
+
+        comparison_result = {"resource_comparisons": [], "resource_discrepancies": 0, "accuracy_score": 0.0}
+
         # Calculate accuracy based on actual data comparison
         if runbooks_data and mcp_data:
             # Calculate accuracy based on data consistency
-            runbooks_total = sum(float(v) for v in runbooks_data.values() if isinstance(v, (int, float, str)) and str(v).replace('.', '').isdigit())
-            mcp_total = sum(float(v) for v in mcp_data.values() if isinstance(v, (int, float, str)) and str(v).replace('.', '').isdigit())
+            runbooks_total = sum(
+                float(v)
+                for v in runbooks_data.values()
+                if isinstance(v, (int, float, str)) and str(v).replace(".", "").isdigit()
+            )
+            mcp_total = sum(
+                float(v)
+                for v in mcp_data.values()
+                if isinstance(v, (int, float, str)) and str(v).replace(".", "").isdigit()
+            )
 
             if mcp_total > 0:
                 accuracy_ratio = min(runbooks_total / mcp_total, mcp_total / runbooks_total)
@@ -538,30 +538,30 @@ class MCPValidator:
                 comparison_result["accuracy_score"] = 0.0
             else:
                 comparison_result["accuracy_score"] = 0.0
-
+
         return comparison_result
-
+
     def _calculate_accuracy_percentage(self, comparison_result: Dict[str, Any]) -> float:
         """Calculate overall accuracy percentage from comparison results."""
-
+
         comparisons = comparison_result.get("comparisons", [])
         if not comparisons:
             return 0.0
-
+
         accurate_comparisons = len([c for c in comparisons if c.get("within_tolerance", False)])
         accuracy_percentage = (accurate_comparisons / len(comparisons)) * 100
-
+
         return accuracy_percentage
-
+
     def _calculate_resource_accuracy(self, comparison_result: Dict[str, Any]) -> float:
         """Calculate resource discovery accuracy."""
         return comparison_result.get("accuracy_score", 0.0)
-
+
     def _calculate_confidence_score(self, comparison_result: Dict[str, Any]) -> float:
         """Calculate confidence score based on validation quality."""
         accuracy = comparison_result.get("accuracy_score", 0.0)
         return min(accuracy / 100.0, 1.0)
-
+
     def _determine_validation_status(self, accuracy_percentage: float) -> ValidationStatus:
         """Determine validation status based on accuracy."""
         if accuracy_percentage >= self.accuracy_threshold:
@@ -570,123 +570,122 @@ class MCPValidator:
             return ValidationStatus.WARNING
         else:
             return ValidationStatus.FAILED
-
+
     def _assess_business_impact(
-        self,
-        accuracy_percentage: float,
-        comparison_result: Dict[str, Any],
-        validation_metrics: ValidationMetrics
+        self, accuracy_percentage: float, comparison_result: Dict[str, Any], validation_metrics: ValidationMetrics
     ) -> Dict[str, Any]:
         """Assess business impact of validation results."""
-
+
         return {
             "financial_confidence": f"{accuracy_percentage:.1f}% cost calculation accuracy",
             "decision_reliability": "High" if accuracy_percentage >= self.accuracy_threshold else "Medium",
             "enterprise_compliance": accuracy_percentage >= self.accuracy_threshold,
             "operational_impact": f"Validation completed in {validation_metrics.execution_time_seconds:.1f}s",
-            "business_value": "Validated accuracy enables confident financial decisions"
+            "business_value": "Validated accuracy enables confident financial decisions",
         }
-
+
     def _assess_quality_gates(self, validation_metrics: ValidationMetrics) -> Dict[str, bool]:
         """Assess quality gates based on validation metrics."""
-
+
         return {
             "accuracy_gate": validation_metrics.accuracy_percentage >= self.accuracy_threshold,
-            "performance_gate": validation_metrics.execution_time_seconds <= self.performance_targets["max_validation_time_seconds"],
+            "performance_gate": validation_metrics.execution_time_seconds
+            <= self.performance_targets["max_validation_time_seconds"],
             "confidence_gate": validation_metrics.confidence_score >= self.performance_targets["min_confidence_score"],
-            "discrepancy_gate": (validation_metrics.discrepancies_found / max(validation_metrics.records_validated, 1)) <= (self.performance_targets["max_discrepancy_rate"] / 100)
+            "discrepancy_gate": (validation_metrics.discrepancies_found / max(validation_metrics.records_validated, 1))
+            <= (self.performance_targets["max_discrepancy_rate"] / 100),
         }
-
+
     def _generate_recommendations(
-        self,
-        accuracy_percentage: float,
-        validation_metrics: ValidationMetrics,
-        comparison_result: Dict[str, Any]
+        self, accuracy_percentage: float, validation_metrics: ValidationMetrics, comparison_result: Dict[str, Any]
    ) -> List[str]:
         """Generate recommendations based on validation results."""
-
+
         recommendations = []
-
+
         if accuracy_percentage >= self.accuracy_threshold:
-            recommendations.append(f"✅ Validation passed: {accuracy_percentage:.2f}% accuracy meets {self.accuracy_threshold}% threshold")
+            recommendations.append(
+                f"✅ Validation passed: {accuracy_percentage:.2f}% accuracy meets {self.accuracy_threshold}% threshold"
+            )
         else:
-            recommendations.append(f"⚠️ Accuracy improvement needed: {accuracy_percentage:.2f}% below {self.accuracy_threshold}% threshold")
+            recommendations.append(
+                f"⚠️ Accuracy improvement needed: {accuracy_percentage:.2f}% below {self.accuracy_threshold}% threshold"
+            )
             recommendations.append("Review data collection methods and AWS API alignment")
-
+
         if validation_metrics.execution_time_seconds > self.performance_targets["max_validation_time_seconds"]:
-            recommendations.append(f"⚡ Performance optimization needed: {validation_metrics.execution_time_seconds:.1f}s exceeds {self.performance_targets['max_validation_time_seconds']}s target")
-
+            recommendations.append(
+                f"⚡ Performance optimization needed: {validation_metrics.execution_time_seconds:.1f}s exceeds {self.performance_targets['max_validation_time_seconds']}s target"
+            )
+
         if validation_metrics.discrepancies_found > 0:
-            recommendations.append(f"🔍 Investigate {validation_metrics.discrepancies_found} discrepancies for accuracy improvement")
-
+            recommendations.append(
+                f"🔍 Investigate {validation_metrics.discrepancies_found} discrepancies for accuracy improvement"
+            )
+
         return recommendations
-
+
     def _validate_recommendations(
-        self,
-        recommendations_data: Dict[str, Any],
-        aws_profile: Optional[str]
+        self, recommendations_data: Dict[str, Any], aws_profile: Optional[str]
     ) -> Dict[str, Any]:
         """Validate optimization recommendations against current AWS state."""
-
+
         # Real validation of optimization recommendations
         return {
             "accuracy": 98.5,
             "recommendations_count": recommendations_data.get("count", 10),
             "invalid_recommendations": 1,
-            "validation_method": "Current state verification against AWS APIs"
+            "validation_method": "Current state verification against AWS APIs",
         }
-
+
     def _generate_evidence_artifacts(
         self,
         validation_id: str,
         comparison_result: Dict[str, Any],
         runbooks_data: Dict[str, Any],
-        aws_data: Dict[str, Any]
+        aws_data: Dict[str, Any],
     ) -> List[str]:
         """Generate evidence artifacts for audit trail."""
-
+
         artifacts = []
-
+
         if self.evidence_collection:
             # Create evidence directory
             evidence_dir = f"./tmp/mcp_validation_evidence/{validation_id}"
             os.makedirs(evidence_dir, exist_ok=True)
-
+
             # Save comparison results
             comparison_file = f"{evidence_dir}/comparison_results.json"
-            with open(comparison_file, 'w') as f:
+            with open(comparison_file, "w") as f:
                 json.dump(comparison_result, f, indent=2, default=str)
             artifacts.append(comparison_file)
-
+
             # Save raw data
             raw_data_file = f"{evidence_dir}/raw_data.json"
-            with open(raw_data_file, 'w') as f:
-                json.dump({
-                    "runbooks_data": runbooks_data,
-                    "aws_data": aws_data,
-                    "timestamp": datetime.now().isoformat()
-                }, f, indent=2, default=str)
+            with open(raw_data_file, "w") as f:
+                json.dump(
+                    {"runbooks_data": runbooks_data, "aws_data": aws_data, "timestamp": datetime.now().isoformat()},
+                    f,
+                    indent=2,
+                    default=str,
+                )
             artifacts.append(raw_data_file)
-
+
         return artifacts
-
+
     def _generate_validation_id(self, operation_name: str) -> str:
         """Generate unique validation ID."""
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
         hash_input = f"{operation_name}_{timestamp}_{self.accuracy_threshold}"
         hash_suffix = hashlib.md5(hash_input.encode()).hexdigest()[:8]
-
+
         return f"mcp_val_{operation_name}_{timestamp}_{hash_suffix}"
-
+
     def _create_validation_error(
-        self,
-        validation_id: str,
-        operation_name: str,
-        error_message: str,
-        execution_time: float
+        self, validation_id: str, operation_name: str, error_message: str, execution_time: float
     ) -> MCPValidationResult:
         """Create error result for failed validations."""
-
+
         validation_metrics = ValidationMetrics(
             validation_id=validation_id,
             operation_name=operation_name,
@@ -695,9 +694,9 @@ class MCPValidator:
             execution_time_seconds=execution_time,
             records_validated=0,
             discrepancies_found=1,
-            confidence_score=0.0
+            confidence_score=0.0,
         )
-
+
         return MCPValidationResult(
             validation_metrics=validation_metrics,
             business_impact={"error": error_message},
@@ -706,60 +705,55 @@ class MCPValidator:
             recommendations=[f"Resolve validation error: {error_message}"],
             quality_gates_status={"error_gate": False},
             raw_comparison_data={"error": error_message},
-            validation_evidence={}
+            validation_evidence={},
         )
 
 
-def create_enterprise_validator(
-    accuracy_threshold: float = 99.5,
-    evidence_collection: bool = True
-) -> MCPValidator:
+def create_enterprise_validator(accuracy_threshold: float = 99.5, evidence_collection: bool = True) -> MCPValidator:
     """
     Factory function to create enterprise MCP validator.
-
+
     Args:
         accuracy_threshold: Minimum accuracy percentage (default 99.5%)
         evidence_collection: Enable evidence collection
-
+
     Returns:
         Configured MCP validator instance
     """
     return MCPValidator(
         accuracy_threshold=accuracy_threshold,
         validation_scope=ValidationScope.ACCOUNT_WIDE,
-        evidence_collection=evidence_collection
+        evidence_collection=evidence_collection,
    )
 
 
 def main():
     """Demo MCP validation framework."""
-
+
     print_header("MCP Validation Framework Demo", "latest version")
-
+
     # Create validator
     validator = create_enterprise_validator(accuracy_threshold=99.5)
-
+
     # Demo cost validation
     demo_runbooks_data = {
-        'services': {
-            'EC2-Instance': {'cost': 145.50},
-            'S3': {'cost': 23.40},
-            'RDS': {'cost': 89.00}
-        },
-        'total_cost': 257.90
+        "services": {"EC2-Instance": {"cost": 145.50}, "S3": {"cost": 23.40}, "RDS": {"cost": 89.00}},
+        "total_cost": 257.90,
    }
-
+
     validation_result = validator.validate_cost_analysis(demo_runbooks_data)
-
+
     print_success(f"Demo Validation Complete: {validation_result.validation_metrics.accuracy_percentage:.2f}% accuracy")
-    print_success(f"Quality Gates: {sum(validation_result.quality_gates_status.values())}/{len(validation_result.quality_gates_status)} passed")
-
+    print_success(
+        f"Quality Gates: {sum(validation_result.quality_gates_status.values())}/{len(validation_result.quality_gates_status)} passed"
+    )
+
     # Generate summary
     summary = validator.generate_validation_summary()
     print_success(f"Validation Summary: {summary['performance_metrics']['average_accuracy']} average accuracy")
-
+
     return validation_result
 
 
 if __name__ == "__main__":
-    main()
+    main()