runbooks 1.1.7__py3-none-any.whl → 1.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. runbooks/__init__.py +1 -1
  2. runbooks/__init___optimized.py +2 -1
  3. runbooks/_platform/__init__.py +1 -1
  4. runbooks/cfat/cli.py +4 -3
  5. runbooks/cfat/cloud_foundations_assessment.py +1 -2
  6. runbooks/cfat/tests/test_cli.py +4 -1
  7. runbooks/cli/commands/finops.py +68 -19
  8. runbooks/cli/commands/inventory.py +838 -14
  9. runbooks/cli/commands/operate.py +65 -4
  10. runbooks/cli/commands/vpc.py +1 -1
  11. runbooks/cloudops/cost_optimizer.py +1 -3
  12. runbooks/common/cli_decorators.py +6 -4
  13. runbooks/common/config_loader.py +787 -0
  14. runbooks/common/config_schema.py +280 -0
  15. runbooks/common/dry_run_framework.py +14 -2
  16. runbooks/common/mcp_integration.py +238 -0
  17. runbooks/finops/ebs_cost_optimizer.py +7 -4
  18. runbooks/finops/elastic_ip_optimizer.py +7 -4
  19. runbooks/finops/infrastructure/__init__.py +3 -2
  20. runbooks/finops/infrastructure/commands.py +7 -4
  21. runbooks/finops/infrastructure/load_balancer_optimizer.py +7 -4
  22. runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +7 -4
  23. runbooks/finops/nat_gateway_optimizer.py +7 -4
  24. runbooks/finops/tests/run_tests.py +1 -1
  25. runbooks/inventory/ArgumentsClass.py +2 -1
  26. runbooks/inventory/CLAUDE.md +41 -0
  27. runbooks/inventory/README.md +210 -2
  28. runbooks/inventory/Tests/test_Inventory_Modules.py +27 -10
  29. runbooks/inventory/Tests/test_cfn_describe_stacks.py +18 -7
  30. runbooks/inventory/Tests/test_ec2_describe_instances.py +30 -15
  31. runbooks/inventory/Tests/test_lambda_list_functions.py +17 -3
  32. runbooks/inventory/Tests/test_org_list_accounts.py +17 -4
  33. runbooks/inventory/account_class.py +0 -1
  34. runbooks/inventory/all_my_instances_wrapper.py +4 -8
  35. runbooks/inventory/aws_organization.png +0 -0
  36. runbooks/inventory/check_cloudtrail_compliance.py +4 -4
  37. runbooks/inventory/check_controltower_readiness.py +50 -47
  38. runbooks/inventory/check_landingzone_readiness.py +35 -31
  39. runbooks/inventory/cloud_foundations_integration.py +8 -3
  40. runbooks/inventory/collectors/aws_compute.py +59 -11
  41. runbooks/inventory/collectors/aws_management.py +39 -5
  42. runbooks/inventory/core/collector.py +1655 -159
  43. runbooks/inventory/core/concurrent_paginator.py +511 -0
  44. runbooks/inventory/discovery.md +15 -6
  45. runbooks/inventory/{draw_org_structure.py → draw_org.py} +55 -9
  46. runbooks/inventory/drift_detection_cli.py +8 -68
  47. runbooks/inventory/find_cfn_drift_detection.py +14 -4
  48. runbooks/inventory/find_cfn_orphaned_stacks.py +7 -5
  49. runbooks/inventory/find_cfn_stackset_drift.py +5 -5
  50. runbooks/inventory/find_ec2_security_groups.py +6 -3
  51. runbooks/inventory/find_landingzone_versions.py +5 -5
  52. runbooks/inventory/find_vpc_flow_logs.py +5 -5
  53. runbooks/inventory/inventory.sh +20 -7
  54. runbooks/inventory/inventory_mcp_cli.py +4 -0
  55. runbooks/inventory/inventory_modules.py +9 -7
  56. runbooks/inventory/list_cfn_stacks.py +18 -8
  57. runbooks/inventory/list_cfn_stackset_operation_results.py +2 -2
  58. runbooks/inventory/list_cfn_stackset_operations.py +32 -20
  59. runbooks/inventory/list_cfn_stacksets.py +7 -4
  60. runbooks/inventory/list_config_recorders_delivery_channels.py +4 -4
  61. runbooks/inventory/list_ds_directories.py +3 -3
  62. runbooks/inventory/list_ec2_availability_zones.py +7 -3
  63. runbooks/inventory/list_ec2_ebs_volumes.py +3 -3
  64. runbooks/inventory/list_ec2_instances.py +1 -1
  65. runbooks/inventory/list_ecs_clusters_and_tasks.py +8 -4
  66. runbooks/inventory/list_elbs_load_balancers.py +7 -3
  67. runbooks/inventory/list_enis_network_interfaces.py +3 -3
  68. runbooks/inventory/list_guardduty_detectors.py +9 -5
  69. runbooks/inventory/list_iam_policies.py +7 -3
  70. runbooks/inventory/list_iam_roles.py +3 -3
  71. runbooks/inventory/list_iam_saml_providers.py +8 -4
  72. runbooks/inventory/list_lambda_functions.py +8 -4
  73. runbooks/inventory/list_org_accounts.py +306 -276
  74. runbooks/inventory/list_org_accounts_users.py +45 -9
  75. runbooks/inventory/list_rds_db_instances.py +4 -4
  76. runbooks/inventory/list_route53_hosted_zones.py +3 -3
  77. runbooks/inventory/list_servicecatalog_provisioned_products.py +5 -5
  78. runbooks/inventory/list_sns_topics.py +4 -4
  79. runbooks/inventory/list_ssm_parameters.py +6 -3
  80. runbooks/inventory/list_vpc_subnets.py +8 -4
  81. runbooks/inventory/list_vpcs.py +15 -4
  82. runbooks/inventory/mcp_inventory_validator.py +771 -134
  83. runbooks/inventory/mcp_vpc_validator.py +6 -0
  84. runbooks/inventory/organizations_discovery.py +17 -3
  85. runbooks/inventory/organizations_utils.py +553 -0
  86. runbooks/inventory/output_formatters.py +422 -0
  87. runbooks/inventory/recover_cfn_stack_ids.py +5 -5
  88. runbooks/inventory/run_on_multi_accounts.py +3 -3
  89. runbooks/inventory/tag_coverage.py +481 -0
  90. runbooks/inventory/validation_utils.py +358 -0
  91. runbooks/inventory/verify_ec2_security_groups.py +18 -5
  92. runbooks/inventory/vpc_architecture_validator.py +7 -1
  93. runbooks/inventory/vpc_dependency_analyzer.py +6 -0
  94. runbooks/main_final.py +2 -2
  95. runbooks/main_ultra_minimal.py +2 -2
  96. runbooks/mcp/integration.py +6 -4
  97. runbooks/remediation/acm_remediation.py +2 -2
  98. runbooks/remediation/cloudtrail_remediation.py +2 -2
  99. runbooks/remediation/cognito_remediation.py +2 -2
  100. runbooks/remediation/dynamodb_remediation.py +2 -2
  101. runbooks/remediation/ec2_remediation.py +2 -2
  102. runbooks/remediation/kms_remediation.py +2 -2
  103. runbooks/remediation/lambda_remediation.py +2 -2
  104. runbooks/remediation/rds_remediation.py +2 -2
  105. runbooks/remediation/s3_remediation.py +1 -1
  106. runbooks/vpc/cloudtrail_audit_integration.py +1 -1
  107. {runbooks-1.1.7.dist-info → runbooks-1.1.10.dist-info}/METADATA +74 -4
  108. {runbooks-1.1.7.dist-info → runbooks-1.1.10.dist-info}/RECORD +112 -105
  109. runbooks/__init__.py.backup +0 -134
  110. {runbooks-1.1.7.dist-info → runbooks-1.1.10.dist-info}/WHEEL +0 -0
  111. {runbooks-1.1.7.dist-info → runbooks-1.1.10.dist-info}/entry_points.txt +0 -0
  112. {runbooks-1.1.7.dist-info → runbooks-1.1.10.dist-info}/licenses/LICENSE +0 -0
  113. {runbooks-1.1.7.dist-info → runbooks-1.1.10.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,280 @@
1
+ """
2
+ Configuration schema validation for CloudOps Runbooks.
3
+
4
+ Provides JSON Schema validation and business rules for hierarchical
5
+ tag mapping configuration (user config > project config > env vars > defaults).
6
+
7
+ This module defines the validation schema for runbooks configuration files,
8
+ supporting hierarchical configuration loading with comprehensive validation
9
+ rules for AWS tag mappings, coverage requirements, and caching behavior.
10
+
11
+ Author: CloudOps-Runbooks Enterprise Team
12
+ Version: 1.1.10
13
+ """
14
+
15
+ from typing import Any, Dict, List
16
+
17
+ # =============================================================================
18
+ # JSON SCHEMA VALIDATION
19
+ # =============================================================================
20
+
21
# JSON Schema for the `runbooks` configuration document. Validates the
# hierarchical config (version, tag mappings, coverage thresholds, caching)
# before it is merged into runtime settings.
TAG_MAPPING_SCHEMA: Dict[str, Any] = {
    "type": "object",
    "properties": {
        "runbooks": {
            "type": "object",
            "properties": {
                "version": {
                    "type": "string",
                    "pattern": r"^\d+\.\d+\.\d+$",  # Semantic versioning (e.g., '1.1.10')
                    "description": "Config schema version following semantic versioning",
                },
                "inventory": {
                    "type": "object",
                    "properties": {
                        "tag_mappings": {
                            "type": "object",
                            "description": "Maps internal field names to AWS tag keys",
                            "patternProperties": {
                                "^[a-z_]+$": {  # Field names: lowercase + underscores only
                                    "type": "string",
                                    "minLength": 1,
                                    "maxLength": 128,  # AWS tag key maximum length
                                    "description": "AWS tag key name (1-128 characters)",
                                }
                            },
                            "additionalProperties": False,  # Strict validation - no unexpected fields
                        },
                        "tag_coverage": {
                            "type": "object",
                            "description": "Tag coverage analysis and reporting configuration",
                            "properties": {
                                "enabled": {
                                    "type": "boolean",
                                    "description": "Enable tag coverage analysis",
                                },
                                "minimum_tier1_coverage": {
                                    "type": "number",
                                    "minimum": 0.0,
                                    "maximum": 100.0,
                                    "description": "Minimum required coverage for Tier 1 tags (percentage)",
                                },
                                "minimum_tier2_coverage": {
                                    "type": "number",
                                    "minimum": 0.0,
                                    "maximum": 100.0,
                                    "description": "Minimum required coverage for Tier 2 tags (percentage)",
                                },
                                "display_recommendations": {
                                    "type": "boolean",
                                    "description": "Display tag coverage improvement recommendations",
                                },
                            },
                        },
                        "cache": {
                            "type": "object",
                            "description": "Inventory data caching configuration",
                            "properties": {
                                "enabled": {
                                    "type": "boolean",
                                    "description": "Enable inventory data caching",
                                },
                                "ttl_seconds": {
                                    "type": "integer",
                                    "minimum": 60,  # Minimum 1 minute
                                    "maximum": 86400,  # Maximum 24 hours
                                    "description": "Cache time-to-live in seconds (60-86400)",
                                },
                            },
                        },
                    },
                },
            },
        }
    },
}
96
+
97
+
98
+ # =============================================================================
99
+ # BUSINESS RULES AND VALIDATION CONSTANTS
100
+ # =============================================================================
101
+
102
# Business rules consulted by the helper functions below: the closed set of
# allowed mapping field names, AWS-reserved tag keys, and the tier grouping
# used for coverage analysis.
VALIDATION_RULES: Dict[str, Any] = {
    # Allowed field names for tag_mappings configuration
    # These field names follow the lowercase_with_underscores convention
    "allowed_field_names": [
        # TIER 1: Business Metadata (Critical for cost allocation and accountability)
        "wbs_code",  # Work Breakdown Structure code for project tracking
        "cost_group",  # Cost allocation group for financial reporting
        "technical_lead",  # Technical point of contact
        "account_owner",  # AWS account ownership
        # TIER 2: Governance Metadata (Important for organizational structure)
        "business_unit",  # Business unit or division
        "functional_area",  # Functional area within organization
        "managed_by",  # Management responsibility
        "product_owner",  # Product ownership
        # TIER 3: Operational Metadata (Standard operational requirements)
        "purpose",  # Resource purpose or description
        "environment",  # Environment classification (dev, staging, prod)
        "compliance_scope",  # Compliance framework requirements
        "data_classification",  # Data sensitivity classification
        # TIER 4: Extended Metadata (Optional supplementary information)
        "project_name",  # Project name or identifier
        "budget_code",  # Budget allocation code
        "support_tier",  # Support tier classification
        "created_date",  # Resource creation date
        "expiry_date",  # Resource expiration or review date
    ],
    # AWS reserved tag keys that cannot be used for custom mappings
    "reserved_tag_keys": [
        "Name",  # AWS reserved tag key
        "aws:",  # AWS reserved prefix (check using startswith)
    ],
    # Tier definitions for coverage analysis and reporting
    # Maps tier names to their constituent field names
    "tier_definitions": {
        "tier_1": [
            "wbs_code",
            "cost_group",
            "technical_lead",
            "account_owner",
        ],
        "tier_2": [
            "business_unit",
            "functional_area",
            "managed_by",
            "product_owner",
        ],
        "tier_3": [
            "purpose",
            "environment",
            "compliance_scope",
            "data_classification",
        ],
        "tier_4": [
            "project_name",
            "budget_code",
            "support_tier",
            "created_date",
            "expiry_date",
        ],
    },
}
163
+
164
+
165
+ # =============================================================================
166
+ # HELPER FUNCTIONS
167
+ # =============================================================================
168
+
169
+
170
def get_tier_for_field(field_name: str) -> str:
    """
    Return the tier classification for a given field name.

    Args:
        field_name: The field name to classify (e.g., 'wbs_code', 'environment')

    Returns:
        One of 'tier_1', 'tier_2', 'tier_3', 'tier_4', or 'unknown' when the
        field does not appear in any tier definition.

    Example:
        >>> get_tier_for_field('wbs_code')
        'tier_1'
        >>> get_tier_for_field('environment')
        'tier_3'
    """
    tier_table = VALIDATION_RULES["tier_definitions"]
    hits = (tier for tier, members in tier_table.items() if field_name in members)
    return next(hits, "unknown")
190
+
191
+
192
def is_reserved_tag_key(tag_key: str) -> bool:
    """
    Check if a tag key is reserved by AWS and cannot be used for custom mappings.

    Reserved tag keys include:
        - 'Name' (AWS standard tag)
        - Any key starting with 'aws:' (AWS system tags)

    Entries in VALIDATION_RULES['reserved_tag_keys'] that end with ':' are
    treated as prefixes (the config entry 'aws:' documents itself as a
    startswith check); all other entries require an exact match. Previously
    the 'aws:' prefix was hard-coded separately while the config entry was a
    dead exact-match test — this keeps the two in sync.

    Args:
        tag_key: The AWS tag key to validate

    Returns:
        True if the tag key is reserved, False otherwise

    Example:
        >>> is_reserved_tag_key('Name')
        True
        >>> is_reserved_tag_key('aws:cloudformation:stack-name')
        True
        >>> is_reserved_tag_key('CostCenter')
        False
    """
    for reserved in VALIDATION_RULES["reserved_tag_keys"]:
        if reserved.endswith(":"):
            # Prefix-style entry (e.g. 'aws:'): match any key under that prefix.
            if tag_key.startswith(reserved):
                return True
        elif tag_key == reserved:
            return True
    return False
221
+
222
+
223
def get_allowed_field_names() -> List[str]:
    """
    Get the complete list of allowed field names for tag mappings.

    Returns a defensive copy so callers cannot accidentally mutate the
    shared VALIDATION_RULES table (the previous implementation handed out
    the internal list object itself).

    Returns:
        List of allowed field names across all tiers

    Example:
        >>> fields = get_allowed_field_names()
        >>> 'wbs_code' in fields
        True
        >>> len(fields) > 0
        True
    """
    return list(VALIDATION_RULES["allowed_field_names"])
238
+
239
+
240
def validate_field_name_format(field_name: str) -> bool:
    """
    Validate that a field name follows the required format.

    Field names must:
        - Use lowercase letters only
        - Use underscores for word separation
        - Match pattern: ^[a-z_]+$

    Uses re.fullmatch so the whole string must conform; the previous
    re.match with a '$' anchor accepted a trailing newline
    (e.g. 'wbs_code\\n'), since '$' matches just before a final newline.

    Args:
        field_name: The field name to validate

    Returns:
        True if field name format is valid, False otherwise

    Example:
        >>> validate_field_name_format('wbs_code')
        True
        >>> validate_field_name_format('WBS_Code')
        False
        >>> validate_field_name_format('wbs-code')
        False
    """
    import re

    return re.fullmatch(r"[a-z_]+", field_name) is not None
267
+
268
+
269
+ # =============================================================================
270
+ # MODULE METADATA
271
+ # =============================================================================
272
+
273
# Public API: the schema constant, the business-rule table, and the helpers.
__all__ = [
    "TAG_MAPPING_SCHEMA",
    "VALIDATION_RULES",
    "get_tier_for_field",
    "is_reserved_tag_key",
    "get_allowed_field_names",
    "validate_field_name_format",
]
@@ -110,6 +110,12 @@ class DryRunSafetyFramework:
110
110
  "simulation_mode": False,
111
111
  "warning_message": None,
112
112
  },
113
+ OperationType.REPORTING: {
114
+ "default_dry_run": False, # Reporting is read-only
115
+ "requires_confirmation": False,
116
+ "simulation_mode": False, # Real API calls for report generation
117
+ "warning_message": None,
118
+ },
113
119
  OperationType.RESOURCE_CREATE: {
114
120
  "default_dry_run": True, # Safety-first for resource creation
115
121
  "requires_confirmation": True,
@@ -128,6 +134,12 @@ class DryRunSafetyFramework:
128
134
  "simulation_mode": True,
129
135
  "warning_message": "🚨 RESOURCE DELETION: This will permanently delete AWS resources",
130
136
  },
137
+ OperationType.CONFIGURATION: {
138
+ "default_dry_run": True, # Safety-first for configuration changes
139
+ "requires_confirmation": True,
140
+ "simulation_mode": True,
141
+ "warning_message": "⚙️ CONFIGURATION CHANGE: This will modify settings and policies",
142
+ },
131
143
  OperationType.REMEDIATION: {
132
144
  "default_dry_run": True, # Safety-first for remediation
133
145
  "requires_confirmation": True,
@@ -323,7 +335,7 @@ class DryRunSafetyFramework:
323
335
  log_entry = {
324
336
  "mode": mode,
325
337
  "operation_type": context.operation_type.value,
326
- "module": context.module_name,
338
+ "operation_module": context.module_name,
327
339
  "operation": context.operation_name,
328
340
  "target_count": len(context.target_resources),
329
341
  "safety_level": context.safety_level,
@@ -386,7 +398,7 @@ class DryRunSafetyFramework:
386
398
  self.logger.info(
387
399
  f"DryRun {event}",
388
400
  extra={
389
- "module": context.module_name,
401
+ "operation_module": context.module_name,
390
402
  "operation": context.operation_name,
391
403
  "dry_run": context.enabled,
392
404
  **data,
@@ -64,6 +64,51 @@ from runbooks.common.rich_utils import (
64
64
  )
65
65
 
66
66
 
67
# Custom Exception Hierarchy for MCP Validation
class MCPValidationError(Exception):
    """Root of the MCP validation exception hierarchy."""


class MCPTypeError(MCPValidationError):
    """Raised when an MCP/boto3 client argument has the wrong type."""

    def __init__(self, expected_type: str, actual_type: str, remediation: str):
        self.expected_type = expected_type
        self.actual_type = actual_type
        self.remediation = remediation
        message = (
            f"MCP client type error: Expected {expected_type}, got {actual_type}. "
            f"Remediation: {remediation}"
        )
        super().__init__(message)


class MCPAccuracyError(MCPValidationError):
    """Raised when measured MCP accuracy falls below the required threshold."""

    def __init__(self, accuracy: float, threshold: float, mismatched_fields: List[str]):
        self.accuracy = accuracy
        self.threshold = threshold
        self.mismatched_fields = mismatched_fields
        fields = ', '.join(mismatched_fields)
        message = (
            f"MCP accuracy {accuracy:.2f}% below threshold {threshold:.2f}%. "
            f"Mismatched fields: {fields}. "
            f"Remediation: Check MCP server version compatibility with boto3."
        )
        super().__init__(message)


class MCPConnectionError(MCPValidationError):
    """Raised when a connection to an MCP server cannot be established."""

    def __init__(self, server_name: str, error_details: str):
        self.server_name = server_name
        self.error_details = error_details
        message = (
            f"MCP server '{server_name}' connection failed: {error_details}. "
            f"Remediation: Verify MCP server running via 'uvx {server_name}@latest --version'."
        )
        super().__init__(message)
110
+
111
+
67
112
  class MCPOperationType(Enum):
68
113
  """MCP operation types for different modules."""
69
114
 
@@ -181,6 +226,195 @@ class EnterpriseMCPIntegrator:
181
226
  except Exception as e:
182
227
  print_error(f"Failed to initialize {profile_type} profile: {str(e)}")
183
228
 
229
+ def _validate_boto3_client(self, client: Any, required_method: str = 'get_caller_identity') -> bool:
230
+ """
231
+ Defensive type checking for boto3 clients - prevents type confusion.
232
+
233
+ Historical Context:
234
+ - Oct 4, 2025: Type confusion incident (dicts passed instead of clients)
235
+ - Reported accuracy: 99.8% | True accuracy: 0.0%
236
+ - Root cause: AttributeError on dict.get_caller_identity() caught silently
237
+
238
+ Args:
239
+ client: Object to validate (should be boto3 client)
240
+ required_method: Method that must exist on client
241
+
242
+ Returns:
243
+ bool: True if valid client
244
+
245
+ Raises:
246
+ MCPTypeError: If client is not a boto3 client or lacks required method
247
+
248
+ Reference: @.claude/lessons-learned/quality-gate-violations.md lines 30-51
249
+ """
250
+ # CRITICAL: Check if it's a dict (common historical mistake)
251
+ if isinstance(client, dict):
252
+ raise MCPTypeError(
253
+ expected_type="boto3.client.BaseClient",
254
+ actual_type="dict",
255
+ remediation="Use session.client('service_name') to create proper boto3 client"
256
+ )
257
+
258
+ # Check if it's a string
259
+ if isinstance(client, str):
260
+ raise MCPTypeError(
261
+ expected_type="boto3.client.BaseClient",
262
+ actual_type="str",
263
+ remediation=f"String '{client}' is a service name. Use session.client('{client}') to create client"
264
+ )
265
+
266
+ # Verify it's a boto3 client
267
+ try:
268
+ import botocore.client
269
+ if not isinstance(client, botocore.client.BaseClient):
270
+ raise MCPTypeError(
271
+ expected_type="boto3.client.BaseClient",
272
+ actual_type=type(client).__name__,
273
+ remediation="Ensure you're passing a valid boto3 client object"
274
+ )
275
+ except ImportError:
276
+ # Fallback if botocore not available (shouldn't happen in production)
277
+ print_warning("botocore not available for type checking - using hasattr check only")
278
+
279
+ # Check if it has required method
280
+ if not hasattr(client, required_method):
281
+ raise AttributeError(
282
+ f"Client {type(client).__name__} missing required method '{required_method}'. "
283
+ f"Remediation: Use correct service name for intended operation."
284
+ )
285
+
286
+ return True
287
+
288
+ def calculate_true_accuracy(
289
+ self,
290
+ mcp_result: Dict[str, Any],
291
+ boto3_result: Dict[str, Any],
292
+ comparison_fields: List[str]
293
+ ) -> float:
294
+ """
295
+ Calculate true accuracy by comparing MCP vs native boto3 results.
296
+
297
+ Historical Context:
298
+ - October 4, 2025: Reported 99.8% accuracy was actually 0.0%
299
+ - Root cause: No cross-validation against boto3 responses
300
+ - This method prevents recurrence via field-by-field comparison
301
+
302
+ Args:
303
+ mcp_result: Result from MCP server API call
304
+ boto3_result: Result from native boto3 API call
305
+ comparison_fields: Fields to compare for accuracy
306
+
307
+ Returns:
308
+ Accuracy percentage (0.0 to 100.0)
309
+
310
+ Raises:
311
+ ValueError: If results cannot be compared
312
+ """
313
+ from decimal import Decimal
314
+
315
+ # Validate both results are dicts
316
+ if not isinstance(mcp_result, dict) or not isinstance(boto3_result, dict):
317
+ raise ValueError(
318
+ f"Cannot compare accuracy: MCP result type={type(mcp_result).__name__}, "
319
+ f"boto3 result type={type(boto3_result).__name__}"
320
+ )
321
+
322
+ # Compare each field
323
+ total_fields = len(comparison_fields)
324
+ matching_fields = 0
325
+
326
+ for field in comparison_fields:
327
+ mcp_value = mcp_result.get(field)
328
+ boto3_value = boto3_result.get(field)
329
+
330
+ # Handle Decimal precision for financial values
331
+ if isinstance(mcp_value, (float, Decimal)) and isinstance(boto3_value, (float, Decimal)):
332
+ mcp_decimal = Decimal(str(mcp_value))
333
+ boto3_decimal = Decimal(str(boto3_value))
334
+ # ±0.01% tolerance for financial accuracy
335
+ tolerance = boto3_decimal * Decimal('0.0001')
336
+ if abs(mcp_decimal - boto3_decimal) <= tolerance:
337
+ matching_fields += 1
338
+ else:
339
+ # Exact match for non-financial fields
340
+ if mcp_value == boto3_value:
341
+ matching_fields += 1
342
+
343
+ # Calculate accuracy
344
+ accuracy = (matching_fields / total_fields) * 100.0 if total_fields > 0 else 0.0
345
+
346
+ return accuracy
347
+
348
    async def validate_with_cross_check(
        self,
        operation: str,
        mcp_client: Any,
        boto3_client: Any,
        params: Dict[str, Any],
        comparison_fields: List[str]
    ) -> Dict[str, Any]:
        """
        Execute *operation* via both MCP and boto3, then score their agreement.

        Args:
            operation: API operation to execute (e.g., 'get_cost_and_usage')
            mcp_client: MCP client instance
            boto3_client: Native boto3 client for cross-validation
            params: Parameters for API operation
            comparison_fields: Fields to compare between MCP and boto3 results

        Returns:
            {
                'mcp_result': {...},
                'boto3_result': {...},
                'accuracy': 99.7,
                'threshold_met': True,  # ≥99.5%
                'comparison_fields': [...],
                'matches': ['field1', 'field2'],
                'mismatches': []
            }

        Raises:
            MCPTypeError: If *boto3_client* is not a real boto3 client
            ValueError: If the two results cannot be compared (via
                calculate_true_accuracy)
        """
        # Fail fast if the cross-validation client is not a genuine boto3 client.
        self._validate_boto3_client(boto3_client)

        # Execute via MCP; the client may expose a sync or async method.
        mcp_method = getattr(mcp_client, operation)
        if asyncio.iscoroutinefunction(mcp_method):
            mcp_result = await mcp_method(**params)
        else:
            mcp_result = mcp_method(**params)

        # Execute the same operation natively via boto3 (always synchronous).
        boto3_method = getattr(boto3_client, operation)
        boto3_result = boto3_method(**params)

        # Field-by-field accuracy (numeric fields compared with tolerance).
        accuracy = self.calculate_true_accuracy(
            mcp_result,
            boto3_result,
            comparison_fields
        )

        # NOTE(review): this listing uses exact equality, so a numeric field
        # that falls inside calculate_true_accuracy's tolerance can still be
        # reported here as a mismatch — confirm whether that is intended.
        matches = []
        mismatches = []
        for field in comparison_fields:
            mcp_value = mcp_result.get(field)
            boto3_value = boto3_result.get(field)
            if mcp_value == boto3_value:
                matches.append(field)
            else:
                mismatches.append(field)

        return {
            'mcp_result': mcp_result,
            'boto3_result': boto3_result,
            'accuracy': accuracy,
            'threshold_met': accuracy >= 99.5,  # enterprise accuracy gate
            'comparison_fields': comparison_fields,
            'matches': matches,
            'mismatches': mismatches
        }
417
+
184
418
  def _is_organizations_cache_valid(self) -> bool:
185
419
  """Check if Organizations cache is still valid."""
186
420
  if not self._organizations_cache_timestamp:
@@ -786,4 +1020,8 @@ __all__ = [
786
1020
  "EnterpriseMCPIntegrator",
787
1021
  "MCPOperationType",
788
1022
  "MCPValidationResult",
1023
+ "MCPValidationError",
1024
+ "MCPTypeError",
1025
+ "MCPAccuracyError",
1026
+ "MCPConnectionError",
789
1027
  ]
@@ -976,11 +976,14 @@ class EBSCostOptimizer:
976
976
  @click.option("--regions", multiple=True, help="AWS regions to analyze (space-separated)")
977
977
  @click.option("--dry-run/--no-dry-run", default=True, help="Execute in dry-run mode (READ-ONLY analysis)")
978
978
  @click.option(
979
- "--export-format", type=click.Choice(["json", "csv", "markdown"]), default="json", help="Export format for results"
979
+ "-f", "--format", "--export-format",
980
+ type=click.Choice(["json", "csv", "markdown"]),
981
+ default="json",
982
+ help="Export format for results (-f/--format preferred, --export-format legacy)"
980
983
  )
981
984
  @click.option("--output-file", help="Output file path for results export")
982
985
  @click.option("--usage-threshold-days", type=int, default=7, help="CloudWatch analysis period in days")
983
- def ebs_optimizer(profile, regions, dry_run, export_format, output_file, usage_threshold_days):
986
+ def ebs_optimizer(profile, regions, dry_run, format, output_file, usage_threshold_days):
984
987
  """
985
988
  EBS Volume Optimizer - Enterprise Multi-Region Storage Analysis
986
989
 
@@ -1006,8 +1009,8 @@ def ebs_optimizer(profile, regions, dry_run, export_format, output_file, usage_t
1006
1009
  results = asyncio.run(optimizer.analyze_ebs_volumes(dry_run=dry_run))
1007
1010
 
1008
1011
  # Export results if requested
1009
- if output_file or export_format != "json":
1010
- optimizer.export_results(results, output_file, export_format)
1012
+ if output_file or format != "json":
1013
+ optimizer.export_results(results, output_file, format)
1011
1014
 
1012
1015
  # Display final success message
1013
1016
  if results.total_potential_annual_savings > 0:
@@ -692,10 +692,13 @@ class ElasticIPOptimizer:
692
692
  @click.option("--regions", multiple=True, help="AWS regions to analyze (space-separated)")
693
693
  @click.option("--dry-run/--no-dry-run", default=True, help="Execute in dry-run mode (READ-ONLY analysis)")
694
694
  @click.option(
695
- "--export-format", type=click.Choice(["json", "csv", "markdown"]), default="json", help="Export format for results"
695
+ "-f", "--format", "--export-format",
696
+ type=click.Choice(["json", "csv", "markdown"]),
697
+ default="json",
698
+ help="Export format for results (-f/--format preferred, --export-format legacy)"
696
699
  )
697
700
  @click.option("--output-file", help="Output file path for results export")
698
- def elastic_ip_optimizer(profile, regions, dry_run, export_format, output_file):
701
+ def elastic_ip_optimizer(profile, regions, dry_run, format, output_file):
699
702
  """
700
703
  Elastic IP Cost Optimizer - Enterprise Multi-Region Analysis
701
704
 
@@ -716,8 +719,8 @@ def elastic_ip_optimizer(profile, regions, dry_run, export_format, output_file):
716
719
  results = asyncio.run(optimizer.analyze_elastic_ips(dry_run=dry_run))
717
720
 
718
721
  # Export results if requested
719
- if output_file or export_format != "json":
720
- optimizer.export_results(results, output_file, export_format)
722
+ if output_file or format != "json":
723
+ optimizer.export_results(results, output_file, format)
721
724
 
722
725
  # Display final success message
723
726
  if results.potential_annual_savings > 0:
@@ -73,8 +73,9 @@ EPIC_2_TARGETS = {
73
73
  "total": 210147.0,
74
74
  }
75
75
 
76
- # Module metadata
77
- __version__ = "1.1.5"
76
+ # Module metadata - version imported from central source
77
+ from runbooks import __version__
78
+
78
79
  __epic__ = "Epic 2 Infrastructure Optimization"
79
80
  __target_savings__ = "$210,147 annual"
80
81
  __status__ = "Production Ready"
@@ -358,10 +358,13 @@ def infrastructure():
358
358
  )
359
359
  @click.option("--dry-run/--no-dry-run", default=True, help="Execute in dry-run mode (READ-ONLY analysis)")
360
360
  @click.option(
361
- "--export-format", type=click.Choice(["json", "csv", "markdown"]), default="json", help="Export format for results"
361
+ "-f", "--format", "--export-format",
362
+ type=click.Choice(["json", "csv", "markdown"]),
363
+ default="json",
364
+ help="Export format for results (-f/--format preferred, --export-format legacy)"
362
365
  )
363
366
  @click.option("--output-file", help="Output file path for results export")
364
- def analyze(profile, regions, components, dry_run, export_format, output_file):
367
+ def analyze(profile, regions, components, dry_run, format, output_file):
365
368
  """
366
369
  Comprehensive Infrastructure Optimization Analysis - Epic 2
367
370
 
@@ -391,8 +394,8 @@ def analyze(profile, regions, components, dry_run, export_format, output_file):
391
394
  )
392
395
 
393
396
  # Export results if requested (implementation would go here)
394
- if output_file or export_format != "json":
395
- print_info(f"Export functionality available - results ready for {export_format} export")
397
+ if output_file or format != "json":
398
+ print_info(f"Export functionality available - results ready for {format} export")
396
399
 
397
400
  # Display final success message
398
401
  if results.epic_2_target_achieved: