runbooks-1.1.4-py3-none-any.whl → runbooks-1.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (273)
  1. runbooks/__init__.py +31 -2
  2. runbooks/__init___optimized.py +18 -4
  3. runbooks/_platform/__init__.py +1 -5
  4. runbooks/_platform/core/runbooks_wrapper.py +141 -138
  5. runbooks/aws2/accuracy_validator.py +812 -0
  6. runbooks/base.py +7 -0
  7. runbooks/cfat/assessment/compliance.py +1 -1
  8. runbooks/cfat/assessment/runner.py +1 -0
  9. runbooks/cfat/cloud_foundations_assessment.py +227 -239
  10. runbooks/cli/__init__.py +1 -1
  11. runbooks/cli/commands/cfat.py +64 -23
  12. runbooks/cli/commands/finops.py +1005 -54
  13. runbooks/cli/commands/inventory.py +135 -91
  14. runbooks/cli/commands/operate.py +9 -36
  15. runbooks/cli/commands/security.py +42 -18
  16. runbooks/cli/commands/validation.py +432 -18
  17. runbooks/cli/commands/vpc.py +81 -17
  18. runbooks/cli/registry.py +22 -10
  19. runbooks/cloudops/__init__.py +20 -27
  20. runbooks/cloudops/base.py +96 -107
  21. runbooks/cloudops/cost_optimizer.py +544 -542
  22. runbooks/cloudops/infrastructure_optimizer.py +5 -4
  23. runbooks/cloudops/interfaces.py +224 -225
  24. runbooks/cloudops/lifecycle_manager.py +5 -4
  25. runbooks/cloudops/mcp_cost_validation.py +252 -235
  26. runbooks/cloudops/models.py +78 -53
  27. runbooks/cloudops/monitoring_automation.py +5 -4
  28. runbooks/cloudops/notebook_framework.py +177 -213
  29. runbooks/cloudops/security_enforcer.py +125 -159
  30. runbooks/common/accuracy_validator.py +17 -12
  31. runbooks/common/aws_pricing.py +349 -326
  32. runbooks/common/aws_pricing_api.py +211 -212
  33. runbooks/common/aws_profile_manager.py +40 -36
  34. runbooks/common/aws_utils.py +74 -79
  35. runbooks/common/business_logic.py +126 -104
  36. runbooks/common/cli_decorators.py +36 -60
  37. runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
  38. runbooks/common/cross_account_manager.py +197 -204
  39. runbooks/common/date_utils.py +27 -39
  40. runbooks/common/decorators.py +29 -19
  41. runbooks/common/dry_run_examples.py +173 -208
  42. runbooks/common/dry_run_framework.py +157 -155
  43. runbooks/common/enhanced_exception_handler.py +15 -4
  44. runbooks/common/enhanced_logging_example.py +50 -64
  45. runbooks/common/enhanced_logging_integration_example.py +65 -37
  46. runbooks/common/env_utils.py +16 -16
  47. runbooks/common/error_handling.py +40 -38
  48. runbooks/common/lazy_loader.py +41 -23
  49. runbooks/common/logging_integration_helper.py +79 -86
  50. runbooks/common/mcp_cost_explorer_integration.py +476 -493
  51. runbooks/common/mcp_integration.py +99 -79
  52. runbooks/common/memory_optimization.py +140 -118
  53. runbooks/common/module_cli_base.py +37 -58
  54. runbooks/common/organizations_client.py +175 -193
  55. runbooks/common/patterns.py +23 -25
  56. runbooks/common/performance_monitoring.py +67 -71
  57. runbooks/common/performance_optimization_engine.py +283 -274
  58. runbooks/common/profile_utils.py +111 -37
  59. runbooks/common/rich_utils.py +315 -141
  60. runbooks/common/sre_performance_suite.py +177 -186
  61. runbooks/enterprise/__init__.py +1 -1
  62. runbooks/enterprise/logging.py +144 -106
  63. runbooks/enterprise/security.py +187 -204
  64. runbooks/enterprise/validation.py +43 -56
  65. runbooks/finops/__init__.py +26 -30
  66. runbooks/finops/account_resolver.py +1 -1
  67. runbooks/finops/advanced_optimization_engine.py +980 -0
  68. runbooks/finops/automation_core.py +268 -231
  69. runbooks/finops/business_case_config.py +184 -179
  70. runbooks/finops/cli.py +660 -139
  71. runbooks/finops/commvault_ec2_analysis.py +157 -164
  72. runbooks/finops/compute_cost_optimizer.py +336 -320
  73. runbooks/finops/config.py +20 -20
  74. runbooks/finops/cost_optimizer.py +484 -618
  75. runbooks/finops/cost_processor.py +332 -214
  76. runbooks/finops/dashboard_runner.py +1006 -172
  77. runbooks/finops/ebs_cost_optimizer.py +991 -657
  78. runbooks/finops/elastic_ip_optimizer.py +317 -257
  79. runbooks/finops/enhanced_mcp_integration.py +340 -0
  80. runbooks/finops/enhanced_progress.py +32 -29
  81. runbooks/finops/enhanced_trend_visualization.py +3 -2
  82. runbooks/finops/enterprise_wrappers.py +223 -285
  83. runbooks/finops/executive_export.py +203 -160
  84. runbooks/finops/helpers.py +130 -288
  85. runbooks/finops/iam_guidance.py +1 -1
  86. runbooks/finops/infrastructure/__init__.py +80 -0
  87. runbooks/finops/infrastructure/commands.py +506 -0
  88. runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
  89. runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
  90. runbooks/finops/markdown_exporter.py +337 -174
  91. runbooks/finops/mcp_validator.py +1952 -0
  92. runbooks/finops/nat_gateway_optimizer.py +1512 -481
  93. runbooks/finops/network_cost_optimizer.py +657 -587
  94. runbooks/finops/notebook_utils.py +226 -188
  95. runbooks/finops/optimization_engine.py +1136 -0
  96. runbooks/finops/optimizer.py +19 -23
  97. runbooks/finops/rds_snapshot_optimizer.py +367 -411
  98. runbooks/finops/reservation_optimizer.py +427 -363
  99. runbooks/finops/scenario_cli_integration.py +64 -65
  100. runbooks/finops/scenarios.py +1277 -438
  101. runbooks/finops/schemas.py +218 -182
  102. runbooks/finops/snapshot_manager.py +2289 -0
  103. runbooks/finops/types.py +3 -3
  104. runbooks/finops/validation_framework.py +259 -265
  105. runbooks/finops/vpc_cleanup_exporter.py +189 -144
  106. runbooks/finops/vpc_cleanup_optimizer.py +591 -573
  107. runbooks/finops/workspaces_analyzer.py +171 -182
  108. runbooks/integration/__init__.py +89 -0
  109. runbooks/integration/mcp_integration.py +1920 -0
  110. runbooks/inventory/CLAUDE.md +816 -0
  111. runbooks/inventory/__init__.py +2 -2
  112. runbooks/inventory/aws_decorators.py +2 -3
  113. runbooks/inventory/check_cloudtrail_compliance.py +2 -4
  114. runbooks/inventory/check_controltower_readiness.py +152 -151
  115. runbooks/inventory/check_landingzone_readiness.py +85 -84
  116. runbooks/inventory/cloud_foundations_integration.py +144 -149
  117. runbooks/inventory/collectors/aws_comprehensive.py +1 -1
  118. runbooks/inventory/collectors/aws_networking.py +109 -99
  119. runbooks/inventory/collectors/base.py +4 -0
  120. runbooks/inventory/core/collector.py +495 -313
  121. runbooks/inventory/core/formatter.py +11 -0
  122. runbooks/inventory/draw_org_structure.py +8 -9
  123. runbooks/inventory/drift_detection_cli.py +69 -96
  124. runbooks/inventory/ec2_vpc_utils.py +2 -2
  125. runbooks/inventory/find_cfn_drift_detection.py +5 -7
  126. runbooks/inventory/find_cfn_orphaned_stacks.py +7 -9
  127. runbooks/inventory/find_cfn_stackset_drift.py +5 -6
  128. runbooks/inventory/find_ec2_security_groups.py +48 -42
  129. runbooks/inventory/find_landingzone_versions.py +4 -6
  130. runbooks/inventory/find_vpc_flow_logs.py +7 -9
  131. runbooks/inventory/inventory_mcp_cli.py +48 -46
  132. runbooks/inventory/inventory_modules.py +103 -91
  133. runbooks/inventory/list_cfn_stacks.py +9 -10
  134. runbooks/inventory/list_cfn_stackset_operation_results.py +1 -3
  135. runbooks/inventory/list_cfn_stackset_operations.py +79 -57
  136. runbooks/inventory/list_cfn_stacksets.py +8 -10
  137. runbooks/inventory/list_config_recorders_delivery_channels.py +49 -39
  138. runbooks/inventory/list_ds_directories.py +65 -53
  139. runbooks/inventory/list_ec2_availability_zones.py +2 -4
  140. runbooks/inventory/list_ec2_ebs_volumes.py +32 -35
  141. runbooks/inventory/list_ec2_instances.py +23 -28
  142. runbooks/inventory/list_ecs_clusters_and_tasks.py +26 -34
  143. runbooks/inventory/list_elbs_load_balancers.py +22 -20
  144. runbooks/inventory/list_enis_network_interfaces.py +26 -33
  145. runbooks/inventory/list_guardduty_detectors.py +2 -4
  146. runbooks/inventory/list_iam_policies.py +2 -4
  147. runbooks/inventory/list_iam_roles.py +5 -7
  148. runbooks/inventory/list_iam_saml_providers.py +4 -6
  149. runbooks/inventory/list_lambda_functions.py +38 -38
  150. runbooks/inventory/list_org_accounts.py +6 -8
  151. runbooks/inventory/list_org_accounts_users.py +55 -44
  152. runbooks/inventory/list_rds_db_instances.py +31 -33
  153. runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
  154. runbooks/inventory/list_route53_hosted_zones.py +3 -5
  155. runbooks/inventory/list_servicecatalog_provisioned_products.py +37 -41
  156. runbooks/inventory/list_sns_topics.py +2 -4
  157. runbooks/inventory/list_ssm_parameters.py +4 -7
  158. runbooks/inventory/list_vpc_subnets.py +2 -4
  159. runbooks/inventory/list_vpcs.py +7 -10
  160. runbooks/inventory/mcp_inventory_validator.py +554 -468
  161. runbooks/inventory/mcp_vpc_validator.py +359 -442
  162. runbooks/inventory/organizations_discovery.py +63 -55
  163. runbooks/inventory/recover_cfn_stack_ids.py +7 -8
  164. runbooks/inventory/requirements.txt +0 -1
  165. runbooks/inventory/rich_inventory_display.py +35 -34
  166. runbooks/inventory/run_on_multi_accounts.py +3 -5
  167. runbooks/inventory/unified_validation_engine.py +281 -253
  168. runbooks/inventory/verify_ec2_security_groups.py +1 -1
  169. runbooks/inventory/vpc_analyzer.py +735 -697
  170. runbooks/inventory/vpc_architecture_validator.py +293 -348
  171. runbooks/inventory/vpc_dependency_analyzer.py +384 -380
  172. runbooks/inventory/vpc_flow_analyzer.py +1 -1
  173. runbooks/main.py +49 -34
  174. runbooks/main_final.py +91 -60
  175. runbooks/main_minimal.py +22 -10
  176. runbooks/main_optimized.py +131 -100
  177. runbooks/main_ultra_minimal.py +7 -2
  178. runbooks/mcp/__init__.py +36 -0
  179. runbooks/mcp/integration.py +679 -0
  180. runbooks/monitoring/performance_monitor.py +9 -4
  181. runbooks/operate/dynamodb_operations.py +3 -1
  182. runbooks/operate/ec2_operations.py +145 -137
  183. runbooks/operate/iam_operations.py +146 -152
  184. runbooks/operate/networking_cost_heatmap.py +29 -8
  185. runbooks/operate/rds_operations.py +223 -254
  186. runbooks/operate/s3_operations.py +107 -118
  187. runbooks/operate/vpc_operations.py +646 -616
  188. runbooks/remediation/base.py +1 -1
  189. runbooks/remediation/commons.py +10 -7
  190. runbooks/remediation/commvault_ec2_analysis.py +70 -66
  191. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
  192. runbooks/remediation/multi_account.py +24 -21
  193. runbooks/remediation/rds_snapshot_list.py +86 -60
  194. runbooks/remediation/remediation_cli.py +92 -146
  195. runbooks/remediation/universal_account_discovery.py +83 -79
  196. runbooks/remediation/workspaces_list.py +46 -41
  197. runbooks/security/__init__.py +19 -0
  198. runbooks/security/assessment_runner.py +1150 -0
  199. runbooks/security/baseline_checker.py +812 -0
  200. runbooks/security/cloudops_automation_security_validator.py +509 -535
  201. runbooks/security/compliance_automation_engine.py +17 -17
  202. runbooks/security/config/__init__.py +2 -2
  203. runbooks/security/config/compliance_config.py +50 -50
  204. runbooks/security/config_template_generator.py +63 -76
  205. runbooks/security/enterprise_security_framework.py +1 -1
  206. runbooks/security/executive_security_dashboard.py +519 -508
  207. runbooks/security/multi_account_security_controls.py +959 -1210
  208. runbooks/security/real_time_security_monitor.py +422 -444
  209. runbooks/security/security_baseline_tester.py +1 -1
  210. runbooks/security/security_cli.py +143 -112
  211. runbooks/security/test_2way_validation.py +439 -0
  212. runbooks/security/two_way_validation_framework.py +852 -0
  213. runbooks/sre/production_monitoring_framework.py +167 -177
  214. runbooks/tdd/__init__.py +15 -0
  215. runbooks/tdd/cli.py +1071 -0
  216. runbooks/utils/__init__.py +14 -17
  217. runbooks/utils/logger.py +7 -2
  218. runbooks/utils/version_validator.py +50 -47
  219. runbooks/validation/__init__.py +6 -6
  220. runbooks/validation/cli.py +9 -3
  221. runbooks/validation/comprehensive_2way_validator.py +745 -704
  222. runbooks/validation/mcp_validator.py +906 -228
  223. runbooks/validation/terraform_citations_validator.py +104 -115
  224. runbooks/validation/terraform_drift_detector.py +461 -454
  225. runbooks/vpc/README.md +617 -0
  226. runbooks/vpc/__init__.py +8 -1
  227. runbooks/vpc/analyzer.py +577 -0
  228. runbooks/vpc/cleanup_wrapper.py +476 -413
  229. runbooks/vpc/cli_cloudtrail_commands.py +339 -0
  230. runbooks/vpc/cli_mcp_validation_commands.py +480 -0
  231. runbooks/vpc/cloudtrail_audit_integration.py +717 -0
  232. runbooks/vpc/config.py +92 -97
  233. runbooks/vpc/cost_engine.py +411 -148
  234. runbooks/vpc/cost_explorer_integration.py +553 -0
  235. runbooks/vpc/cross_account_session.py +101 -106
  236. runbooks/vpc/enhanced_mcp_validation.py +917 -0
  237. runbooks/vpc/eni_gate_validator.py +961 -0
  238. runbooks/vpc/heatmap_engine.py +185 -160
  239. runbooks/vpc/mcp_no_eni_validator.py +680 -639
  240. runbooks/vpc/nat_gateway_optimizer.py +358 -0
  241. runbooks/vpc/networking_wrapper.py +15 -8
  242. runbooks/vpc/pdca_remediation_planner.py +528 -0
  243. runbooks/vpc/performance_optimized_analyzer.py +219 -231
  244. runbooks/vpc/runbooks_adapter.py +1167 -241
  245. runbooks/vpc/tdd_red_phase_stubs.py +601 -0
  246. runbooks/vpc/test_data_loader.py +358 -0
  247. runbooks/vpc/tests/conftest.py +314 -4
  248. runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
  249. runbooks/vpc/tests/test_cost_engine.py +0 -2
  250. runbooks/vpc/topology_generator.py +326 -0
  251. runbooks/vpc/unified_scenarios.py +1297 -1124
  252. runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
  253. runbooks-1.1.6.dist-info/METADATA +327 -0
  254. runbooks-1.1.6.dist-info/RECORD +489 -0
  255. runbooks/finops/README.md +0 -414
  256. runbooks/finops/accuracy_cross_validator.py +0 -647
  257. runbooks/finops/business_cases.py +0 -950
  258. runbooks/finops/dashboard_router.py +0 -922
  259. runbooks/finops/ebs_optimizer.py +0 -973
  260. runbooks/finops/embedded_mcp_validator.py +0 -1629
  261. runbooks/finops/enhanced_dashboard_runner.py +0 -527
  262. runbooks/finops/finops_dashboard.py +0 -584
  263. runbooks/finops/finops_scenarios.py +0 -1218
  264. runbooks/finops/legacy_migration.py +0 -730
  265. runbooks/finops/multi_dashboard.py +0 -1519
  266. runbooks/finops/single_dashboard.py +0 -1113
  267. runbooks/finops/unlimited_scenarios.py +0 -393
  268. runbooks-1.1.4.dist-info/METADATA +0 -800
  269. runbooks-1.1.4.dist-info/RECORD +0 -468
  270. {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/WHEEL +0 -0
  271. {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/entry_points.txt +0 -0
  272. {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/licenses/LICENSE +0 -0
  273. {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/top_level.txt +0 -0
@@ -21,22 +21,23 @@ logger = logging.getLogger(__name__)
  @dataclass
  class CostAnalysisCache:
  """Cache for cost analysis results to improve performance."""
+
  cost_data: Dict[str, Any] = field(default_factory=dict)
  last_updated: Dict[str, float] = field(default_factory=dict)
  cache_ttl: int = 300 # 5 minutes
-
+
  def is_valid(self, cache_key: str) -> bool:
  """Check if cached data is still valid."""
  if cache_key not in self.last_updated:
  return False
  return time.time() - self.last_updated[cache_key] < self.cache_ttl
-
+
  def get_cached_data(self, cache_key: str) -> Optional[Any]:
  """Get cached data if valid."""
  if self.is_valid(cache_key):
  return self.cost_data.get(cache_key)
  return None
-
+
  def cache_data(self, cache_key: str, data: Any):
  """Cache data."""
  self.cost_data[cache_key] = data
@@ -46,13 +47,14 @@ class CostAnalysisCache:
  @dataclass
  class ParallelCostMetrics:
  """Metrics for parallel cost analysis operations."""
+
  total_operations: int = 0
  parallel_operations: int = 0
  cache_hits: int = 0
  api_calls: int = 0
  total_time: float = 0.0
  average_operation_time: float = 0.0
-
+
  def get_cache_hit_ratio(self) -> float:
  """Calculate cache hit ratio."""
  total = self.cache_hits + self.api_calls
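Note: the changes in these two dataclasses are whitespace-only reformatting; the caching logic is unchanged. For orientation, it follows a plain TTL pattern: results are keyed, timestamped, and served from memory only while the entry is younger than `cache_ttl`. A minimal standalone sketch of that pattern (illustrative names, reusing the 300-second default shown above):

```python
import time
from dataclasses import dataclass, field
from typing import Any, Dict, Optional


@dataclass
class TTLCache:
    """Illustrative stand-in for CostAnalysisCache above."""

    data: Dict[str, Any] = field(default_factory=dict)
    stamps: Dict[str, float] = field(default_factory=dict)
    ttl: int = 300  # seconds, matching cache_ttl

    def get(self, key: str) -> Optional[Any]:
        # Serve the entry only while it is younger than the TTL.
        if key in self.stamps and time.time() - self.stamps[key] < self.ttl:
            return self.data.get(key)
        return None

    def put(self, key: str, value: Any) -> None:
        self.data[key] = value
        self.stamps[key] = time.time()
```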
@@ -62,7 +64,7 @@ class ParallelCostMetrics:
  class NetworkingCostEngine:
  """
  Enhanced core engine for networking cost calculations and analysis with parallel processing
-
+
  Performance Features:
  - Parallel cost calculations with ThreadPoolExecutor
  - Intelligent TTL-based caching (5 minutes)
@@ -71,8 +73,14 @@ class NetworkingCostEngine:
  - Real-time performance metrics
  """

- def __init__(self, session: Optional[boto3.Session] = None, config: Optional[VPCNetworkingConfig] = None,
- enable_parallel: bool = True, max_workers: int = 10, enable_caching: bool = True):
+ def __init__(
+ self,
+ session: Optional[boto3.Session] = None,
+ config: Optional[VPCNetworkingConfig] = None,
+ enable_parallel: bool = True,
+ max_workers: int = 10,
+ enable_caching: bool = True,
+ ):
  """
  Initialize the enhanced cost engine with performance optimizations

@@ -86,17 +94,17 @@ class NetworkingCostEngine:
  self.session = session or boto3.Session()
  self.config = config or load_config()
  self.cost_model = self.config.cost_model
-
+
  # Performance optimization settings
  self.enable_parallel = enable_parallel
  self.max_workers = max_workers
  self.enable_caching = enable_caching
-
+
  # Initialize performance components
  self.cost_cache = CostAnalysisCache() if enable_caching else None
  self.performance_metrics = ParallelCostMetrics()
  self.executor = ThreadPoolExecutor(max_workers=max_workers) if enable_parallel else None
-
+
  # Lazy-loaded clients with connection pooling
  self._cost_explorer_client = None
  self._cloudwatch_client = None
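The constructor change above is a black-style reflow of the same signature and defaults. Assuming the class is importable from the module this diff covers (runbooks/vpc/cost_engine.py), a call might look like the following hypothetical sketch; the profile name is a placeholder:

```python
import boto3

from runbooks.vpc.cost_engine import NetworkingCostEngine  # module path per this diff

# Hypothetical construction; the profile name is a placeholder.
engine = NetworkingCostEngine(
    session=boto3.Session(profile_name="billing-readonly"),
    enable_parallel=True,  # use the ThreadPoolExecutor path
    max_workers=10,        # default shown above
    enable_caching=True,   # 5-minute TTL cache
)
```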
@@ -414,45 +422,45 @@ class NetworkingCostEngine:
  return savings_analysis

  # Enhanced Performance Methods for <30s Execution Target
-
+
  def analyze_vpc_costs_parallel(
- self,
- vpc_ids: List[str],
+ self,
+ vpc_ids: List[str],
  include_historical: bool = True,
  include_projections: bool = True,
- days_analysis: int = 30
+ days_analysis: int = 30,
  ) -> Dict[str, Any]:
  """
  Analyze VPC costs in parallel for enhanced performance.
-
+
  Performance Targets:
  - <30s total execution for up to 50 VPCs
  - ≥99.5% accuracy through intelligent caching
  - 60%+ parallel efficiency over sequential processing
-
+
  Args:
  vpc_ids: List of VPC IDs to analyze
  include_historical: Include historical cost analysis
  include_projections: Include cost projections
  days_analysis: Number of days to analyze
-
+
  Returns:
  Comprehensive cost analysis results
  """
  start_time = time.time()
-
+
  if not self.enable_parallel or len(vpc_ids) <= 2:
  # Sequential processing for small sets or if parallel disabled
  return self._analyze_vpc_costs_sequential(vpc_ids, include_historical, include_projections, days_analysis)
-
+
  # Prepare parallel cost analysis tasks
  analysis_futures = []
  cost_results = {}
-
+
  # Split VPCs into batches for optimal parallel processing
  batch_size = max(1, len(vpc_ids) // self.max_workers)
- vpc_batches = [vpc_ids[i:i + batch_size] for i in range(0, len(vpc_ids), batch_size)]
-
+ vpc_batches = [vpc_ids[i : i + batch_size] for i in range(0, len(vpc_ids), batch_size)]
+
  try:
  # Submit parallel cost analysis tasks
  for batch_idx, vpc_batch in enumerate(vpc_batches):
@@ -462,14 +470,14 @@ class NetworkingCostEngine:
  include_historical,
  include_projections,
  days_analysis,
- f"batch_{batch_idx}"
+ f"batch_{batch_idx}",
  )
  analysis_futures.append(future)
  self.performance_metrics.parallel_operations += 1
-
+
  # Collect parallel results with timeout protection
  timeout_seconds = 25 # Leave 5s buffer for processing
-
+
  for future in as_completed(analysis_futures, timeout=timeout_seconds):
  try:
  batch_results = future.result(timeout=5)
@@ -477,20 +485,20 @@ class NetworkingCostEngine:
  except Exception as e:
  logger.warning(f"Parallel cost analysis batch failed: {e}")
  # Continue with other batches
-
+
  # Aggregate results from all parallel operations
  aggregated_results = self._aggregate_parallel_cost_results(cost_results)
-
+
  # Update performance metrics
  total_time = time.time() - start_time
  self.performance_metrics.total_time = total_time
  self.performance_metrics.total_operations = len(vpc_ids)
  self.performance_metrics.average_operation_time = total_time / max(len(vpc_ids), 1)
-
+
  logger.info(f"Parallel VPC cost analysis completed: {len(vpc_ids)} VPCs in {total_time:.2f}s")
-
+
  return aggregated_results
-
+
  except Exception as e:
  logger.error(f"Parallel cost analysis failed: {e}")
  # Fallback to sequential processing
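The parallel path added here splits the VPC list into batches, submits each batch to the shared ThreadPoolExecutor, and collects results via as_completed under a 25-second timeout, falling back to sequential analysis on failure. A condensed, self-contained sketch of that pattern (function names here are placeholders, not this module's API):

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, List


def analyze_one_batch(batch: List[str]) -> Dict[str, dict]:
    # Placeholder for the per-VPC work done in _analyze_vpc_cost_batch.
    return {vpc_id: {"total_cost": 0.0} for vpc_id in batch}


def analyze_batches(vpc_ids: List[str], max_workers: int = 10, timeout_s: int = 25) -> Dict[str, dict]:
    """Batch the IDs, fan out to a thread pool, and merge results as they complete."""
    batch_size = max(1, len(vpc_ids) // max_workers)
    batches = [vpc_ids[i : i + batch_size] for i in range(0, len(vpc_ids), batch_size)]
    results: Dict[str, dict] = {}
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(analyze_one_batch, batch) for batch in batches]
        for future in as_completed(futures, timeout=timeout_s):
            try:
                results.update(future.result(timeout=5))
            except Exception as exc:  # one failed batch should not sink the rest
                print(f"batch failed: {exc}")
    return results
```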
@@ -503,67 +511,59 @@ class NetworkingCostEngine:
  include_historical: bool,
  include_projections: bool,
  days_analysis: int,
- batch_id: str
+ batch_id: str,
  ) -> Dict[str, Any]:
  """
  Analyze costs for a batch of VPCs with caching optimization.
-
+
  Args:
  vpc_batch: Batch of VPC IDs to analyze
  include_historical: Include historical analysis
  include_projections: Include cost projections
  days_analysis: Days to analyze
  batch_id: Batch identifier for tracking
-
+
  Returns:
  Cost analysis results for the batch
  """
  batch_results = {}
-
+
  for vpc_id in vpc_batch:
  try:
  # Check cache first for performance optimization
  cache_key = f"vpc_cost_{vpc_id}_{days_analysis}_{include_historical}_{include_projections}"
-
+
  if self.cost_cache:
  cached_result = self.cost_cache.get_cached_data(cache_key)
  if cached_result:
  batch_results[vpc_id] = cached_result
  self.performance_metrics.cache_hits += 1
  continue
-
+
  # Perform fresh analysis
  vpc_cost_analysis = self._analyze_single_vpc_costs(
  vpc_id, include_historical, include_projections, days_analysis
  )
-
+
  # Cache the result
  if self.cost_cache:
  self.cost_cache.cache_data(cache_key, vpc_cost_analysis)
-
+
  batch_results[vpc_id] = vpc_cost_analysis
  self.performance_metrics.api_calls += 1
-
+
  except Exception as e:
  logger.warning(f"Cost analysis failed for VPC {vpc_id} in batch {batch_id}: {e}")
- batch_results[vpc_id] = {
- "error": str(e),
- "vpc_id": vpc_id,
- "analysis_failed": True
- }
-
+ batch_results[vpc_id] = {"error": str(e), "vpc_id": vpc_id, "analysis_failed": True}
+
  return batch_results

  def _analyze_single_vpc_costs(
- self,
- vpc_id: str,
- include_historical: bool,
- include_projections: bool,
- days_analysis: int
+ self, vpc_id: str, include_historical: bool, include_projections: bool, days_analysis: int
  ) -> Dict[str, Any]:
  """
  Analyze costs for a single VPC with comprehensive metrics.
-
+
  Returns:
  Detailed cost analysis for single VPC
  """
@@ -574,82 +574,80 @@ class NetworkingCostEngine:
  "total_cost": 0.0,
  "cost_breakdown": {},
  "optimization_opportunities": [],
- "performance_metrics": {}
+ "performance_metrics": {},
  }
-
+
  try:
  # NAT Gateway costs
  nat_gateways = self._get_vpc_nat_gateways(vpc_id)
  nat_gateway_costs = 0.0
-
+
  for nat_gateway_id in nat_gateways:
  nat_cost = self.calculate_nat_gateway_cost(nat_gateway_id, days_analysis)
  nat_gateway_costs += nat_cost.get("total_cost", 0.0)
-
+
  vpc_cost_data["cost_breakdown"]["nat_gateways"] = nat_gateway_costs
-
+
  # VPC Endpoints costs
  vpc_endpoints = self._get_vpc_endpoints(vpc_id)
  endpoint_costs = 0.0
-
+
  for endpoint in vpc_endpoints:
  endpoint_cost = self.calculate_vpc_endpoint_cost(
  endpoint.get("VpcEndpointType", "Gateway"),
  endpoint.get("availability_zones", 1),
- 0 # Data processing would need additional analysis
+ 0, # Data processing would need additional analysis
  )
  endpoint_costs += endpoint_cost.get("total_monthly_cost", 0.0)
-
+
  vpc_cost_data["cost_breakdown"]["vpc_endpoints"] = endpoint_costs
-
+
  # Elastic IPs costs
  elastic_ips = self._get_vpc_elastic_ips(vpc_id)
  eip_costs = 0.0
-
+
  for eip in elastic_ips:
  # Estimate idle hours (simplified)
- eip_cost = self.calculate_elastic_ip_cost(idle_hours=24*days_analysis*0.1) # 10% idle assumption
+ eip_cost = self.calculate_elastic_ip_cost(idle_hours=24 * days_analysis * 0.1) # 10% idle assumption
  eip_costs += eip_cost.get("total_cost", 0.0)
-
+
  vpc_cost_data["cost_breakdown"]["elastic_ips"] = eip_costs
-
+
  # Data transfer estimates (simplified)
  data_transfer_cost = self.calculate_data_transfer_cost(
  inter_az_gb=100, # Estimated
  inter_region_gb=50, # Estimated
- internet_out_gb=200 # Estimated
+ internet_out_gb=200, # Estimated
  )
  vpc_cost_data["cost_breakdown"]["data_transfer"] = data_transfer_cost.get("total_cost", 0.0)
-
+
  # Calculate total cost
  vpc_cost_data["total_cost"] = sum(vpc_cost_data["cost_breakdown"].values())
-
+
  # Historical analysis if requested
  if include_historical:
  vpc_cost_data["historical_analysis"] = self._get_historical_vpc_costs(vpc_id, days_analysis)
-
+
  # Cost projections if requested
  if include_projections:
  vpc_cost_data["cost_projections"] = self._calculate_vpc_cost_projections(vpc_cost_data, days_analysis)
-
+
  # Optimization opportunities
  vpc_cost_data["optimization_opportunities"] = self._identify_vpc_optimization_opportunities(vpc_cost_data)
-
+
  except Exception as e:
  vpc_cost_data["error"] = str(e)
  vpc_cost_data["analysis_failed"] = True
  logger.error(f"Single VPC cost analysis failed for {vpc_id}: {e}")
-
+
  return vpc_cost_data

  def _get_vpc_nat_gateways(self, vpc_id: str) -> List[str]:
  """Get NAT Gateway IDs for a VPC."""
  try:
- ec2 = self.session.client('ec2')
- response = ec2.describe_nat_gateways(
- Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
- )
- return [nat['NatGatewayId'] for nat in response.get('NatGateways', [])]
+ ec2 = self.session.client("ec2")
+ response = ec2.describe_nat_gateways(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
+ return [nat["NatGatewayId"] for nat in response.get("NatGateways", [])]
  except Exception as e:
  logger.warning(f"Failed to get NAT Gateways for VPC {vpc_id}: {e}")
  return []
@@ -657,11 +655,9 @@ class NetworkingCostEngine:
  def _get_vpc_endpoints(self, vpc_id: str) -> List[Dict[str, Any]]:
  """Get VPC Endpoints for a VPC."""
  try:
- ec2 = self.session.client('ec2')
- response = ec2.describe_vpc_endpoints(
- Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
- )
- return response.get('VpcEndpoints', [])
+ ec2 = self.session.client("ec2")
+ response = ec2.describe_vpc_endpoints(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
+ return response.get("VpcEndpoints", [])
  except Exception as e:
  logger.warning(f"Failed to get VPC Endpoints for VPC {vpc_id}: {e}")
  return []
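The two helper rewrites above only change quoting and collapse each call onto one line; the lookups are the same describe_* calls filtered by vpc-id. A standalone sketch of the NAT Gateway lookup, with boto3 pagination added for VPCs that have many gateways (the pagination is an illustrative addition, not part of this module):

```python
from typing import List

import boto3


def list_nat_gateway_ids(session: boto3.Session, vpc_id: str) -> List[str]:
    """Collect NAT Gateway IDs for one VPC, following pagination."""
    ec2 = session.client("ec2")
    nat_ids: List[str] = []
    paginator = ec2.get_paginator("describe_nat_gateways")
    for page in paginator.paginate(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]):
        nat_ids.extend(nat["NatGatewayId"] for nat in page.get("NatGateways", []))
    return nat_ids
```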
@@ -669,47 +665,41 @@ class NetworkingCostEngine:
  def _get_vpc_elastic_ips(self, vpc_id: str) -> List[Dict[str, Any]]:
  """Get Elastic IPs associated with a VPC."""
  try:
- ec2 = self.session.client('ec2')
+ ec2 = self.session.client("ec2")
  # Get instances in VPC first
- instances = ec2.describe_instances(
- Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
- )
-
+ instances = ec2.describe_instances(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
+
  instance_ids = []
- for reservation in instances.get('Reservations', []):
- for instance in reservation.get('Instances', []):
- instance_ids.append(instance['InstanceId'])
-
+ for reservation in instances.get("Reservations", []):
+ for instance in reservation.get("Instances", []):
+ instance_ids.append(instance["InstanceId"])
+
  # Get EIPs associated with these instances
  addresses = ec2.describe_addresses()
  vpc_eips = []
-
- for address in addresses.get('Addresses', []):
- if address.get('InstanceId') in instance_ids:
+
+ for address in addresses.get("Addresses", []):
+ if address.get("InstanceId") in instance_ids:
  vpc_eips.append(address)
-
+
  return vpc_eips
-
+
  except Exception as e:
  logger.warning(f"Failed to get Elastic IPs for VPC {vpc_id}: {e}")
  return []

  def _analyze_vpc_costs_sequential(
- self,
- vpc_ids: List[str],
- include_historical: bool,
- include_projections: bool,
- days_analysis: int
+ self, vpc_ids: List[str], include_historical: bool, include_projections: bool, days_analysis: int
  ) -> Dict[str, Any]:
  """Sequential fallback cost analysis."""
  results = {}
-
+
  for vpc_id in vpc_ids:
  results[vpc_id] = self._analyze_single_vpc_costs(
  vpc_id, include_historical, include_projections, days_analysis
  )
  self.performance_metrics.total_operations += 1
-
+
  return {
  "vpc_costs": results,
  "analysis_method": "sequential",
@@ -717,8 +707,8 @@ class NetworkingCostEngine:
  "performance_metrics": {
  "total_time": self.performance_metrics.total_time,
  "average_operation_time": self.performance_metrics.average_operation_time,
- "cache_hit_ratio": self.performance_metrics.get_cache_hit_ratio()
- }
+ "cache_hit_ratio": self.performance_metrics.get_cache_hit_ratio(),
+ },
  }

  def _aggregate_parallel_cost_results(self, cost_results: Dict[str, Any]) -> Dict[str, Any]:
@@ -727,20 +717,20 @@ class NetworkingCostEngine:
  total_vpcs = len(cost_results)
  failed_analyses = 0
  cost_breakdown_summary = {}
-
+
  for vpc_id, vpc_data in cost_results.items():
  if vpc_data.get("analysis_failed"):
  failed_analyses += 1
  continue
-
+
  total_cost += vpc_data.get("total_cost", 0.0)
-
+
  # Aggregate cost breakdowns
  for cost_type, cost_value in vpc_data.get("cost_breakdown", {}).items():
  if cost_type not in cost_breakdown_summary:
  cost_breakdown_summary[cost_type] = 0.0
  cost_breakdown_summary[cost_type] += cost_value
-
+
  return {
  "vpc_costs": cost_results,
  "summary": {
@@ -749,7 +739,7 @@ class NetworkingCostEngine:
  "successful_analyses": total_vpcs - failed_analyses,
  "failed_analyses": failed_analyses,
  "success_rate": (total_vpcs - failed_analyses) / max(total_vpcs, 1) * 100,
- "cost_breakdown_summary": cost_breakdown_summary
+ "cost_breakdown_summary": cost_breakdown_summary,
  },
  "analysis_method": "parallel",
  "performance_metrics": {
@@ -758,29 +748,29 @@ class NetworkingCostEngine:
  "api_calls": self.performance_metrics.api_calls,
  "cache_hit_ratio": self.performance_metrics.get_cache_hit_ratio(),
  "total_time": self.performance_metrics.total_time,
- "average_operation_time": self.performance_metrics.average_operation_time
- }
+ "average_operation_time": self.performance_metrics.average_operation_time,
+ },
  }

  def _get_historical_vpc_costs(self, vpc_id: str, days: int) -> Dict[str, Any]:
  """Get historical cost data for VPC (simplified implementation)."""
  # This would integrate with Cost Explorer for actual historical data
  # For now, return estimated historical trends
-
+
  return {
  "historical_period_days": days,
  "cost_trend": "stable", # Could be "increasing", "decreasing", "stable"
  "average_daily_cost": 0.0, # Would be calculated from actual data
  "peak_daily_cost": 0.0,
  "lowest_daily_cost": 0.0,
- "trend_analysis": "Stable cost pattern with minor fluctuations"
+ "trend_analysis": "Stable cost pattern with minor fluctuations",
  }

  def _calculate_vpc_cost_projections(self, vpc_cost_data: Dict[str, Any], days_analyzed: int) -> Dict[str, Any]:
  """Calculate cost projections based on current analysis."""
  current_total = vpc_cost_data.get("total_cost", 0.0)
  daily_average = current_total / max(days_analyzed, 1)
-
+
  return {
  "daily_projection": daily_average,
  "weekly_projection": daily_average * 7,
@@ -788,47 +778,53 @@ class NetworkingCostEngine:
  "quarterly_projection": daily_average * 90,
  "annual_projection": daily_average * 365,
  "projection_confidence": "medium", # Would be based on historical variance
- "projection_basis": f"Based on {days_analyzed} days analysis"
+ "projection_basis": f"Based on {days_analyzed} days analysis",
  }

  def _identify_vpc_optimization_opportunities(self, vpc_cost_data: Dict[str, Any]) -> List[Dict[str, Any]]:
  """Identify cost optimization opportunities for VPC."""
  opportunities = []
  cost_breakdown = vpc_cost_data.get("cost_breakdown", {})
-
+
  # NAT Gateway optimization
  nat_cost = cost_breakdown.get("nat_gateways", 0.0)
  if nat_cost > 100: # Monthly threshold
- opportunities.append({
- "type": "nat_gateway_optimization",
- "description": "Consider VPC Endpoints or NAT Instances for high NAT Gateway costs",
- "potential_savings": nat_cost * 0.3, # 30% potential savings
- "implementation_effort": "medium",
- "risk_level": "low"
- })
-
+ opportunities.append(
+ {
+ "type": "nat_gateway_optimization",
+ "description": "Consider VPC Endpoints or NAT Instances for high NAT Gateway costs",
+ "potential_savings": nat_cost * 0.3, # 30% potential savings
+ "implementation_effort": "medium",
+ "risk_level": "low",
+ }
+ )
+
  # VPC Endpoint optimization
  endpoint_cost = cost_breakdown.get("vpc_endpoints", 0.0)
  if endpoint_cost > 50:
- opportunities.append({
- "type": "vpc_endpoint_optimization",
- "description": "Review VPC Endpoint usage and consider Gateway endpoints where possible",
- "potential_savings": endpoint_cost * 0.2, # 20% potential savings
- "implementation_effort": "low",
- "risk_level": "low"
- })
-
+ opportunities.append(
+ {
+ "type": "vpc_endpoint_optimization",
+ "description": "Review VPC Endpoint usage and consider Gateway endpoints where possible",
+ "potential_savings": endpoint_cost * 0.2, # 20% potential savings
+ "implementation_effort": "low",
+ "risk_level": "low",
+ }
+ )
+
  # Elastic IP optimization
  eip_cost = cost_breakdown.get("elastic_ips", 0.0)
  if eip_cost > 20:
- opportunities.append({
- "type": "elastic_ip_optimization",
- "description": "Review idle Elastic IPs and consider release or attachment",
- "potential_savings": eip_cost * 0.8, # High savings potential for idle EIPs
- "implementation_effort": "low",
- "risk_level": "medium"
- })
-
+ opportunities.append(
+ {
+ "type": "elastic_ip_optimization",
+ "description": "Review idle Elastic IPs and consider release or attachment",
+ "potential_savings": eip_cost * 0.8, # High savings potential for idle EIPs
+ "implementation_effort": "low",
+ "risk_level": "medium",
+ }
+ )
+
  return opportunities

  def get_cost_engine_performance_metrics(self) -> Dict[str, Any]:
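The opportunity generation above is threshold arithmetic: each cost category that clears its monthly threshold contributes a fixed fraction of its spend as estimated savings. A worked example with hypothetical numbers:

```python
# Hypothetical monthly breakdown; thresholds and fractions are the ones shown above.
breakdown = {"nat_gateways": 200.0, "vpc_endpoints": 80.0, "elastic_ips": 30.0}

estimated_savings = 0.0
if breakdown["nat_gateways"] > 100:   # NAT threshold -> 30% savings assumption
    estimated_savings += breakdown["nat_gateways"] * 0.3   # 60.0
if breakdown["vpc_endpoints"] > 50:   # endpoint threshold -> 20%
    estimated_savings += breakdown["vpc_endpoints"] * 0.2  # 16.0
if breakdown["elastic_ips"] > 20:     # EIP threshold -> 80%
    estimated_savings += breakdown["elastic_ips"] * 0.8    # 24.0

print(f"${estimated_savings:.2f}/month potential")  # $100.00/month
```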
@@ -839,7 +835,7 @@ class NetworkingCostEngine:
  "parallel_processing_enabled": self.enable_parallel,
  "max_workers": self.max_workers,
  "caching_enabled": self.enable_caching,
- "cache_ttl_seconds": self.cost_cache.cache_ttl if self.cost_cache else 0
+ "cache_ttl_seconds": self.cost_cache.cache_ttl if self.cost_cache else 0,
  },
  "operation_metrics": {
  "total_operations": self.performance_metrics.total_operations,
@@ -848,23 +844,290 @@ class NetworkingCostEngine:
  "api_calls": self.performance_metrics.api_calls,
  "cache_hit_ratio": self.performance_metrics.get_cache_hit_ratio(),
  "total_time": self.performance_metrics.total_time,
- "average_operation_time": self.performance_metrics.average_operation_time
+ "average_operation_time": self.performance_metrics.average_operation_time,
  },
  "cache_health": {
  "cache_size": len(self.cost_cache.cost_data) if self.cost_cache else 0,
- "valid_entries": sum(
- 1 for key in self.cost_cache.cost_data.keys()
- if self.cost_cache.is_valid(key)
- ) if self.cost_cache else 0,
- "cache_efficiency": "healthy" if self.performance_metrics.get_cache_hit_ratio() > 0.2 else "low"
+ "valid_entries": sum(1 for key in self.cost_cache.cost_data.keys() if self.cost_cache.is_valid(key))
+ if self.cost_cache
+ else 0,
+ "cache_efficiency": "healthy" if self.performance_metrics.get_cache_hit_ratio() > 0.2 else "low",
  },
  "thread_pool_health": {
  "executor_available": self.executor is not None,
  "max_workers": self.max_workers if self.executor else 0,
- "parallel_efficiency": min(100, (self.performance_metrics.parallel_operations / max(self.max_workers, 1)) * 100) if self.enable_parallel else 0
+ "parallel_efficiency": min(
+ 100, (self.performance_metrics.parallel_operations / max(self.max_workers, 1)) * 100
+ )
+ if self.enable_parallel
+ else 0,
+ },
+ }
+
+ def analyze_networking_costs(
+ self,
+ vpc_ids: Optional[List[str]] = None,
+ include_recommendations: bool = True,
+ target_savings_percentage: float = 0.30,
+ ) -> Dict[str, Any]:
+ """
+ Comprehensive networking cost analysis for AWS-25 VPC cleanup requirements.
+
+ Args:
+ vpc_ids: List of VPC IDs to analyze (if None, analyzes all accessible VPCs)
+ include_recommendations: Include optimization recommendations
+ target_savings_percentage: Target savings percentage for recommendations
+
+ Returns:
+ Comprehensive networking cost analysis with optimization opportunities
+ """
+ start_time = time.time()
+
+ try:
+ # If no VPC IDs provided, return empty analysis with recommendations
+ if not vpc_ids:
+ logger.info("No VPCs provided for analysis - generating default cost framework")
+ return self._generate_default_networking_analysis(target_savings_percentage)
+
+ # Analyze costs for provided VPCs
+ vpc_cost_analysis = self.analyze_vpc_costs_parallel(
+ vpc_ids=vpc_ids, include_historical=True, include_projections=True, days_analysis=30
+ )
+
+ # Extract cost summary
+ total_monthly_cost = 0.0
+ cost_breakdown = {}
+
+ if "summary" in vpc_cost_analysis:
+ summary = vpc_cost_analysis["summary"]
+ total_monthly_cost = summary.get("total_cost", 0.0)
+ cost_breakdown = summary.get("cost_breakdown_summary", {})
+
+ # Generate optimization scenarios
+ optimization_scenarios = self._generate_optimization_scenarios(
+ total_monthly_cost, target_savings_percentage
+ )
+
+ # Calculate savings estimates
+ savings_analysis = self.estimate_optimization_savings(
+ current_costs=cost_breakdown or {"networking": total_monthly_cost},
+ optimization_scenarios=optimization_scenarios,
+ )
+
+ analysis_time = time.time() - start_time
+
+ networking_analysis = {
+ "analysis_timestamp": datetime.now().isoformat(),
+ "analysis_duration_seconds": analysis_time,
+ "vpc_analysis": vpc_cost_analysis,
+ "total_monthly_cost": total_monthly_cost,
+ "annual_cost_projection": total_monthly_cost * 12,
+ "cost_breakdown": cost_breakdown,
+ "optimization_scenarios": optimization_scenarios,
+ "savings_analysis": savings_analysis,
+ "target_savings_percentage": target_savings_percentage,
+ "performance_metrics": self.get_cost_engine_performance_metrics(),
+ }
+
+ if include_recommendations:
+ networking_analysis["recommendations"] = self._generate_networking_recommendations(
+ networking_analysis, target_savings_percentage
+ )
+
+ logger.info(f"Networking cost analysis completed in {analysis_time:.2f}s")
+ return networking_analysis
+
+ except Exception as e:
+ logger.error(f"Networking cost analysis failed: {e}")
+ return {
+ "error": str(e),
+ "analysis_timestamp": datetime.now().isoformat(),
+ "analysis_failed": True,
+ "fallback_analysis": self._generate_default_networking_analysis(target_savings_percentage),
  }
+
+ def _generate_default_networking_analysis(self, target_savings_percentage: float) -> Dict[str, Any]:
+ """Generate default networking analysis when no VPCs are found."""
+ return {
+ "analysis_timestamp": datetime.now().isoformat(),
+ "total_monthly_cost": 0.0,
+ "annual_cost_projection": 0.0,
+ "cost_breakdown": {"nat_gateways": 0.0, "vpc_endpoints": 0.0, "elastic_ips": 0.0, "data_transfer": 0.0},
+ "vpc_count": 0,
+ "target_savings_percentage": target_savings_percentage,
+ "optimization_opportunities": [
+ {
+ "type": "infrastructure_assessment",
+ "description": "No VPCs found for analysis - review VPC discovery and permissions",
+ "potential_savings": 0.0,
+ "implementation_effort": "low",
+ "risk_level": "low",
+ }
+ ],
+ "recommendations": [
+ "Verify AWS profile permissions for VPC discovery",
+ "Check regional VPC distribution",
+ "Review multi-account VPC architecture",
+ ],
  }

+ def _generate_optimization_scenarios(
+ self, current_monthly_cost: float, target_percentage: float
+ ) -> List[Dict[str, Any]]:
+ """Generate optimization scenarios for networking costs."""
+ base_target = current_monthly_cost * target_percentage
+
+ return [
+ {
+ "name": "Conservative Optimization",
+ "description": "Low-risk optimizations with immediate implementation",
+ "reductions": {
+ "nat_gateways": 15, # NAT Gateway → VPC Endpoints
+ "elastic_ips": 50, # Release idle EIPs
+ "data_transfer": 10, # Optimize data transfer patterns
+ },
+ "risk_level": "low",
+ "effort": "low",
+ "timeline_days": 7,
+ },
+ {
+ "name": "Moderate Optimization",
+ "description": "Medium-risk optimizations with architectural changes",
+ "reductions": {
+ "nat_gateways": 30, # Consolidated NAT Gateways
+ "vpc_endpoints": 20, # Interface → Gateway endpoints
+ "elastic_ips": 80, # Comprehensive EIP audit
+ "data_transfer": 25, # Regional optimization
+ },
+ "risk_level": "medium",
+ "effort": "medium",
+ "timeline_days": 21,
+ },
+ {
+ "name": "Aggressive Optimization",
+ "description": "High-impact optimizations requiring significant changes",
+ "reductions": {
+ "nat_gateways": 50, # Major architectural refactoring
+ "vpc_endpoints": 40, # Complete endpoint strategy review
+ "elastic_ips": 90, # Full EIP elimination where possible
+ "data_transfer": 40, # Cross-AZ and inter-region optimization
+ },
+ "risk_level": "high",
+ "effort": "high",
+ "timeline_days": 60,
+ },
+ ]
+
+ def _generate_networking_recommendations(
+ self, analysis: Dict[str, Any], target_percentage: float
+ ) -> List[Dict[str, Any]]:
+ """Generate actionable networking cost optimization recommendations."""
+ recommendations = []
+
+ total_cost = analysis.get("total_monthly_cost", 0.0)
+ cost_breakdown = analysis.get("cost_breakdown", {})
+
+ # NAT Gateway recommendations
+ nat_cost = cost_breakdown.get("nat_gateways", 0.0)
+ if nat_cost > 50: # Monthly threshold
+ recommendations.append(
+ {
+ "type": "nat_gateway_optimization",
+ "priority": "high",
+ "title": "NAT Gateway Cost Optimization",
+ "description": f"NAT Gateways costing ${nat_cost:.2f}/month - consider VPC Endpoints",
+ "potential_monthly_savings": nat_cost * 0.3,
+ "implementation_steps": [
+ "Identify services using NAT Gateway for AWS API access",
+ "Implement VPC Endpoints for S3, DynamoDB, and other AWS services",
+ "Consolidate NAT Gateways across availability zones where possible",
+ "Monitor data transfer patterns for optimization opportunities",
+ ],
+ "risk_assessment": "Low risk with proper testing and staged rollout",
+ }
+ )
+
+ # VPC Endpoint recommendations
+ endpoint_cost = cost_breakdown.get("vpc_endpoints", 0.0)
+ if endpoint_cost > 30:
+ recommendations.append(
+ {
+ "type": "vpc_endpoint_optimization",
+ "priority": "medium",
+ "title": "VPC Endpoint Optimization",
+ "description": f"VPC Endpoints costing ${endpoint_cost:.2f}/month - review usage patterns",
+ "potential_monthly_savings": endpoint_cost * 0.2,
+ "implementation_steps": [
+ "Audit VPC Endpoint usage patterns",
+ "Convert Interface endpoints to Gateway endpoints where supported",
+ "Remove unused VPC Endpoints",
+ "Consolidate endpoints across multiple VPCs",
+ ],
+ "risk_assessment": "Low risk - no service disruption expected",
+ }
+ )
+
+ # Elastic IP recommendations
+ eip_cost = cost_breakdown.get("elastic_ips", 0.0)
+ if eip_cost > 10:
+ recommendations.append(
+ {
+ "type": "elastic_ip_optimization",
+ "priority": "high",
+ "title": "Elastic IP Cost Reduction",
+ "description": f"Elastic IPs costing ${eip_cost:.2f}/month - high savings potential",
+ "potential_monthly_savings": eip_cost * 0.8,
+ "implementation_steps": [
+ "Identify idle/unattached Elastic IPs",
+ "Review EIP requirements for each application",
+ "Implement Application Load Balancer for public endpoints where appropriate",
+ "Release unused Elastic IPs immediately",
+ ],
+ "risk_assessment": "Medium risk - verify connectivity requirements before release",
+ }
+ )
+
+ # Data transfer recommendations
+ transfer_cost = cost_breakdown.get("data_transfer", 0.0)
+ if transfer_cost > 25:
+ recommendations.append(
+ {
+ "type": "data_transfer_optimization",
+ "priority": "medium",
+ "title": "Data Transfer Cost Optimization",
+ "description": f"Data transfer costing ${transfer_cost:.2f}/month - optimize patterns",
+ "potential_monthly_savings": transfer_cost * 0.25,
+ "implementation_steps": [
+ "Analyze inter-AZ data transfer patterns",
+ "Optimize application architecture for regional efficiency",
+ "Implement CloudFront for frequently accessed content",
+ "Review cross-region replication requirements",
+ ],
+ "risk_assessment": "Low risk with proper testing of application performance",
+ }
+ )
+
+ # Add overall assessment
+ if total_cost == 0:
+ recommendations.append(
+ {
+ "type": "infrastructure_discovery",
+ "priority": "high",
+ "title": "Infrastructure Discovery Required",
+ "description": "No networking costs detected - verify VPC discovery and analysis scope",
+ "potential_monthly_savings": 0.0,
+ "implementation_steps": [
+ "Verify AWS profile permissions for VPC and networking resource access",
+ "Check multi-region VPC distribution",
+ "Review organizational unit and account structure",
+ "Validate Cost Explorer API access for historical data",
+ ],
+ "risk_assessment": "No risk - discovery and validation activities only",
+ }
+ )
+
+ return recommendations
+
  def __del__(self):
  """Cleanup resources when cost engine is destroyed."""
  if self.executor:
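Taken together, the methods added in this hunk give the engine a single entry point for a networking cost review. A hypothetical invocation, assuming the import path from this diff; the profile name and VPC IDs are placeholders, and the accessed keys follow the result dict shown above:

```python
import boto3

from runbooks.vpc.cost_engine import NetworkingCostEngine  # module path per this diff

engine = NetworkingCostEngine(session=boto3.Session(profile_name="ops-readonly"))
report = engine.analyze_networking_costs(
    vpc_ids=["vpc-0abc1234", "vpc-0def5678"],  # placeholders
    include_recommendations=True,
    target_savings_percentage=0.30,
)
print(report["total_monthly_cost"], len(report.get("recommendations", [])))
```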