runbooks-0.9.9-py3-none-any.whl → runbooks-1.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/WEIGHT_CONFIG_README.md +368 -0
  3. runbooks/cfat/app.ts +27 -19
  4. runbooks/cfat/assessment/runner.py +6 -5
  5. runbooks/cfat/cloud_foundations_assessment.py +626 -0
  6. runbooks/cfat/tests/test_weight_configuration.ts +449 -0
  7. runbooks/cfat/weight_config.ts +574 -0
  8. runbooks/cloudops/cost_optimizer.py +95 -33
  9. runbooks/common/__init__.py +26 -9
  10. runbooks/common/aws_pricing.py +1353 -0
  11. runbooks/common/aws_pricing_api.py +205 -0
  12. runbooks/common/aws_utils.py +2 -2
  13. runbooks/common/comprehensive_cost_explorer_integration.py +979 -0
  14. runbooks/common/cross_account_manager.py +606 -0
  15. runbooks/common/date_utils.py +115 -0
  16. runbooks/common/enhanced_exception_handler.py +14 -7
  17. runbooks/common/env_utils.py +96 -0
  18. runbooks/common/mcp_cost_explorer_integration.py +5 -4
  19. runbooks/common/mcp_integration.py +49 -2
  20. runbooks/common/organizations_client.py +579 -0
  21. runbooks/common/profile_utils.py +127 -72
  22. runbooks/common/rich_utils.py +3 -3
  23. runbooks/finops/cost_optimizer.py +2 -1
  24. runbooks/finops/dashboard_runner.py +47 -28
  25. runbooks/finops/ebs_optimizer.py +56 -9
  26. runbooks/finops/elastic_ip_optimizer.py +13 -9
  27. runbooks/finops/embedded_mcp_validator.py +31 -0
  28. runbooks/finops/enhanced_trend_visualization.py +10 -4
  29. runbooks/finops/finops_dashboard.py +6 -5
  30. runbooks/finops/iam_guidance.py +6 -1
  31. runbooks/finops/markdown_exporter.py +217 -2
  32. runbooks/finops/nat_gateway_optimizer.py +76 -20
  33. runbooks/finops/tests/test_integration.py +3 -1
  34. runbooks/finops/vpc_cleanup_exporter.py +28 -26
  35. runbooks/finops/vpc_cleanup_optimizer.py +363 -16
  36. runbooks/inventory/__init__.py +10 -1
  37. runbooks/inventory/cloud_foundations_integration.py +409 -0
  38. runbooks/inventory/core/collector.py +1177 -94
  39. runbooks/inventory/discovery.md +339 -0
  40. runbooks/inventory/drift_detection_cli.py +327 -0
  41. runbooks/inventory/inventory_mcp_cli.py +171 -0
  42. runbooks/inventory/inventory_modules.py +6 -9
  43. runbooks/inventory/list_ec2_instances.py +3 -3
  44. runbooks/inventory/mcp_inventory_validator.py +2149 -0
  45. runbooks/inventory/mcp_vpc_validator.py +23 -6
  46. runbooks/inventory/organizations_discovery.py +104 -9
  47. runbooks/inventory/rich_inventory_display.py +129 -1
  48. runbooks/inventory/unified_validation_engine.py +1279 -0
  49. runbooks/inventory/verify_ec2_security_groups.py +3 -1
  50. runbooks/inventory/vpc_analyzer.py +825 -7
  51. runbooks/inventory/vpc_flow_analyzer.py +36 -42
  52. runbooks/main.py +708 -47
  53. runbooks/monitoring/performance_monitor.py +11 -7
  54. runbooks/operate/base.py +9 -6
  55. runbooks/operate/deployment_framework.py +5 -4
  56. runbooks/operate/deployment_validator.py +6 -5
  57. runbooks/operate/dynamodb_operations.py +6 -5
  58. runbooks/operate/ec2_operations.py +3 -2
  59. runbooks/operate/mcp_integration.py +6 -5
  60. runbooks/operate/networking_cost_heatmap.py +21 -16
  61. runbooks/operate/s3_operations.py +13 -12
  62. runbooks/operate/vpc_operations.py +100 -12
  63. runbooks/remediation/base.py +4 -2
  64. runbooks/remediation/commons.py +5 -5
  65. runbooks/remediation/commvault_ec2_analysis.py +68 -15
  66. runbooks/remediation/config/accounts_example.json +31 -0
  67. runbooks/remediation/ec2_unattached_ebs_volumes.py +6 -3
  68. runbooks/remediation/multi_account.py +120 -7
  69. runbooks/remediation/rds_snapshot_list.py +5 -3
  70. runbooks/remediation/remediation_cli.py +710 -0
  71. runbooks/remediation/universal_account_discovery.py +377 -0
  72. runbooks/security/compliance_automation_engine.py +99 -20
  73. runbooks/security/config/__init__.py +24 -0
  74. runbooks/security/config/compliance_config.py +255 -0
  75. runbooks/security/config/compliance_weights_example.json +22 -0
  76. runbooks/security/config_template_generator.py +500 -0
  77. runbooks/security/security_cli.py +377 -0
  78. runbooks/validation/__init__.py +21 -1
  79. runbooks/validation/cli.py +8 -7
  80. runbooks/validation/comprehensive_2way_validator.py +2007 -0
  81. runbooks/validation/mcp_validator.py +965 -101
  82. runbooks/validation/terraform_citations_validator.py +363 -0
  83. runbooks/validation/terraform_drift_detector.py +1098 -0
  84. runbooks/vpc/cleanup_wrapper.py +231 -10
  85. runbooks/vpc/config.py +346 -73
  86. runbooks/vpc/cross_account_session.py +312 -0
  87. runbooks/vpc/heatmap_engine.py +115 -41
  88. runbooks/vpc/manager_interface.py +9 -9
  89. runbooks/vpc/mcp_no_eni_validator.py +1630 -0
  90. runbooks/vpc/networking_wrapper.py +14 -8
  91. runbooks/vpc/runbooks_adapter.py +33 -12
  92. runbooks/vpc/tests/conftest.py +4 -2
  93. runbooks/vpc/tests/test_cost_engine.py +4 -2
  94. runbooks/vpc/unified_scenarios.py +73 -3
  95. runbooks/vpc/vpc_cleanup_integration.py +512 -78
  96. {runbooks-0.9.9.dist-info → runbooks-1.0.1.dist-info}/METADATA +94 -52
  97. {runbooks-0.9.9.dist-info → runbooks-1.0.1.dist-info}/RECORD +101 -81
  98. runbooks/finops/runbooks.inventory.organizations_discovery.log +0 -0
  99. runbooks/finops/runbooks.security.report_generator.log +0 -0
  100. runbooks/finops/runbooks.security.run_script.log +0 -0
  101. runbooks/finops/runbooks.security.security_export.log +0 -0
  102. runbooks/finops/tests/results_test_finops_dashboard.xml +0 -1
  103. runbooks/inventory/artifacts/scale-optimize-status.txt +0 -12
  104. runbooks/inventory/runbooks.inventory.organizations_discovery.log +0 -0
  105. runbooks/inventory/runbooks.security.report_generator.log +0 -0
  106. runbooks/inventory/runbooks.security.run_script.log +0 -0
  107. runbooks/inventory/runbooks.security.security_export.log +0 -0
  108. {runbooks-0.9.9.dist-info → runbooks-1.0.1.dist-info}/WHEEL +0 -0
  109. {runbooks-0.9.9.dist-info → runbooks-1.0.1.dist-info}/entry_points.txt +0 -0
  110. {runbooks-0.9.9.dist-info → runbooks-1.0.1.dist-info}/licenses/LICENSE +0 -0
  111. {runbooks-0.9.9.dist-info → runbooks-1.0.1.dist-info}/top_level.txt +0 -0
@@ -26,7 +26,7 @@ from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeRe
  from rich.table import Table
  from rich.tree import Tree

- from runbooks.common.profile_utils import create_operational_session
+ from runbooks.common.profile_utils import create_operational_session, create_cost_session, create_management_session
  from runbooks.common.performance_monitor import get_performance_benchmark
  from runbooks.common.enhanced_exception_handler import create_exception_handler, ErrorContext
  from .cost_engine import NetworkingCostEngine
@@ -251,10 +251,13 @@ class VPCCleanupFramework:
  console=console
  )

- # Initialize cost engine for financial impact analysis
- self.cost_engine = NetworkingCostEngine(
- session=self.session
- ) if self.session else None
+ # Initialize cost engine for financial impact analysis with billing session
+ try:
+ billing_session = create_cost_session(profile=profile)
+ self.cost_engine = NetworkingCostEngine(session=billing_session)
+ except Exception as e:
+ self.console.log(f"[yellow]Warning: Cost analysis unavailable - {e}[/]")
+ self.cost_engine = None

  # Results storage
  self.cleanup_candidates: List[VPCCleanupCandidate] = []
@@ -320,13 +323,22 @@ class VPCCleanupFramework:
  )

  # Enhanced performance target validation
- self._validate_performance_targets(metrics)
+ try:
+ self._validate_performance_targets(metrics)
+ except Exception as e:
+ logger.error(f"Error in performance validation: {e}")

  # Display comprehensive performance summary
- self._display_enhanced_performance_summary()
+ try:
+ self._display_enhanced_performance_summary()
+ except Exception as e:
+ logger.error(f"Error in performance summary display: {e}")

  # Log DORA metrics for compliance
- self._log_dora_metrics(start_time, len(candidates), True)
+ try:
+ self._log_dora_metrics(start_time, len(candidates), True)
+ except Exception as e:
+ logger.error(f"Error in DORA metrics logging: {e}")

  return candidates

@@ -525,7 +537,16 @@ class VPCCleanupFramework:

  except Exception as e:
  circuit_breaker.record_failure()
- logger.error(f"VPC analysis failed for {vpc_id}: {e}")
+ # Add detailed debugging for format string errors
+ import traceback
+ if "unsupported format string passed to NoneType.__format__" in str(e):
+ logger.error(f"FORMAT STRING ERROR in VPC analysis for {vpc_id}:")
+ logger.error(f"Exception type: {type(e)}")
+ logger.error(f"Exception message: {e}")
+ logger.error("Full traceback:")
+ logger.error(traceback.format_exc())
+ else:
+ logger.error(f"VPC analysis failed for {vpc_id}: {e}")
  raise

  def _analyze_vpc_dependencies_optimized(self, candidate: VPCCleanupCandidate, ec2_client) -> None:
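The NoneType format-string branch above guards against a specific Python failure mode; a minimal, standalone illustration (not taken from the package) of the error text being matched and the None-safe guard used elsewhere in this release:

    # Formatting None with a numeric spec raises exactly the error matched above.
    value = None
    try:
        print(f"${value:,.2f}")
    except TypeError as exc:
        print(exc)  # unsupported format string passed to NoneType.__format__
    # The (value or 0.0) guard applied throughout this diff avoids it:
    print(f"${(value or 0.0):,.2f}")  # prints $0.00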
@@ -545,34 +566,53 @@ class VPCCleanupFramework:
  dependency_start_time = time.time()

  try:
- # Batch dependency analysis operations for better performance
+ # Batch dependency analysis operations with enhanced error handling
  if self.enable_parallel_processing and self.executor:
- # Parallel dependency analysis
- dependency_futures = {
- 'nat_gateways': self.executor.submit(self._analyze_nat_gateways, vpc_id, ec2_client),
- 'vpc_endpoints': self.executor.submit(self._analyze_vpc_endpoints, vpc_id, ec2_client),
- 'route_tables': self.executor.submit(self._analyze_route_tables, vpc_id, ec2_client),
- 'security_groups': self.executor.submit(self._analyze_security_groups, vpc_id, ec2_client),
- 'network_acls': self.executor.submit(self._analyze_network_acls, vpc_id, ec2_client),
- 'vpc_peering': self.executor.submit(self._analyze_vpc_peering, vpc_id, ec2_client),
- 'tgw_attachments': self.executor.submit(self._analyze_transit_gateway_attachments, vpc_id, ec2_client),
- 'internet_gateways': self.executor.submit(self._analyze_internet_gateways, vpc_id, ec2_client),
- 'vpn_gateways': self.executor.submit(self._analyze_vpn_gateways, vpc_id, ec2_client),
- 'elastic_ips': self.executor.submit(self._analyze_elastic_ips, vpc_id, ec2_client),
- 'load_balancers': self.executor.submit(self._analyze_load_balancers, vpc_id, ec2_client),
- 'network_interfaces': self.executor.submit(self._analyze_network_interfaces, vpc_id, ec2_client),
- 'rds_subnet_groups': self.executor.submit(self._analyze_rds_subnet_groups, vpc_id),
- 'elasticache_subnet_groups': self.executor.submit(self._analyze_elasticache_subnet_groups, vpc_id),
- }
+ dependency_futures = {}

- # Collect results
- for dep_type, future in dependency_futures.items():
- try:
- deps = future.result(timeout=30) # 30 second timeout per dependency type
- dependencies.extend(deps)
- except Exception as e:
- logger.warning(f"Failed to analyze {dep_type} for VPC {vpc_id}: {e}")
- self.performance_metrics.error_count += 1
+ try:
+ # Check executor state before submitting tasks
+ if self.executor._shutdown:
+ logger.warning("Executor is shutdown, falling back to sequential processing")
+ raise Exception("Executor unavailable")
+
+ # Parallel dependency analysis with enhanced error handling
+ dependency_futures = {
+ 'nat_gateways': self.executor.submit(self._analyze_nat_gateways, vpc_id, ec2_client),
+ 'vpc_endpoints': self.executor.submit(self._analyze_vpc_endpoints, vpc_id, ec2_client),
+ 'route_tables': self.executor.submit(self._analyze_route_tables, vpc_id, ec2_client),
+ 'security_groups': self.executor.submit(self._analyze_security_groups, vpc_id, ec2_client),
+ 'network_acls': self.executor.submit(self._analyze_network_acls, vpc_id, ec2_client),
+ 'vpc_peering': self.executor.submit(self._analyze_vpc_peering, vpc_id, ec2_client),
+ 'tgw_attachments': self.executor.submit(self._analyze_transit_gateway_attachments, vpc_id, ec2_client),
+ 'internet_gateways': self.executor.submit(self._analyze_internet_gateways, vpc_id, ec2_client),
+ 'vpn_gateways': self.executor.submit(self._analyze_vpn_gateways, vpc_id, ec2_client),
+ 'elastic_ips': self.executor.submit(self._analyze_elastic_ips, vpc_id, ec2_client),
+ 'load_balancers': self.executor.submit(self._analyze_load_balancers, vpc_id, ec2_client),
+ 'network_interfaces': self.executor.submit(self._analyze_network_interfaces, vpc_id, ec2_client),
+ 'rds_subnet_groups': self.executor.submit(self._analyze_rds_subnet_groups, vpc_id),
+ 'elasticache_subnet_groups': self.executor.submit(self._analyze_elasticache_subnet_groups, vpc_id),
+ }
+
+ # Collect results with enhanced timeout and error handling
+ for dep_type, future in dependency_futures.items():
+ try:
+ deps = future.result(timeout=30) # 30 second timeout per dependency type
+ if deps: # Only extend if not None
+ dependencies.extend(deps)
+ except concurrent.futures.TimeoutError:
+ logger.warning(f"Timeout analyzing {dep_type} for VPC {vpc_id} (>30s)")
+ self.performance_metrics.error_count += 1
+ except AttributeError as e:
+ logger.error(f"Executor attribute error for {dep_type} in VPC {vpc_id}: {e}")
+ self.performance_metrics.error_count += 1
+ except Exception as e:
+ logger.warning(f"Failed to analyze {dep_type} for VPC {vpc_id}: {e}")
+ self.performance_metrics.error_count += 1
+
+ except Exception as executor_error:
+ logger.error(f"Executor initialization/submission failed: {executor_error}")
+ # Fall through to sequential processing

  else:
  # Sequential analysis (fallback)
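The parallel branch above submits one analyzer per dependency type and bounds each result with a 30-second timeout. A self-contained sketch of that concurrent.futures pattern, with a hypothetical analyze() standing in for the package's private _analyze_* methods:

    import concurrent.futures

    def analyze(kind: str) -> list:
        # stand-in for self._analyze_<kind>(vpc_id, ec2_client)
        return [f"{kind}-finding"]

    dependencies = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        futures = {kind: executor.submit(analyze, kind) for kind in ("nat_gateways", "route_tables")}
        for kind, future in futures.items():
            try:
                deps = future.result(timeout=30)  # bound each dependency type to 30s
                if deps:  # only extend when the analyzer returned something
                    dependencies.extend(deps)
            except concurrent.futures.TimeoutError:
                print(f"timeout analyzing {kind}")
            except Exception as exc:
                print(f"failed to analyze {kind}: {exc}")
    print(dependencies)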
@@ -673,10 +713,26 @@ class VPCCleanupFramework:

  self.console.print(f"[cyan]🌐 Multi-account analysis across {len(account_profiles)} accounts[/cyan]")

- for profile in account_profiles:
+ for account_item in account_profiles:
  try:
- # Create session for this account
- account_session = create_operational_session(profile=profile)
+ # Handle both AccountSession objects and profile strings for backward compatibility
+ if hasattr(account_item, 'session') and hasattr(account_item, 'account_id'):
+ # New AccountSession object from cross-account session manager
+ account_session = account_item.session
+ account_id = account_item.account_id
+ account_name = getattr(account_item, 'account_name', account_id)
+ profile_display = f"{account_name} ({account_id})"
+ else:
+ # Legacy profile string - use old method for backward compatibility
+ profile = account_item
+ try:
+ from runbooks.finops.aws_client import get_cached_session
+ account_session = get_cached_session(profile)
+ except ImportError:
+ # Extract profile name from Organizations API format (profile@accountId)
+ actual_profile = profile.split("@")[0] if "@" in profile else profile
+ account_session = create_operational_session(profile=actual_profile)
+ profile_display = profile

  # Temporarily update session for analysis
  original_session = self.session
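The loop above accepts either the new AccountSession objects (from runbooks/vpc/cross_account_session.py, added in this release) or legacy profile strings by duck typing on the session/account_id attributes. A rough, hypothetical sketch of that dispatch; the class and helper below are illustrative only, not the package's API:

    from dataclasses import dataclass

    @dataclass
    class AccountSession:  # illustrative stand-in, not the package's class
        session: object
        account_id: str
        account_name: str = ""

    def resolve(item):
        if hasattr(item, "session") and hasattr(item, "account_id"):
            return item.session, item.account_id
        # legacy "profile@accountId" string form
        profile = item.split("@")[0] if "@" in item else item
        return None, profile  # a real caller would build a boto3 session from the profile

    print(resolve(AccountSession(session="<boto3.Session>", account_id="123456789012")))
    print(resolve("ops-profile@123456789012"))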
@@ -1276,7 +1332,7 @@ class VPCCleanupFramework:
  if dep.resource_type == 'NatGateway':
  # Base NAT Gateway cost
  monthly_cost += 45.0 # $0.05/hour * 24 * 30
- elif dep.resource_type == 'VpcEndpoint' and 'Interface' in dep.description:
+ elif dep.resource_type == 'VpcEndpoint' and 'Interface' in (dep.description or ''):
  # Interface endpoint cost (estimated 1 AZ)
  monthly_cost += 10.0
  elif dep.resource_type == 'LoadBalancer':
@@ -1325,10 +1381,10 @@ class VPCCleanupFramework:
  for candidate in candidates:
  phases[candidate.cleanup_phase].append(candidate)

- # Calculate totals
+ # Calculate totals with None-safe calculations
  total_vpcs = len(candidates)
- total_cost_savings = sum(candidate.annual_savings for candidate in candidates)
- total_blocking_deps = sum(candidate.blocking_dependencies for candidate in candidates)
+ total_cost_savings = sum((candidate.annual_savings or 0.0) for candidate in candidates)
+ total_blocking_deps = sum((candidate.blocking_dependencies or 0) for candidate in candidates)

  # Enhanced Three-Bucket Logic Implementation
  three_bucket_classification = self._apply_three_bucket_logic(candidates)
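The None-safe aggregation introduced here matters because sum() raises as soon as it hits a None; a short demonstration with made-up numbers:

    savings = [1200.0, None, 350.0]
    # sum(savings) would raise: unsupported operand type(s) for +: 'float' and 'NoneType'
    print(sum((s or 0.0) for s in savings))  # 1550.0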
@@ -1362,7 +1418,7 @@ class VPCCleanupFramework:
  cleanup_plan['phases'][phase.value] = {
  'candidate_count': len(phase_candidates),
  'candidates': [self._serialize_candidate(c) for c in phase_candidates],
- 'total_savings': sum(c.annual_savings for c in phase_candidates),
+ 'total_savings': sum((c.annual_savings or 0.0) for c in phase_candidates),
  'average_timeline': self._calculate_average_timeline(phase_candidates),
  'risk_distribution': self._analyze_risk_distribution(phase_candidates)
  }
@@ -1466,21 +1522,21 @@ class VPCCleanupFramework:
  'count': len(bucket_1_safe),
  'percentage': round(safe_percentage, 1),
  'vpc_ids': [c.vpc_id for c in bucket_1_safe],
- 'total_savings': sum(c.annual_savings for c in bucket_1_safe),
+ 'total_savings': sum((c.annual_savings or 0.0) for c in bucket_1_safe),
  'criteria': 'Zero ENIs, no dependencies, no IaC (default/non-default both allowed)'
  },
  'bucket_2_analysis_required': {
  'count': len(bucket_2_analysis),
  'percentage': round(analysis_percentage, 1),
  'vpc_ids': [c.vpc_id for c in bucket_2_analysis],
- 'total_savings': sum(c.annual_savings for c in bucket_2_analysis),
+ 'total_savings': sum((c.annual_savings or 0.0) for c in bucket_2_analysis),
  'criteria': 'Limited dependencies, low-medium risk, analysis needed'
  },
  'bucket_3_complex_approval': {
  'count': len(bucket_3_complex),
  'percentage': round(complex_percentage, 1),
  'vpc_ids': [c.vpc_id for c in bucket_3_complex],
- 'total_savings': sum(c.annual_savings for c in bucket_3_complex),
+ 'total_savings': sum((c.annual_savings or 0.0) for c in bucket_3_complex),
  'criteria': 'Multiple dependencies, IaC managed, or high risk'
  },
  'safety_gates': {
@@ -1528,7 +1584,7 @@ class VPCCleanupFramework:
  'name': phase.value,
  'duration': self._calculate_average_timeline(candidates),
  'vpc_count': len(candidates),
- 'savings_potential': sum(c.annual_savings for c in candidates),
+ 'savings_potential': sum((c.annual_savings or 0.0) for c in candidates),
  'key_activities': self._get_phase_activities(phase),
  'success_criteria': self._get_phase_success_criteria(phase),
  'stakeholders': self._get_phase_stakeholders(phase)
@@ -1543,7 +1599,7 @@ class VPCCleanupFramework:
  return {
  'security_improvement': {
  'default_vpcs_eliminated': default_vpc_count,
- 'attack_surface_reduction': f"{(len([c for c in candidates if c.blocking_dependencies == 0]) / len(candidates) * 100):.1f}%" if candidates else "0%",
+ 'attack_surface_reduction': f"{(len([c for c in candidates if (c.blocking_dependencies or 0) == 0]) / len(candidates) * 100):.1f}%" if candidates else "0%",
  'compliance_benefit': 'CIS Benchmark compliance' if default_vpc_count > 0 else 'Network governance improvement'
  },
  'operational_benefits': {
@@ -1553,10 +1609,10 @@ class VPCCleanupFramework:
  'enhanced_incident_response': True
  },
  'financial_impact': {
- 'total_annual_savings': sum(c.annual_savings for c in candidates),
+ 'total_annual_savings': sum((c.annual_savings or 0.0) for c in candidates),
  'implementation_cost_estimate': 5000, # Conservative estimate
- 'roi_percentage': ((sum(c.annual_savings for c in candidates) / 5000) * 100) if sum(c.annual_savings for c in candidates) > 0 else 0,
- 'payback_period_months': max(1, 5000 / max(sum(c.monthly_cost for c in candidates), 1))
+ 'roi_percentage': ((sum((c.annual_savings or 0.0) for c in candidates) / 5000) * 100) if sum((c.annual_savings or 0.0) for c in candidates) > 0 else 0,
+ 'payback_period_months': max(1, 5000 / max(sum((c.monthly_cost or 0.0) for c in candidates), 1))
  }
  }

@@ -1689,7 +1745,7 @@ class VPCCleanupFramework:
  return stakeholders.get(phase, [])

  def display_cleanup_analysis(self, candidates: Optional[List[VPCCleanupCandidate]] = None) -> None:
- """Display comprehensive VPC cleanup analysis with Rich formatting"""
+ """Display comprehensive VPC cleanup analysis with Rich formatting and 16-column business-ready table"""
  if not candidates:
  candidates = self.cleanup_candidates

@@ -1700,12 +1756,13 @@ class VPCCleanupFramework:
  # Summary panel
  total_vpcs = len(candidates)
  immediate_count = len([c for c in candidates if c.cleanup_phase == VPCCleanupPhase.IMMEDIATE])
- total_savings = sum(c.annual_savings for c in candidates)
+ total_savings = sum((c.annual_savings or 0.0) for c in candidates)

+ percentage = (immediate_count/total_vpcs*100) if total_vpcs > 0 else 0
  summary = (
  f"[bold blue]📊 VPC CLEANUP ANALYSIS SUMMARY[/bold blue]\n"
  f"Total VPCs Analyzed: [yellow]{total_vpcs}[/yellow]\n"
- f"Immediate Cleanup Ready: [green]{immediate_count}[/green] ({(immediate_count/total_vpcs*100):.1f}%)\n"
+ f"Immediate Cleanup Ready: [green]{immediate_count}[/green] ({percentage:.1f}%)\n"
  f"Total Annual Savings: [bold green]${total_savings:,.2f}[/bold green]\n"
  f"Default VPCs Found: [red]{len([c for c in candidates if c.is_default])}[/red]\n"
  f"Safety Mode: [cyan]{'ENABLED' if self.safety_mode else 'DISABLED'}[/cyan]"
@@ -1713,7 +1770,11 @@ class VPCCleanupFramework:

  self.console.print(Panel(summary, title="VPC Cleanup Analysis", style="white", width=80))

- # Candidates by phase
+ # Display comprehensive 16-column analysis table
+ self._display_comprehensive_analysis_table(candidates)
+
+ # Display phase-grouped candidates (legacy view)
+ self.console.print(f"\n[dim]💡 Displaying phase-grouped analysis below...[/dim]")
  phases = {}
  for candidate in candidates:
  phase = candidate.cleanup_phase
@@ -1725,6 +1786,351 @@ class VPCCleanupFramework:
  if phase_candidates:
  self._display_phase_candidates(phase, phase_candidates)

+ def _display_comprehensive_analysis_table(self, candidates: List[VPCCleanupCandidate]) -> None:
+ """Display comprehensive 16-column business-ready VPC cleanup analysis table"""
+ self.console.print(f"\n[bold blue]📋 COMPREHENSIVE VPC CLEANUP ANALYSIS TABLE[/bold blue]")
+
+ # Detect CIDR overlaps
+ cidr_overlaps = self._detect_cidr_overlaps(candidates)
+
+ # Create comprehensive table with all 16 columns (optimized widths for better readability)
+ table = Table(
+ show_header=True,
+ header_style="bold magenta",
+ title="VPC Cleanup Decision Table - Business Approval Ready",
+ show_lines=True,
+ width=200 # Allow wider table for better display
+ )
+
+ # Add all 16 required columns with optimized widths and shortened names for better visibility
+ table.add_column("#", style="dim", width=2, justify="right")
+ table.add_column("Account", style="cyan", width=8)
+ table.add_column("VPC_ID", style="yellow", width=12)
+ table.add_column("VPC_Name", style="green", width=12)
+ table.add_column("CIDR", style="blue", width=11)
+ table.add_column("Overlap", style="red", width=7, justify="center")
+ table.add_column("Default", style="magenta", width=7, justify="center")
+ table.add_column("ENIs", style="orange1", width=4, justify="right")
+ table.add_column("Tags", style="dim", width=18)
+ table.add_column("FlowLog", style="purple", width=7, justify="center")
+ table.add_column("TGW/Peer", style="bright_red", width=8, justify="center")
+ table.add_column("LBs", style="bright_green", width=6, justify="center")
+ table.add_column("IaC", style="bright_blue", width=4, justify="center")
+ table.add_column("Timeline", style="bright_cyan", width=8)
+ table.add_column("Decision", style="bold white", width=10)
+ table.add_column("Owners", style="bright_yellow", width=12)
+ table.add_column("Notes", style="dim", width=12)
+
+ # Add data rows
+ for idx, candidate in enumerate(candidates, 1):
+ # Extract comprehensive metadata
+ tags_str = self._format_tags_string(candidate.tags)
+ owners_str = self._extract_owner_information(candidate.tags)
+ overlapping = "YES" if candidate.vpc_id in cidr_overlaps else "NO"
+ tgw_peering = self._check_tgw_peering_connections(candidate)
+ lbs_present = self._check_load_balancers(candidate)
+ decision = self._determine_cleanup_decision(candidate)
+ notes = self._generate_analysis_notes(candidate)
+
+ # Defensive handling for None values in table row
+ try:
+ table.add_row(
+ str(idx),
+ (candidate.account_id[-6:] if candidate.account_id and candidate.account_id != "unknown" else "N/A"),
+ self._truncate_text(candidate.vpc_id or "N/A", 11),
+ self._truncate_text(candidate.vpc_name or "N/A", 11),
+ self._truncate_text(candidate.cidr_block or "N/A", 10),
+ overlapping or "N/A",
+ "YES" if candidate.is_default else "NO",
+ str(candidate.eni_count or 0),
+ self._truncate_text(tags_str or "N/A", 17),
+ "YES" if candidate.flow_logs_enabled else "NO",
+ tgw_peering or "NO",
+ lbs_present or "NO",
+ "YES" if candidate.iac_managed else "NO",
+ self._truncate_text(candidate.implementation_timeline or "TBD", 7),
+ decision or "REVIEW",
+ self._truncate_text(owners_str or "N/A", 11),
+ self._truncate_text(notes or "N/A", 11)
+ )
+ except Exception as e:
+ logger.error(f"Error adding table row for VPC {candidate.vpc_id}: {e}")
+ # Add a minimal safe row
+ table.add_row(
+ str(idx),
+ "ERROR",
+ candidate.vpc_id or "N/A",
+ "ERROR",
+ "N/A",
+ "N/A",
+ "N/A",
+ "0",
+ "ERROR",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "N/A",
+ "ERROR",
+ "N/A",
+ f"Row error: {str(e)[:10]}"
+ )
+
+ self.console.print(table)
+
+ # Display information about table completeness
+ self.console.print(f"\n[dim]💡 16-column comprehensive table displayed above. For full data export, use --export option.[/dim]")
+ self.console.print(f"[dim] Additional columns: Tags, FlowLog, TGW/Peer, LBs, IaC, Timeline, Decision, Owners, Notes[/dim]")
+
+ # Display business impact summary
+ self._display_business_impact_summary(candidates, cidr_overlaps)
+
+ def export_16_column_analysis_csv(self, candidates: Optional[List[VPCCleanupCandidate]] = None, output_file: str = "./vpc_cleanup_16_column_analysis.csv") -> str:
+ """Export comprehensive 16-column VPC cleanup analysis to CSV format"""
+ import csv
+ from pathlib import Path
+
+ if not candidates:
+ candidates = self.cleanup_candidates
+
+ if not candidates:
+ self.console.print("[red]❌ No VPC candidates available for export[/red]")
+ return ""
+
+ # Detect CIDR overlaps
+ cidr_overlaps = self._detect_cidr_overlaps(candidates)
+
+ # Prepare CSV data
+ csv_data = []
+ headers = [
+ "#", "Account_ID", "VPC_ID", "VPC_Name", "CIDR_Block", "Overlapping",
+ "Is_Default", "ENI_Count", "Tags", "Flow Logs", "TGW/Peering",
+ "LBs Present", "IaC", "Timeline", "Decision", "Owners / Approvals", "Notes"
+ ]
+
+ csv_data.append(headers)
+
+ # Add data rows
+ for idx, candidate in enumerate(candidates, 1):
+ # Extract comprehensive metadata
+ tags_str = self._format_tags_string(candidate.tags)
+ owners_str = self._extract_owner_information(candidate.tags)
+ overlapping = "YES" if candidate.vpc_id in cidr_overlaps else "NO"
+ tgw_peering = self._check_tgw_peering_connections(candidate)
+ lbs_present = self._check_load_balancers(candidate)
+ decision = self._determine_cleanup_decision(candidate)
+ notes = self._generate_analysis_notes(candidate)
+
+ row = [
+ str(idx),
+ candidate.account_id,
+ candidate.vpc_id,
+ candidate.vpc_name or "N/A",
+ candidate.cidr_block,
+ overlapping,
+ "YES" if candidate.is_default else "NO",
+ str(candidate.eni_count),
+ tags_str,
+ "YES" if candidate.flow_logs_enabled else "NO",
+ tgw_peering,
+ lbs_present,
+ "YES" if candidate.iac_managed else "NO",
+ candidate.implementation_timeline,
+ decision,
+ owners_str,
+ notes
+ ]
+
+ csv_data.append(row)
+
+ # Write to CSV file
+ output_path = Path(output_file)
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_path, 'w', newline='', encoding='utf-8') as csvfile:
+ writer = csv.writer(csvfile)
+ writer.writerows(csv_data)
+
+ self.console.print(f"[green]✅ 16-column VPC cleanup analysis exported to: {output_path.absolute()}[/green]")
+ self.console.print(f"[dim] Contains {len(candidates)} VPCs with comprehensive metadata and business approval information[/dim]")
+
+ return str(output_path.absolute())
+
+ def _detect_cidr_overlaps(self, candidates: List[VPCCleanupCandidate]) -> Set[str]:
+ """Detect CIDR block overlaps between VPCs (both within and across accounts)"""
+ overlapping_vpcs = set()
+
+ try:
+ from ipaddress import IPv4Network
+
+ # Create list of all VPC networks for comprehensive overlap checking
+ vpc_networks = []
+ for candidate in candidates:
+ try:
+ network = IPv4Network(candidate.cidr_block, strict=False)
+ vpc_networks.append((candidate.vpc_id, network, candidate.account_id, candidate.region))
+ except Exception:
+ continue
+
+ # Check for overlaps between all VPC pairs (comprehensive check)
+ for i, (vpc1_id, network1, account1, region1) in enumerate(vpc_networks):
+ for j, (vpc2_id, network2, account2, region2) in enumerate(vpc_networks[i+1:], i+1):
+ # Explicit same-VPC exclusion (prevent false positives)
+ if vpc1_id == vpc2_id:
+ continue
+
+ # Check overlaps within same region (cross-account overlaps are also important)
+ if region1 == region2 and network1.overlaps(network2):
+ overlapping_vpcs.add(vpc1_id)
+ overlapping_vpcs.add(vpc2_id)
+ # Enhanced overlap logging with account context
+ if self.console:
+ account_context = f" (Account: {account1}->{account2})" if account1 != account2 else f" (Account: {account1})"
+ self.console.log(f"[yellow]CIDR Overlap detected: {vpc1_id}({network1}) overlaps with {vpc2_id}({network2}){account_context}[/yellow]")
+
+ except ImportError:
+ self.console.print("[yellow]⚠️ ipaddress module not available - CIDR overlap detection disabled[/yellow]")
+ except Exception as e:
+ self.console.print(f"[yellow]⚠️ CIDR overlap detection failed: {e}[/yellow]")
+
+ return overlapping_vpcs
+
+ def _format_tags_string(self, tags: Dict[str, str]) -> str:
+ """Format tags as 'key=value,key2=value2' string"""
+ if not tags:
+ return "none"
+
+ # Limit to most important tags to avoid overwhelming display
+ important_tags = ['Name', 'Environment', 'Owner', 'Team', 'Department', 'CostCenter']
+ filtered_tags = {}
+
+ # First include important tags
+ for key in important_tags:
+ if key in tags:
+ filtered_tags[key] = tags[key]
+
+ # Then add remaining tags up to a reasonable limit
+ remaining_count = 6 - len(filtered_tags)
+ for key, value in tags.items():
+ if key not in filtered_tags and remaining_count > 0:
+ filtered_tags[key] = value
+ remaining_count -= 1
+
+ return ",".join([f"{k}={v}" for k, v in filtered_tags.items()])
+
+ def _extract_owner_information(self, tags: Dict[str, str]) -> str:
+ """Extract owner information from AWS tags"""
+ owner_keys = ['Owner', 'BusinessOwner', 'TechnicalOwner', 'Team', 'Department', 'CostCenter']
+ owners = []
+
+ for key in owner_keys:
+ if key in tags and tags[key]:
+ owners.append(f"{key}:{tags[key]}")
+
+ return ";".join(owners) if owners else "unknown"
+
+ def _check_tgw_peering_connections(self, candidate: VPCCleanupCandidate) -> str:
+ """Check for Transit Gateway and Peering connections"""
+ connections = []
+
+ # Check dependencies for TGW and peering connections
+ for dep in candidate.dependencies:
+ if dep.resource_type in ['TransitGatewayAttachment', 'VpcPeeringConnection']:
+ connections.append(dep.resource_type[:3]) # TGW or VPC
+
+ return ",".join(connections) if connections else "NO"
+
+ def _check_load_balancers(self, candidate: VPCCleanupCandidate) -> str:
+ """Check for Load Balancers in VPC"""
+ lb_types = []
+
+ # Check dependencies for load balancers
+ for dep in candidate.dependencies:
+ if 'LoadBalancer' in dep.resource_type or 'ELB' in dep.resource_type:
+ if 'Application' in dep.resource_type:
+ lb_types.append('ALB')
+ elif 'Network' in dep.resource_type:
+ lb_types.append('NLB')
+ elif 'Classic' in dep.resource_type:
+ lb_types.append('CLB')
+ else:
+ lb_types.append('LB')
+
+ return ",".join(set(lb_types)) if lb_types else "NO"
+
+ def _determine_cleanup_decision(self, candidate: VPCCleanupCandidate) -> str:
+ """Determine cleanup decision based on analysis"""
+ if candidate.cleanup_phase == VPCCleanupPhase.IMMEDIATE:
+ if candidate.iac_managed:
+ return "DELETE (IaC)"
+ else:
+ return "DELETE (Manual)"
+ elif candidate.cleanup_phase == VPCCleanupPhase.INVESTIGATION:
+ return "INVESTIGATE"
+ elif candidate.cleanup_phase == VPCCleanupPhase.GOVERNANCE:
+ return "HOLD"
+ elif candidate.cleanup_phase == VPCCleanupPhase.COMPLEX:
+ return "COMPLEX"
+ else:
+ return "REVIEW"
+
+ def _generate_analysis_notes(self, candidate: VPCCleanupCandidate) -> str:
+ """Generate analysis notes for the VPC"""
+ notes = []
+
+ if candidate.is_default:
+ notes.append("Default VPC")
+
+ if candidate.risk_level == VPCCleanupRisk.HIGH:
+ notes.append("High Risk")
+ elif candidate.risk_level == VPCCleanupRisk.CRITICAL:
+ notes.append("Critical Risk")
+
+ if candidate.blocking_dependencies > 0:
+ notes.append(f"{candidate.blocking_dependencies} blocking deps")
+
+ if candidate.annual_savings > 1000:
+ notes.append(f"${candidate.annual_savings:,.0f}/yr savings")
+
+ return ";".join(notes) if notes else "standard cleanup"
+
+ def _display_business_impact_summary(self, candidates: List[VPCCleanupCandidate], cidr_overlaps: Set[str]) -> None:
+ """Display business impact summary for stakeholder approval"""
+
+ # Calculate comprehensive metrics
+ immediate_vpcs = [c for c in candidates if c.cleanup_phase == VPCCleanupPhase.IMMEDIATE]
+ investigation_vpcs = [c for c in candidates if c.cleanup_phase == VPCCleanupPhase.INVESTIGATION]
+ governance_vpcs = [c for c in candidates if c.cleanup_phase == VPCCleanupPhase.GOVERNANCE]
+ complex_vpcs = [c for c in candidates if c.cleanup_phase == VPCCleanupPhase.COMPLEX]
+
+ default_vpcs = [c for c in candidates if c.is_default]
+ zero_eni_vpcs = [c for c in candidates if c.eni_count == 0]
+ total_savings = sum(c.annual_savings or 0.0 for c in candidates)
+
+ summary = (
+ f"[bold green]💰 BUSINESS IMPACT SUMMARY[/bold green]\n\n"
+ f"[bold blue]Step 1: Immediate Deletion Candidates ({len(immediate_vpcs)} VPCs - {(len(immediate_vpcs)/len(candidates)*100):.1f}%)[/bold blue]\n"
+ f"[bold yellow]Step 2: Investigation Required ({len(investigation_vpcs)} VPCs)[/bold yellow]\n"
+ f"[bold cyan]Step 3: Governance Approval ({len(governance_vpcs)} VPCs)[/bold cyan]\n"
+ f"[bold red]Step 4: Complex Migration ({len(complex_vpcs)} VPCs)[/bold red]\n\n"
+ f"[green]✅ Immediate Security Value:[/green] {(len(zero_eni_vpcs)/len(candidates)*100):.1f}% of VPCs ({len(zero_eni_vpcs)} out of {len(candidates)}) ready for immediate deletion with zero dependencies\n"
+ f"[red]🛡️ Default VPC Elimination:[/red] {len(default_vpcs)} default VPCs eliminated for CIS Benchmark compliance\n"
+ f"[blue]📉 Attack Surface Reduction:[/blue] {(len(zero_eni_vpcs)/len(candidates)*100):.1f}% of VPCs have zero blocking dependencies\n"
+ f"[magenta]🎯 CIDR Overlap Detection:[/magenta] {len(cidr_overlaps)} VPCs with overlapping CIDR blocks identified\n"
+ f"[bold green]💵 Annual Savings Potential:[/bold green] ${total_savings:,.2f}\n"
+ f"[cyan]⏱️ Implementation Timeline:[/cyan] Phase 1 (Immediate), Investigation, Complex Migration phases defined"
+ )
+
+ self.console.print(Panel(summary, title="Executive Summary - VPC Cleanup Business Case", style="green", width=120))
+
+ def _truncate_text(self, text: Optional[str], max_length: int) -> str:
+ """Truncate text to specified length with ellipsis"""
+ if text is None:
+ return ""
+ if not text or len(text) <= max_length:
+ return text or ""
+ return text[:max_length-3] + "..."
+
  def _display_phase_candidates(self, phase: VPCCleanupPhase, candidates: List[VPCCleanupCandidate]) -> None:
  """Display candidates for a specific cleanup phase"""
  # Phase header
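The _detect_cidr_overlaps method added above relies on the standard-library ipaddress module; a standalone sketch of the same pairwise overlap check, using made-up VPC IDs and CIDR blocks:

    from ipaddress import IPv4Network

    vpcs = [
        ("vpc-aaa", IPv4Network("10.0.0.0/16")),
        ("vpc-bbb", IPv4Network("10.0.128.0/17")),  # contained in 10.0.0.0/16
        ("vpc-ccc", IPv4Network("172.16.0.0/16")),
    ]
    overlapping = set()
    for i, (id1, net1) in enumerate(vpcs):
        for id2, net2 in vpcs[i + 1:]:
            if net1.overlaps(net2):
                overlapping.update({id1, id2})
    print(sorted(overlapping))  # ['vpc-aaa', 'vpc-bbb']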
@@ -1755,16 +2161,16 @@ class VPCCleanupFramework:
  candidate.vpc_id,
  (candidate.vpc_name or "N/A")[:18] + ("..." if len(candidate.vpc_name or "") > 18 else ""),
  "✅" if candidate.is_default else "❌",
- str(candidate.blocking_dependencies),
- candidate.risk_level.value,
- f"${candidate.annual_savings:,.0f}",
+ str(candidate.blocking_dependencies or 0),
+ (candidate.risk_level.value if candidate.risk_level else "LOW"),
+ f"${(candidate.annual_savings or 0.0):,.0f}",
  candidate.implementation_timeline
  )

  self.console.print(table)

  # Phase summary
- phase_savings = sum(c.annual_savings for c in candidates)
+ phase_savings = sum((c.annual_savings or 0.0) for c in candidates)
  phase_risk_high = len([c for c in candidates if c.risk_level in [VPCCleanupRisk.HIGH, VPCCleanupRisk.CRITICAL]])

  phase_summary = (
@@ -2083,43 +2489,61 @@ class VPCCleanupFramework:
  if self.enable_parallel_processing and len(account_profiles) > 1:
  account_futures = {}

- for profile in account_profiles:
- future = self.executor.submit(self._analyze_account_with_circuit_breaker, profile, vpc_ids)
- account_futures[profile] = future
+ for account_item in account_profiles:
+ future = self.executor.submit(self._analyze_account_with_circuit_breaker, account_item, vpc_ids)
+ # Use account ID for tracking if available, otherwise use the profile string
+ profile_key = account_item.account_id if hasattr(account_item, 'account_id') else str(account_item)
+ account_futures[profile_key] = future

  # Collect results
- for profile, future in account_futures.items():
+ for profile_key, future in account_futures.items():
  try:
  account_candidates = future.result(timeout=300) # 5 minute timeout per account
  all_candidates.extend(account_candidates)
  except Exception as e:
- self.console.print(f"[red]❌ Error analyzing account {profile}: {e}[/red]")
- logger.error(f"Multi-account analysis failed for {profile}: {e}")
+ self.console.print(f"[red]❌ Error analyzing account {profile_key}: {e}[/red]")
+ logger.error(f"Multi-account analysis failed for {profile_key}: {e}")
  else:
  # Sequential account processing
- for profile in account_profiles:
+ for account_item in account_profiles:
  try:
- account_candidates = self._analyze_account_with_circuit_breaker(profile, vpc_ids)
+ account_candidates = self._analyze_account_with_circuit_breaker(account_item, vpc_ids)
  all_candidates.extend(account_candidates)
  except Exception as e:
- self.console.print(f"[red]❌ Error analyzing account {profile}: {e}[/red]")
- logger.error(f"Multi-account analysis failed for {profile}: {e}")
+ profile_key = account_item.account_id if hasattr(account_item, 'account_id') else str(account_item)
+ self.console.print(f"[red]❌ Error analyzing account {profile_key}: {e}[/red]")
+ logger.error(f"Multi-account analysis failed for {profile_key}: {e}")

  self.cleanup_candidates = all_candidates
  return all_candidates

- def _analyze_account_with_circuit_breaker(self, profile: str, vpc_ids: Optional[List[str]]) -> List[VPCCleanupCandidate]:
+ def _analyze_account_with_circuit_breaker(self, account_item, vpc_ids: Optional[List[str]]) -> List[VPCCleanupCandidate]:
  """Analyze single account with circuit breaker protection."""
- circuit_breaker = self.circuit_breakers[f"account_analysis_{profile}"]
+ # Handle both AccountSession objects and profile strings
+ if hasattr(account_item, 'session') and hasattr(account_item, 'account_id'):
+ # New AccountSession object from cross-account session manager
+ account_session = account_item.session
+ account_id = account_item.account_id
+ profile_key = account_id
+ else:
+ # Legacy profile string
+ profile = account_item
+ profile_key = profile
+ try:
+ from runbooks.finops.aws_client import get_cached_session
+ account_session = get_cached_session(profile)
+ except ImportError:
+ # Extract profile name from Organizations API format (profile@accountId)
+ actual_profile = profile.split("@")[0] if "@" in profile else profile
+ account_session = create_operational_session(profile=actual_profile)
+
+ circuit_breaker = self.circuit_breakers[f"account_analysis_{profile_key}"]

  if not circuit_breaker.should_allow_request():
- logger.warning(f"Circuit breaker open for account {profile}, skipping analysis")
+ logger.warning(f"Circuit breaker open for account {profile_key}, skipping analysis")
  return []

  try:
- # Create session for this account
- account_session = create_operational_session(profile=profile)
-
  # Temporarily update session for analysis
  original_session = self.session
  self.session = account_session
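The should_allow_request / record_failure calls above imply a conventional circuit breaker; the package's own implementation is not shown in this hunk, so here is a generic, illustrative version of that pattern (not the package's class):

    import time

    class CircuitBreaker:  # generic pattern, illustrative only
        def __init__(self, failure_threshold=3, reset_after=60.0):
            self.failure_threshold = failure_threshold
            self.reset_after = reset_after
            self.failures = 0
            self.opened_at = None

        def should_allow_request(self):
            if self.opened_at is None:
                return True
            if time.time() - self.opened_at >= self.reset_after:
                # half-open: allow a trial request after the cool-down
                self.failures, self.opened_at = 0, None
                return True
            return False

        def record_failure(self):
            self.failures += 1
            if self.failures >= self.failure_threshold:
                self.opened_at = time.time()

    breaker = CircuitBreaker()
    for _ in range(3):
        breaker.record_failure()
    print(breaker.should_allow_request())  # False while the breaker is open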
@@ -2308,7 +2732,12 @@ class VPCCleanupFramework:
  """Enhanced performance target validation with detailed analysis."""
  target_time = 30.0 # <30s requirement

- if metrics.duration and metrics.duration > target_time:
+ # Defensive check for None values
+ if not hasattr(metrics, 'duration') or metrics.duration is None:
+ logger.warning("Performance metrics duration is None, skipping performance validation")
+ return
+
+ if metrics.duration > target_time:
  performance_degradation = {
  "execution_time": metrics.duration,
  "target_time": target_time,
@@ -2625,5 +3054,10 @@ class VPCCleanupFramework:

  def __del__(self):
  """Cleanup resources when framework is destroyed."""
- if self.executor:
- self.executor.shutdown(wait=True)
+ try:
+ if hasattr(self, 'executor') and self.executor:
+ if not self.executor._shutdown:
+ self.executor.shutdown(wait=True)
+ except Exception as e:
+ # Silently handle cleanup errors to avoid issues during garbage collection
+ pass
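One aside on the __del__ guard above: checking the private executor._shutdown flag is defensive only, since ThreadPoolExecutor.shutdown() from the standard library can be called more than once without raising:

    from concurrent.futures import ThreadPoolExecutor

    executor = ThreadPoolExecutor(max_workers=2)
    executor.shutdown(wait=True)
    executor.shutdown(wait=True)  # repeated shutdown is a no-op, no exception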