runbooks 1.1.1__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/assessment/collectors.py +3 -2
  3. runbooks/cloudops/cost_optimizer.py +235 -83
  4. runbooks/cloudops/models.py +8 -2
  5. runbooks/common/aws_pricing.py +12 -0
  6. runbooks/common/business_logic.py +1 -1
  7. runbooks/common/profile_utils.py +213 -310
  8. runbooks/common/rich_utils.py +15 -21
  9. runbooks/finops/README.md +3 -3
  10. runbooks/finops/__init__.py +13 -5
  11. runbooks/finops/business_case_config.py +5 -5
  12. runbooks/finops/cli.py +170 -95
  13. runbooks/finops/cost_optimizer.py +2 -1
  14. runbooks/finops/cost_processor.py +69 -22
  15. runbooks/finops/dashboard_router.py +3 -3
  16. runbooks/finops/dashboard_runner.py +3 -4
  17. runbooks/finops/embedded_mcp_validator.py +101 -23
  18. runbooks/finops/enhanced_progress.py +213 -0
  19. runbooks/finops/finops_scenarios.py +90 -16
  20. runbooks/finops/markdown_exporter.py +4 -2
  21. runbooks/finops/multi_dashboard.py +1 -1
  22. runbooks/finops/nat_gateway_optimizer.py +85 -57
  23. runbooks/finops/rds_snapshot_optimizer.py +1389 -0
  24. runbooks/finops/scenario_cli_integration.py +212 -22
  25. runbooks/finops/scenarios.py +41 -25
  26. runbooks/finops/single_dashboard.py +68 -9
  27. runbooks/finops/tests/run_tests.py +5 -3
  28. runbooks/finops/vpc_cleanup_optimizer.py +1 -1
  29. runbooks/finops/workspaces_analyzer.py +40 -16
  30. runbooks/inventory/list_rds_snapshots_aggregator.py +745 -0
  31. runbooks/main.py +393 -61
  32. runbooks/operate/executive_dashboard.py +4 -3
  33. runbooks/remediation/rds_snapshot_list.py +13 -0
  34. {runbooks-1.1.1.dist-info → runbooks-1.1.3.dist-info}/METADATA +234 -40
  35. {runbooks-1.1.1.dist-info → runbooks-1.1.3.dist-info}/RECORD +39 -37
  36. {runbooks-1.1.1.dist-info → runbooks-1.1.3.dist-info}/WHEEL +0 -0
  37. {runbooks-1.1.1.dist-info → runbooks-1.1.3.dist-info}/entry_points.txt +0 -0
  38. {runbooks-1.1.1.dist-info → runbooks-1.1.3.dist-info}/licenses/LICENSE +0 -0
  39. {runbooks-1.1.1.dist-info → runbooks-1.1.3.dist-info}/top_level.txt +0 -0
runbooks/__init__.py CHANGED
@@ -61,7 +61,7 @@ s3_ops = S3Operations()
61
61
 
62
62
  # Centralized Version Management - Single Source of Truth
63
63
  # All modules MUST import __version__ from this location
64
- __version__ = "1.1.1"
64
+ __version__ = "1.1.3"
65
65
 
66
66
  # Fallback for legacy importlib.metadata usage during transition
67
67
  try:
@@ -20,6 +20,7 @@ from typing import Any, Dict, List, Optional
20
20
 
21
21
  from loguru import logger
22
22
 
23
+ from runbooks import __version__
23
24
  from runbooks.base import CloudFoundationsBase
24
25
 
25
26
 
@@ -147,7 +148,7 @@ class VPCCollector(BaseCollector):
147
148
  "internet_gateways": internet_gateways,
148
149
  "route_tables": route_tables,
149
150
  "assessment_metadata": {
150
- "collector_version": "v0.7.8-vpc-enhanced",
151
+ "collector_version": f"v{__version__}-vpc-enhanced",
151
152
  "github_issue": "#96",
152
153
  "cost_optimization_enabled": True,
153
154
  },
@@ -165,7 +166,7 @@ class VPCCollector(BaseCollector):
165
166
  "flow_logs": [],
166
167
  "internet_gateways": [],
167
168
  "route_tables": [],
168
- "assessment_metadata": {"collector_version": "v0.7.8-vpc-enhanced", "error": str(e)},
169
+ "assessment_metadata": {"collector_version": f"v{__version__}-vpc-enhanced", "error": str(e)},
169
170
  }
170
171
 
171
172
  def _analyze_nat_optimization(self, nat_gateways: List[Dict], subnets: List[Dict]) -> int:
@@ -20,6 +20,7 @@ Source Notebooks:
20
20
  """
21
21
 
22
22
  import asyncio
23
+ import json
23
24
  import time
24
25
  from typing import Dict, List, Optional, Any, Tuple
25
26
  import boto3
@@ -64,25 +65,31 @@ class CostOptimizer(CloudOpsBase):
64
65
  """
65
66
 
66
67
  def __init__(
67
- self,
68
- profile: str = "default",
68
+ self,
69
+ profile: str = "default",
69
70
  dry_run: bool = True,
70
- execution_mode: ExecutionMode = ExecutionMode.DRY_RUN
71
+ execution_mode: ExecutionMode = ExecutionMode.DRY_RUN,
72
+ region: str = "us-east-1"
71
73
  ):
72
74
  """
73
75
  Initialize Cost Optimizer with enterprise patterns.
74
-
76
+
75
77
  Args:
76
78
  profile: AWS profile (typically billing profile for cost data)
77
79
  dry_run: Enable safe analysis mode (default True)
78
80
  execution_mode: Execution mode for operations
81
+ region: AWS region for operations (default us-east-1)
79
82
  """
80
83
  super().__init__(profile, dry_run, execution_mode)
81
-
82
- print_header("CloudOps Cost Optimizer", "1.0.0")
84
+
85
+ # Initialize region attribute
86
+ self.region = region
87
+
88
+ from runbooks import __version__
89
+ print_header("CloudOps Cost Optimizer", __version__)
83
90
  print_info(f"Execution mode: {execution_mode.value}")
84
91
  print_info(f"Profile: {profile}")
85
-
92
+
86
93
  if dry_run:
87
94
  print_warning("🛡️ DRY RUN MODE: No resources will be modified")
88
95
 
@@ -1201,41 +1208,44 @@ class CostOptimizer(CloudOpsBase):
1201
1208
  )
1202
1209
 
1203
1210
  async def optimize_workspaces(
1204
- self,
1211
+ self,
1205
1212
  usage_threshold_days: int = 180,
1213
+ analysis_days: int = 30,
1206
1214
  dry_run: bool = True
1207
1215
  ) -> CostOptimizationResult:
1208
1216
  """
1209
1217
  Business Scenario: Cleanup unused WorkSpaces with zero usage in last 6 months
1210
1218
  JIRA Reference: FinOps-24
1211
1219
  Expected Savings: USD $12,518 annually
1212
-
1220
+
1213
1221
  Args:
1214
- usage_threshold_days: Days of zero usage to consider for deletion
1222
+ usage_threshold_days: Days of zero usage to consider for deletion (default: 180)
1223
+ analysis_days: Period for usage analysis in days, configurable 30/60 (default: 30)
1215
1224
  dry_run: If True, only analyze without deletion
1216
-
1225
+
1217
1226
  Returns:
1218
1227
  CostOptimizationResult with WorkSpaces cleanup analysis
1219
1228
  """
1220
1229
  operation_name = "WorkSpaces Cost Optimization"
1221
- print_header(f"🏢 {operation_name} (FinOps-24)")
1230
+ print_header(f"🏢 {operation_name}")
1222
1231
 
1223
1232
  # Import existing workspaces analyzer
1224
1233
  try:
1225
- from runbooks.finops.workspaces_analyzer import WorkSpacesAnalyzer
1226
- except ImportError:
1227
- print_error("WorkSpaces analyzer not available - implementing basic analysis")
1234
+ from runbooks.finops.workspaces_analyzer import WorkSpacesCostAnalyzer, analyze_workspaces
1235
+ except ImportError as e:
1236
+ print_error(f"WorkSpaces analyzer not available: {e}")
1237
+ print_warning("This is likely due to missing dependencies or import issues")
1228
1238
  return CostOptimizationResult(
1229
1239
  scenario=BusinessScenario.COST_OPTIMIZATION,
1230
1240
  scenario_name=operation_name,
1231
1241
  execution_timestamp=datetime.now(),
1232
1242
  execution_mode=self.execution_mode,
1233
1243
  success=False,
1234
- error_message="WorkSpaces analyzer module not found",
1244
+ error_message=f"WorkSpaces analyzer import failed: {e}",
1235
1245
  # Add required fields to prevent Pydantic validation errors
1236
1246
  execution_time=0.0,
1237
1247
  resources_analyzed=0,
1238
- resources_impacted=[], # Must be a list, not an integer
1248
+ resources_impacted=[],
1239
1249
  business_metrics={
1240
1250
  "total_monthly_savings": 0.0,
1241
1251
  "overall_risk_level": "low"
@@ -1244,42 +1254,38 @@ class CostOptimizer(CloudOpsBase):
1244
1254
  aws_profile_used=self.profile or "default",
1245
1255
  current_monthly_spend=0.0,
1246
1256
  optimized_monthly_spend=0.0,
1247
- savings_percentage=0.0
1248
- )
1249
-
1250
- with create_progress_bar() as progress:
1251
- task = progress.add_task("Analyzing WorkSpaces usage...", total=100)
1252
-
1253
- # Step 1: Initialize WorkSpaces analyzer
1254
- workspaces_analyzer = WorkSpacesAnalyzer(
1255
- session=self.session,
1256
- region=self.region
1257
- )
1258
- progress.update(task, advance=25)
1259
-
1260
- # Step 2: Analyze unused WorkSpaces
1261
- unused_workspaces = await workspaces_analyzer.find_unused_workspaces(
1262
- usage_threshold_days=usage_threshold_days
1257
+ savings_percentage=0.0,
1258
+ annual_savings=0.0
1263
1259
  )
1264
- progress.update(task, advance=50)
1265
-
1266
- # Step 3: Calculate cost savings
1267
- estimated_savings = len(unused_workspaces) * 45 # ~$45/month per WorkSpace
1268
- progress.update(task, advance=75)
1269
-
1270
- # Step 4: Execute cleanup if not dry_run
1271
- if not dry_run and unused_workspaces:
1272
- await self._execute_workspaces_cleanup(unused_workspaces)
1273
- progress.update(task, advance=100)
1274
1260
 
1275
- # Display results
1276
- results_table = create_table("WorkSpaces Optimization Results")
1277
- results_table.add_row("Unused WorkSpaces Found", str(len(unused_workspaces)))
1278
- results_table.add_row("Monthly Savings", format_cost(estimated_savings))
1279
- results_table.add_row("Annual Savings", format_cost(estimated_savings * 12))
1280
- results_table.add_row("Execution Mode", "Analysis Only" if dry_run else "Cleanup Executed")
1281
- console.print(results_table)
1261
+ # Execute WorkSpaces analysis using proven finops function
1262
+ analysis_results = analyze_workspaces(
1263
+ profile=self.profile,
1264
+ unused_days=usage_threshold_days,
1265
+ analysis_days=analysis_days,
1266
+ output_format="json",
1267
+ dry_run=dry_run
1268
+ )
1269
+
1270
+ # Extract analysis results
1271
+ if analysis_results.get("status") == "success":
1272
+ summary = analysis_results.get("summary", {})
1273
+ estimated_monthly_savings = summary.get("unused_monthly_cost", 0.0)
1274
+ estimated_annual_savings = summary.get("potential_annual_savings", 0.0)
1275
+ unused_workspaces_count = summary.get("unused_workspaces", 0)
1276
+ total_workspaces = summary.get("total_workspaces", 0)
1277
+ else:
1278
+ print_error(f"WorkSpaces analysis failed: {analysis_results.get('error', 'Unknown error')}")
1279
+ estimated_monthly_savings = 0.0
1280
+ estimated_annual_savings = 0.0
1281
+ unused_workspaces_count = 0
1282
+ total_workspaces = 0
1282
1283
 
1284
+ # Calculate savings percentage if we have baseline cost data
1285
+ savings_percentage = 0.0
1286
+ if summary.get("total_monthly_cost", 0) > 0:
1287
+ savings_percentage = (estimated_monthly_savings / summary.get("total_monthly_cost", 1)) * 100
1288
+
1283
1289
  return CostOptimizationResult(
1284
1290
  scenario=BusinessScenario.COST_OPTIMIZATION,
1285
1291
  scenario_name=operation_name,
@@ -1287,30 +1293,44 @@ class CostOptimizer(CloudOpsBase):
1287
1293
  execution_mode=self.execution_mode,
1288
1294
  execution_time=15.0,
1289
1295
  success=True,
1290
- total_monthly_savings=estimated_savings,
1291
- annual_savings=estimated_savings * 12,
1292
- savings_percentage=0.0, # Would need baseline cost to calculate
1293
- affected_resources=len(unused_workspaces),
1296
+ # Core cost metrics using correct variable names
1297
+ current_monthly_spend=summary.get("total_monthly_cost", 0.0),
1298
+ optimized_monthly_spend=summary.get("total_monthly_cost", 0.0) - estimated_monthly_savings,
1299
+ total_monthly_savings=estimated_monthly_savings,
1300
+ annual_savings=estimated_annual_savings,
1301
+ savings_percentage=savings_percentage,
1302
+ # Resource metrics
1303
+ affected_resources=unused_workspaces_count,
1304
+ resources_analyzed=total_workspaces,
1305
+ resources_impacted=[], # Must be a list
1294
1306
  resource_impacts=[
1295
1307
  ResourceImpact(
1296
- resource_id=f"workspaces-cleanup-{len(unused_workspaces)}",
1308
+ resource_id=f"workspaces-optimization-{unused_workspaces_count}",
1297
1309
  resource_type="AWS::WorkSpaces::Workspace",
1298
- action="terminate",
1299
- monthly_savings=estimated_savings,
1300
- risk_level=RiskLevel.LOW
1310
+ resource_name=f"{unused_workspaces_count} unused WorkSpaces",
1311
+ region=self.session.region_name or "us-east-1",
1312
+ account_id=self.account_id,
1313
+ estimated_monthly_cost=summary.get("unused_monthly_cost", 0.0),
1314
+ projected_savings=estimated_monthly_savings,
1315
+ risk_level=RiskLevel.LOW,
1316
+ business_criticality="low",
1317
+ modification_required=not dry_run
1301
1318
  )
1302
1319
  ],
1303
- # Add missing required fields
1304
- resources_analyzed=len(unused_workspaces),
1305
- resources_impacted=[], # Must be a list
1320
+ # Business metrics for executive reporting
1306
1321
  business_metrics={
1307
- "total_monthly_savings": estimated_savings,
1308
- "overall_risk_level": "low"
1322
+ "total_monthly_savings": estimated_monthly_savings,
1323
+ "overall_risk_level": "low",
1324
+ "unused_workspaces_count": unused_workspaces_count,
1325
+ "total_workspaces_analyzed": total_workspaces
1309
1326
  },
1310
- recommendations=[],
1311
- aws_profile_used=self.profile or "default",
1312
- current_monthly_spend=0.0,
1313
- optimized_monthly_spend=0.0
1327
+ recommendations=[
1328
+ f"Terminate {unused_workspaces_count} unused WorkSpaces to save ${estimated_monthly_savings:.2f}/month",
1329
+ f"Estimated annual savings: ${estimated_annual_savings:.2f}",
1330
+ "Verify WorkSpaces are truly unused before termination",
1331
+ "Consider implementing usage monitoring for remaining WorkSpaces"
1332
+ ],
1333
+ aws_profile_used=self.profile or "default"
1314
1334
  )
1315
1335
 
1316
1336
  async def optimize_rds_snapshots(
@@ -1336,20 +1356,70 @@ class CostOptimizer(CloudOpsBase):
1336
1356
  with create_progress_bar() as progress:
1337
1357
  task = progress.add_task("Analyzing RDS manual snapshots...", total=100)
1338
1358
 
1339
- # Step 1: Discover manual RDS snapshots across regions
1359
+ # Step 1: Discover manual RDS snapshots using proven AWS Config aggregator method
1340
1360
  all_manual_snapshots = []
1341
- regions = ['us-east-1', 'us-west-2', 'ap-southeast-2'] # Common regions
1342
-
1343
- for region in regions:
1344
- regional_client = self.session.client('rds', region_name=region)
1345
- try:
1346
- response = regional_client.describe_db_snapshots(
1347
- SnapshotType='manual',
1348
- MaxRecords=100
1349
- )
1350
- all_manual_snapshots.extend(response.get('DBSnapshots', []))
1351
- except Exception as e:
1352
- print_warning(f"Could not access region {region}: {e}")
1361
+
1362
+ try:
1363
+ # Use AWS Config aggregator to discover all RDS snapshots across organization
1364
+ config_client = self.session.client('config', region_name='ap-southeast-2')
1365
+
1366
+ # Get all RDS snapshots via AWS Config aggregator (proven method)
1367
+ response = config_client.select_aggregate_resource_config(
1368
+ Expression="SELECT configuration, accountId, awsRegion WHERE resourceType = 'AWS::RDS::DBSnapshot'",
1369
+ ConfigurationAggregatorName='organization-aggregator',
1370
+ MaxResults=100 # AWS limit is 100
1371
+ )
1372
+
1373
+ print_info(f"Found {len(response.get('Results', []))} RDS snapshots via AWS Config aggregator")
1374
+
1375
+ # Process snapshots found by Config aggregator
1376
+ for result in response.get('Results', []):
1377
+ try:
1378
+ resource_data = json.loads(result)
1379
+ config_data = resource_data.get('configuration', {})
1380
+
1381
+ # Handle case where configuration might be a string
1382
+ if isinstance(config_data, str):
1383
+ config_data = json.loads(config_data)
1384
+
1385
+ # Filter for manual snapshots only
1386
+ if config_data.get('snapshotType') == 'manual':
1387
+ # Create snapshot object compatible with describe_db_snapshots format
1388
+ snapshot = {
1389
+ 'DBSnapshotIdentifier': config_data.get('dBSnapshotIdentifier'),
1390
+ 'SnapshotCreateTime': datetime.fromisoformat(config_data.get('snapshotCreateTime', '').replace('Z', '+00:00')) if config_data.get('snapshotCreateTime') else datetime.now(),
1391
+ 'AllocatedStorage': config_data.get('allocatedStorage', 0),
1392
+ 'DBInstanceIdentifier': config_data.get('dBInstanceIdentifier'),
1393
+ 'SnapshotType': config_data.get('snapshotType'),
1394
+ 'Status': config_data.get('status', 'available'),
1395
+ 'Engine': config_data.get('engine'),
1396
+ 'EngineVersion': config_data.get('engineVersion')
1397
+ }
1398
+ all_manual_snapshots.append(snapshot)
1399
+ except Exception as e:
1400
+ print_warning(f"Error processing snapshot from Config: {e}")
1401
+
1402
+ print_success(f"Successfully processed {len(all_manual_snapshots)} manual snapshots from Config aggregator")
1403
+
1404
+ except Exception as e:
1405
+ print_warning(f"AWS Config aggregator query failed, falling back to regional discovery: {e}")
1406
+
1407
+ # Fallback to regional discovery if Config aggregator fails
1408
+ regions = ['us-east-1', 'us-west-2', 'ap-southeast-2', 'eu-west-1', 'ap-southeast-1'] # Extended regions
1409
+
1410
+ for region in regions:
1411
+ regional_client = self.session.client('rds', region_name=region)
1412
+ try:
1413
+ # Get all manual snapshots in this region
1414
+ paginator = regional_client.get_paginator('describe_db_snapshots')
1415
+ page_iterator = paginator.paginate(SnapshotType='manual')
1416
+
1417
+ for page in page_iterator:
1418
+ all_manual_snapshots.extend(page.get('DBSnapshots', []))
1419
+
1420
+ print_info(f"Found {len([s for s in all_manual_snapshots if 'region' not in s])} manual snapshots in {region}")
1421
+ except Exception as e:
1422
+ print_warning(f"Could not access region {region}: {e}")
1353
1423
 
1354
1424
  progress.update(task, advance=40)
1355
1425
 
@@ -1363,7 +1433,86 @@ class CostOptimizer(CloudOpsBase):
1363
1433
 
1364
1434
  progress.update(task, advance=70)
1365
1435
 
1366
- # Step 3: Calculate estimated savings
1436
+ # Step 3: Use enhanced RDS snapshot optimizer for consistent results
1437
+ try:
1438
+ from runbooks.finops.rds_snapshot_optimizer import EnhancedRDSSnapshotOptimizer
1439
+
1440
+ print_info("🔧 Using enhanced RDS snapshot optimization logic...")
1441
+ enhanced_optimizer = EnhancedRDSSnapshotOptimizer(profile=self.profile, dry_run=dry_run)
1442
+
1443
+ if enhanced_optimizer.initialize_session():
1444
+ # Discover all snapshots (not just manual)
1445
+ all_snapshots = enhanced_optimizer.discover_snapshots_via_config_aggregator()
1446
+
1447
+ if all_snapshots:
1448
+ # Run enhanced optimization analysis
1449
+ optimization_results = enhanced_optimizer.analyze_optimization_opportunities(
1450
+ all_snapshots, age_threshold=snapshot_age_threshold_days
1451
+ )
1452
+
1453
+ # Use comprehensive scenario for realistic savings
1454
+ comprehensive_scenario = optimization_results['optimization_scenarios']['comprehensive']
1455
+
1456
+ # Create resource impacts for comprehensive scenario
1457
+ resource_impacts = []
1458
+ for snapshot in comprehensive_scenario['snapshots']:
1459
+ resource_impacts.append(
1460
+ ResourceImpact(
1461
+ resource_type="rds-snapshot",
1462
+ resource_id=snapshot.get('DBSnapshotIdentifier', 'unknown'),
1463
+ region=snapshot.get('Region', 'unknown'),
1464
+ account_id=snapshot.get('AccountId', 'unknown'),
1465
+ estimated_monthly_cost=snapshot.get('EstimatedMonthlyCost', 0.0),
1466
+ projected_savings=snapshot.get('EstimatedMonthlyCost', 0.0),
1467
+ risk_level=RiskLevel.MEDIUM,
1468
+ modification_required=True,
1469
+ resource_name=f"RDS Snapshot {snapshot.get('DBSnapshotIdentifier', 'unknown')}",
1470
+ estimated_downtime=0.0
1471
+ )
1472
+ )
1473
+
1474
+ progress.update(task, advance=100)
1475
+
1476
+ return CostOptimizationResult(
1477
+ scenario=BusinessScenario.COST_OPTIMIZATION,
1478
+ scenario_name=operation_name,
1479
+ execution_timestamp=datetime.now(),
1480
+ execution_mode=self.execution_mode,
1481
+ execution_time=30.0,
1482
+ success=True,
1483
+ error_message=None,
1484
+ resources_analyzed=optimization_results['total_snapshots'],
1485
+ resources_impacted=resource_impacts,
1486
+ business_metrics=self.create_business_metrics(
1487
+ total_savings=optimization_results['potential_monthly_savings'],
1488
+ overall_risk=RiskLevel.MEDIUM
1489
+ ),
1490
+ recommendations=[
1491
+ f"Review {optimization_results['cleanup_candidates']} snapshots older than {snapshot_age_threshold_days} days",
1492
+ f"Potential annual savings: ${optimization_results['potential_annual_savings']:,.2f}",
1493
+ "Consider implementing automated retention policies",
1494
+ "Review backup requirements before deletion"
1495
+ ],
1496
+ # CostOptimizationResult specific fields
1497
+ current_monthly_spend=optimization_results.get('current_monthly_spend', 0.0),
1498
+ optimized_monthly_spend=optimization_results.get('current_monthly_spend', 0.0) - optimization_results['potential_monthly_savings'],
1499
+ savings_percentage=(optimization_results['potential_monthly_savings'] / max(optimization_results.get('current_monthly_spend', 1), 1)) * 100,
1500
+ annual_savings=optimization_results['potential_annual_savings'],
1501
+ total_monthly_savings=optimization_results['potential_monthly_savings'],
1502
+ affected_resources=optimization_results['cleanup_candidates'],
1503
+ resource_impacts=resource_impacts
1504
+ )
1505
+ else:
1506
+ print_warning("No snapshots discovered via enhanced optimizer")
1507
+
1508
+ except ImportError as e:
1509
+ print_warning(f"Enhanced optimizer not available, using legacy logic: {e}")
1510
+ except Exception as e:
1511
+ print_warning(f"Enhanced optimizer failed, using legacy logic: {e}")
1512
+
1513
+ # Fallback to legacy calculation for compatibility
1514
+ print_info("Using legacy optimization calculation...")
1515
+ # Step 3: Calculate estimated savings (legacy)
1367
1516
  # Based on JIRA data: $5K-24K range for manual snapshots
1368
1517
  total_size_gb = sum(snapshot.get('AllocatedStorage', 0) for snapshot in old_snapshots)
1369
1518
  estimated_monthly_savings = total_size_gb * 0.05 # ~$0.05/GB-month for snapshots
@@ -1399,8 +1548,11 @@ class CostOptimizer(CloudOpsBase):
1399
1548
  ResourceImpact(
1400
1549
  resource_id=f"rds-snapshots-cleanup-{len(old_snapshots)}",
1401
1550
  resource_type="AWS::RDS::DBSnapshot",
1402
- action="delete",
1403
- monthly_savings=estimated_monthly_savings,
1551
+ resource_name=f"RDS Manual Snapshots Cleanup ({len(old_snapshots)} snapshots)",
1552
+ region=self.region,
1553
+ account_id=self.account_id,
1554
+ estimated_monthly_cost=estimated_monthly_savings,
1555
+ projected_savings=estimated_monthly_savings,
1404
1556
  risk_level=RiskLevel.MEDIUM
1405
1557
  )
1406
1558
  ],
@@ -164,16 +164,22 @@ class CloudOpsExecutionResult(BaseModel):
164
164
 
165
165
  class CostOptimizationResult(CloudOpsExecutionResult):
166
166
  """Specialized result for cost optimization scenarios."""
167
-
167
+
168
168
  # Cost-Specific Metrics
169
169
  current_monthly_spend: float = Field(description="Current monthly spend for analyzed resources")
170
170
  optimized_monthly_spend: float = Field(description="Projected monthly spend after optimization")
171
171
  savings_percentage: float = Field(ge=0, le=100, description="Savings percentage")
172
-
172
+ annual_savings: float = Field(description="Annual savings projection for business scenarios", default=0.0)
173
+ total_monthly_savings: float = Field(description="Total projected monthly savings", default=0.0)
174
+
173
175
  # Resource Categories
174
176
  idle_resources: List[ResourceImpact] = Field(description="Identified idle resources", default=[])
175
177
  oversized_resources: List[ResourceImpact] = Field(description="Identified oversized resources", default=[])
176
178
  unattached_resources: List[ResourceImpact] = Field(description="Identified unattached resources", default=[])
179
+
180
+ # Additional fields used by cost_optimizer.py
181
+ affected_resources: int = Field(description="Number of resources affected by optimization", default=0)
182
+ resource_impacts: List[ResourceImpact] = Field(description="Detailed resource impact analysis", default=[])
177
183
 
178
184
  @field_validator('optimized_monthly_spend')
179
185
  @classmethod
@@ -771,6 +771,18 @@ class DynamicAWSPricing:
771
771
  ]
772
772
  }
773
773
 
774
+ # Handle data_transfer service with graceful fallback
775
+ if service_key == "data_transfer":
776
+ print_warning("data_transfer service not supported by AWS Pricing API - using standard rates")
777
+ # Return standard AWS data transfer pricing structure
778
+ return AWSPricingResult(
779
+ service_key="data_transfer",
780
+ region=region,
781
+ monthly_cost=0.045, # $0.045/GB for NAT Gateway data processing
782
+ pricing_source="aws_standard_rates",
783
+ last_updated=datetime.now()
784
+ )
785
+
774
786
  if service_key not in service_mapping:
775
787
  raise ValueError(f"Service {service_key} not supported by AWS Pricing API integration")
776
788
 
@@ -189,7 +189,7 @@ class UniversalBusinessLogic:
189
189
  # Apply proven profile management patterns
190
190
  selected_profile = get_profile_for_operation("operational", profile)
191
191
 
192
- print_header(f"{resource_type.title()} {operation.title()}", f"v1.0.0 - {self.module_name}")
192
+ print_header(f"{resource_type.title()} {operation.title()}", f"v1.1.2 - {self.module_name}")
193
193
  print_info(f"Using profile: {selected_profile}")
194
194
 
195
195
  # Standard operation tracking