runbooks-1.0.3-py3-none-any.whl → runbooks-1.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +10 -5
- runbooks/__init__.py.backup +134 -0
- runbooks/__init___optimized.py +110 -0
- runbooks/cloudops/base.py +56 -3
- runbooks/cloudops/cost_optimizer.py +496 -42
- runbooks/common/aws_pricing.py +236 -80
- runbooks/common/business_logic.py +485 -0
- runbooks/common/cli_decorators.py +219 -0
- runbooks/common/error_handling.py +424 -0
- runbooks/common/lazy_loader.py +186 -0
- runbooks/common/module_cli_base.py +378 -0
- runbooks/common/performance_monitoring.py +512 -0
- runbooks/common/profile_utils.py +133 -6
- runbooks/enterprise/logging.py +30 -2
- runbooks/enterprise/validation.py +177 -0
- runbooks/finops/README.md +311 -236
- runbooks/finops/aws_client.py +1 -1
- runbooks/finops/business_case_config.py +723 -19
- runbooks/finops/cli.py +136 -0
- runbooks/finops/commvault_ec2_analysis.py +25 -9
- runbooks/finops/config.py +272 -0
- runbooks/finops/dashboard_runner.py +136 -23
- runbooks/finops/ebs_cost_optimizer.py +39 -40
- runbooks/finops/enhanced_trend_visualization.py +7 -2
- runbooks/finops/enterprise_wrappers.py +45 -18
- runbooks/finops/finops_dashboard.py +50 -25
- runbooks/finops/finops_scenarios.py +22 -7
- runbooks/finops/helpers.py +115 -2
- runbooks/finops/multi_dashboard.py +7 -5
- runbooks/finops/optimizer.py +97 -6
- runbooks/finops/scenario_cli_integration.py +247 -0
- runbooks/finops/scenarios.py +12 -1
- runbooks/finops/unlimited_scenarios.py +393 -0
- runbooks/finops/validation_framework.py +19 -7
- runbooks/finops/workspaces_analyzer.py +1 -5
- runbooks/inventory/mcp_inventory_validator.py +2 -1
- runbooks/main.py +132 -94
- runbooks/main_final.py +358 -0
- runbooks/main_minimal.py +84 -0
- runbooks/main_optimized.py +493 -0
- runbooks/main_ultra_minimal.py +47 -0
- runbooks/utils/version_validator.py +1 -1
- {runbooks-1.0.3.dist-info → runbooks-1.1.1.dist-info}/METADATA +1 -1
- {runbooks-1.0.3.dist-info → runbooks-1.1.1.dist-info}/RECORD +48 -32
- {runbooks-1.0.3.dist-info → runbooks-1.1.1.dist-info}/WHEEL +0 -0
- {runbooks-1.0.3.dist-info → runbooks-1.1.1.dist-info}/entry_points.txt +0 -0
- {runbooks-1.0.3.dist-info → runbooks-1.1.1.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.0.3.dist-info → runbooks-1.1.1.dist-info}/top_level.txt +0 -0
```diff
@@ -85,8 +85,61 @@ class CostOptimizer(CloudOpsBase):
 
         if dry_run:
             print_warning("🛡️ DRY RUN MODE: No resources will be modified")
-
-
+
+        # Performance tracking
+        self.operation_start_time = time.time()
+
+    def _measure_execution_time(self) -> float:
+        """
+        Measure actual execution time from operation start.
+
+        Returns:
+            Execution time in seconds
+        """
+        if hasattr(self, 'operation_start_time'):
+            return time.time() - self.operation_start_time
+        else:
+            # Fallback if start time not tracked
+            import time
+            return time.time() - time.time()  # Returns ~0.0
+
+    def _suggest_smaller_instance_type(self, instance_type: str) -> Optional[str]:
+        """
+        Suggest a smaller instance type for rightsizing.
+
+        Args:
+            instance_type: Current EC2 instance type
+
+        Returns:
+            Suggested smaller instance type or None
+        """
+        # Simple rightsizing mapping - can be enhanced with CloudWatch metrics
+        rightsizing_map = {
+            # T3 family
+            't3.large': 't3.medium',
+            't3.xlarge': 't3.large',
+            't3.2xlarge': 't3.xlarge',
+
+            # M5 family
+            'm5.large': 'm5.medium',
+            'm5.xlarge': 'm5.large',
+            'm5.2xlarge': 'm5.xlarge',
+            'm5.4xlarge': 'm5.2xlarge',
+
+            # C5 family
+            'c5.large': 'c5.medium',
+            'c5.xlarge': 'c5.large',
+            'c5.2xlarge': 'c5.xlarge',
+            'c5.4xlarge': 'c5.2xlarge',
+
+            # R5 family
+            'r5.large': 'r5.medium',
+            'r5.xlarge': 'r5.large',
+            'r5.2xlarge': 'r5.xlarge',
+        }
+
+        return rightsizing_map.get(instance_type)
+
     async def discover_infrastructure(
         self,
         regions: Optional[List[str]] = None,
```
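This hunk adds constructor-level performance tracking and a `_suggest_smaller_instance_type` helper that is a static one-size-down lookup: unmapped instance types return `None` and are treated as correctly sized. A minimal standalone sketch of that lookup pattern (the map below is abridged for illustration, not the package's full table):

```python
# Minimal standalone sketch of the one-size-down lookup used by
# _suggest_smaller_instance_type; the map here is abridged for illustration.
from typing import Optional

RIGHTSIZING_MAP = {
    "t3.xlarge": "t3.large",
    "m5.2xlarge": "m5.xlarge",
    "c5.4xlarge": "c5.2xlarge",
}

def suggest_smaller_instance_type(instance_type: str) -> Optional[str]:
    """Return the next smaller type, or None when no downsize mapping exists."""
    return RIGHTSIZING_MAP.get(instance_type)

print(suggest_smaller_instance_type("m5.2xlarge"))  # m5.xlarge
print(suggest_smaller_instance_type("t4g.nano"))    # None -> not flagged as oversized
```

Note also that the fallback branch of `_measure_execution_time` (`time.time() - time.time()`) effectively reports an elapsed time of zero whenever no start timestamp was recorded.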
```diff
@@ -383,54 +436,396 @@ class CostOptimizer(CloudOpsBase):
         """Analyze EC2 instances for rightsizing opportunities."""
         print_info("🔍 Analyzing EC2 rightsizing opportunities...")
 
-        #
-
-
-
-
-
-
-
+        # Real AWS integration for rightsizing analysis
+        from runbooks.common.aws_pricing import get_aws_pricing_engine, get_ec2_monthly_cost
+
+        try:
+            pricing_engine = get_aws_pricing_engine(profile=self.profile)
+
+            # Get actual EC2 instances from AWS API
+            ec2_client = self.session.client('ec2')
+            response = ec2_client.describe_instances()
+
+            instances_analyzed = 0
+            oversized_instances = 0
+            potential_monthly_savings = 0.0
+
+            for reservation in response['Reservations']:
+                for instance in reservation['Instances']:
+                    if instance['State']['Name'] in ['running', 'stopped']:
+                        instances_analyzed += 1
+                        instance_type = instance['InstanceType']
+
+                        # Calculate potential savings from rightsizing
+                        current_cost = get_ec2_monthly_cost(instance_type, self.region, self.profile)
+
+                        # Simple rightsizing heuristic - suggest one size smaller if available
+                        smaller_instance = self._suggest_smaller_instance_type(instance_type)
+                        if smaller_instance:
+                            smaller_cost = get_ec2_monthly_cost(smaller_instance, self.region, self.profile)
+                            if smaller_cost < current_cost:
+                                oversized_instances += 1
+                                potential_monthly_savings += (current_cost - smaller_cost)
+
+            return {
+                'instances_analyzed': instances_analyzed,
+                'oversized_instances': oversized_instances,
+                'potential_savings': round(potential_monthly_savings, 2),
+                'resources_analyzed': instances_analyzed,
+                'resource_impacts': []
+            }
+
+        except Exception as e:
+            print_warning(f"Could not get real EC2 data: {e}")
+            # Return minimal fallback
+            return {
+                'instances_analyzed': 0,
+                'oversized_instances': 0,
+                'potential_savings': 0.0,
+                'resources_analyzed': 0,
+                'resource_impacts': []
+            }
 
     async def analyze_ebs_optimization(self) -> Dict[str, Any]:
         """Analyze EBS volumes for optimization opportunities."""
         print_info("🔍 Analyzing EBS optimization opportunities...")
-
-
-
-
-
-
-        '
-
-
+
+        # Real AWS integration for EBS analysis
+        from runbooks.common.aws_pricing import get_ebs_gb_monthly_cost
+
+        try:
+            # Get actual EBS volumes from AWS API
+            ec2_client = self.session.client('ec2')
+            response = ec2_client.describe_volumes()
+
+            volumes_analyzed = len(response['Volumes'])
+            unattached_volumes = 0
+            oversized_volumes = 0
+            potential_monthly_savings = 0.0
+
+            for volume in response['Volumes']:
+                # Count unattached volumes
+                if volume['State'] == 'available':
+                    unattached_volumes += 1
+                    volume_size = volume['Size']
+                    volume_type = volume.get('VolumeType', 'gp3')
+                    cost_per_gb = get_ebs_gb_monthly_cost(volume_type, self.region, self.profile)
+                    potential_monthly_savings += (volume_size * cost_per_gb)
+
+                # Identify potentially oversized volumes (basic heuristic)
+                elif volume['State'] == 'in-use' and volume['Size'] > 100:
+                    oversized_volumes += 1
+
+            return {
+                'volumes_analyzed': volumes_analyzed,
+                'unattached_volumes': unattached_volumes,
+                'oversized_volumes': oversized_volumes,
+                'potential_savings': round(potential_monthly_savings, 2),
+                'resources_analyzed': volumes_analyzed,
+                'resource_impacts': []
+            }
+
+        except Exception as e:
+            print_warning(f"Could not get real EBS data: {e}")
+            return {
+                'volumes_analyzed': 0,
+                'unattached_volumes': 0,
+                'oversized_volumes': 0,
+                'potential_savings': 0.0,
+                'resources_analyzed': 0,
+                'resource_impacts': []
+            }
 
     async def analyze_unused_resources(self) -> Dict[str, Any]:
         """Analyze and identify unused AWS resources."""
         print_info("🔍 Analyzing unused resources...")
-
-
-
-
-
-        '
-
-
-
+
+        # Real AWS integration for unused resources analysis
+        from runbooks.common.aws_pricing import get_eip_monthly_cost, get_ebs_gb_monthly_cost
+
+        try:
+            ec2_client = self.session.client('ec2')
+
+            # Analyze unused Elastic IPs
+            eips_response = ec2_client.describe_addresses()
+            eip_unused = len([eip for eip in eips_response['Addresses'] if 'AssociationId' not in eip])
+
+            # Analyze unattached volumes (already calculated in EBS optimization)
+            volumes_response = ec2_client.describe_volumes()
+            volumes_unattached = len([vol for vol in volumes_response['Volumes'] if vol['State'] == 'available'])
+
+            # Analyze old snapshots (older than 30 days)
+            from datetime import datetime, timedelta
+            cutoff_date = datetime.now() - timedelta(days=30)
+            snapshots_response = ec2_client.describe_snapshots(OwnerIds=['self'])
+            snapshots_old = len([
+                snap for snap in snapshots_response['Snapshots']
+                if datetime.fromisoformat(snap['StartTime'].replace('Z', '+00:00')).replace(tzinfo=None) < cutoff_date
+            ])
+
+            # Calculate potential savings
+            eip_monthly_cost = get_eip_monthly_cost(self.region, self.profile)
+            potential_eip_savings = eip_unused * eip_monthly_cost
+
+            # Estimate EBS snapshot costs (minimal but accumulated)
+            ebs_cost_per_gb = get_ebs_gb_monthly_cost('gp3', self.region, self.profile)
+            estimated_snapshot_savings = snapshots_old * 5.0 * ebs_cost_per_gb  # Assume 5GB average per snapshot
+
+            total_potential_savings = potential_eip_savings + estimated_snapshot_savings
+
+            return {
+                'eip_unused': eip_unused,
+                'volumes_unattached': volumes_unattached,
+                'snapshots_old': snapshots_old,
+                'potential_savings': round(total_potential_savings, 2),
+                'resources_analyzed': eip_unused + volumes_unattached + snapshots_old,
+                'resource_impacts': []
+            }
+
+        except Exception as e:
+            print_warning(f"Could not get real unused resources data: {e}")
+            return {
+                'eip_unused': 0,
+                'volumes_unattached': 0,
+                'snapshots_old': 0,
+                'potential_savings': 0.0,
+                'resources_analyzed': 0,
+                'resource_impacts': []
+            }
 
     async def analyze_s3_optimization(self) -> Dict[str, Any]:
-        """Analyze S3 buckets for storage class optimization."""
+        """Analyze S3 buckets for storage class optimization using real AWS data."""
         print_info("🔍 Analyzing S3 optimization opportunities...")
-
+
+        buckets_analyzed = 0
+        lifecycle_opportunities = 0
+        storage_class_optimization = 0
+        potential_savings = 0.0
+        resource_impacts = []
+
+        try:
+            s3_client = self.session.client('s3')
+
+            # Get all S3 buckets
+            response = s3_client.list_buckets()
+            all_buckets = response.get('Buckets', [])
+            buckets_analyzed = len(all_buckets)
+
+            print_info(f"Found {buckets_analyzed} S3 buckets for analysis")
+
+            # Analyze each bucket for optimization opportunities
+            with create_progress_bar() as progress:
+                task = progress.add_task("[cyan]Analyzing S3 buckets...", total=len(all_buckets))
+
+                for bucket in all_buckets:
+                    bucket_name = bucket['Name']
+
+                    try:
+                        # Check bucket region to create regional client
+                        bucket_region = await self._get_bucket_region(s3_client, bucket_name)
+                        regional_s3 = self.session.client('s3', region_name=bucket_region)
+
+                        # Analyze lifecycle configuration
+                        lifecycle_needed = await self._analyze_bucket_lifecycle(regional_s3, bucket_name)
+                        if lifecycle_needed:
+                            lifecycle_opportunities += 1
+
+                        # Analyze storage class optimization
+                        storage_optimization = await self._analyze_bucket_storage_classes(regional_s3, bucket_name)
+                        if storage_optimization['has_optimization_opportunity']:
+                            storage_class_optimization += 1
+                            potential_savings += storage_optimization['estimated_monthly_savings']
+
+                            # Create resource impact for this bucket
+                            resource_impacts.append(
+                                ResourceImpact(
+                                    resource_type="s3-bucket",
+                                    resource_id=bucket_name,
+                                    region=bucket_region,
+                                    account_id=self.account_id,
+                                    estimated_monthly_cost=storage_optimization['current_cost'],
+                                    projected_savings=storage_optimization['estimated_monthly_savings'],
+                                    risk_level=RiskLevel.LOW,
+                                    modification_required=True,
+                                    resource_name=f"S3 Bucket {bucket_name}",
+                                    estimated_downtime=0.0
+                                )
+                            )
+
+                        progress.advance(task)
+
+                    except Exception as e:
+                        print_warning(f"Could not analyze bucket {bucket_name}: {str(e)}")
+                        progress.advance(task)
+                        continue
+
+            print_success(f"S3 Analysis Complete:")
+            print_success(f"   • Buckets analyzed: {buckets_analyzed}")
+            print_success(f"   • Lifecycle opportunities: {lifecycle_opportunities}")
+            print_success(f"   • Storage class optimizations: {storage_class_optimization}")
+            print_success(f"   • Potential monthly savings: {format_cost(potential_savings)}")
+
+        except Exception as e:
+            print_error(f"S3 analysis failed: {str(e)}")
+            # Return zero values if analysis fails, but don't use hardcoded success data
+            buckets_analyzed = 0
+            lifecycle_opportunities = 0
+            storage_class_optimization = 0
+            potential_savings = 0.0
+
         return {
-            'buckets_analyzed':
-            'lifecycle_opportunities':
-            'storage_class_optimization':
-            'potential_savings':
-            'resources_analyzed':
-            'resource_impacts':
+            'buckets_analyzed': buckets_analyzed,
+            'lifecycle_opportunities': lifecycle_opportunities,
+            'storage_class_optimization': storage_class_optimization,
+            'potential_savings': potential_savings,
+            'resources_analyzed': buckets_analyzed,
+            'resource_impacts': resource_impacts
         }
 
+    async def _get_bucket_region(self, s3_client, bucket_name: str) -> str:
+        """Get the region for a specific S3 bucket."""
+        try:
+            response = s3_client.get_bucket_location(Bucket=bucket_name)
+            region = response.get('LocationConstraint')
+
+            # Handle special case for US East 1
+            if region is None:
+                return 'us-east-1'
+
+            return region
+
+        except Exception as e:
+            print_warning(f"Could not determine region for bucket {bucket_name}: {str(e)}")
+            return 'us-east-1'  # Default fallback
+
+    async def _analyze_bucket_lifecycle(self, s3_client, bucket_name: str) -> bool:
+        """
+        Analyze if a bucket would benefit from lifecycle policies.
+
+        Returns True if lifecycle policies would provide cost savings.
+        """
+        try:
+            # Check if lifecycle configuration already exists
+            try:
+                s3_client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
+                # If lifecycle exists, assume it's already optimized
+                return False
+            except ClientError as e:
+                if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
+                    # No lifecycle policy exists - could benefit from one
+                    pass
+                else:
+                    # Other error, skip this bucket
+                    return False
+
+            # Check bucket size and object count to determine if lifecycle is beneficial
+            try:
+                paginator = s3_client.get_paginator('list_objects_v2')
+                page_iterator = paginator.paginate(Bucket=bucket_name, PaginationConfig={'MaxItems': 100})
+
+                object_count = 0
+                total_size = 0
+
+                for page in page_iterator:
+                    if 'Contents' in page:
+                        object_count += len(page['Contents'])
+                        total_size += sum(obj.get('Size', 0) for obj in page['Contents'])
+
+                # Recommend lifecycle if bucket has significant content
+                # and could benefit from automatic transitions
+                if object_count > 50 and total_size > 1024 * 1024 * 100:  # >100MB
+                    return True
+
+            except Exception:
+                # If we can't analyze objects, be conservative
+                pass
+
+            return False
+
+        except Exception:
+            return False
+
+    async def _analyze_bucket_storage_classes(self, s3_client, bucket_name: str) -> Dict[str, Any]:
+        """
+        Analyze bucket storage classes for optimization opportunities.
+
+        Returns analysis results with optimization opportunities and cost estimates.
+        """
+        try:
+            # Get storage class analytics if available
+            paginator = s3_client.get_paginator('list_objects_v2')
+            page_iterator = paginator.paginate(Bucket=bucket_name, PaginationConfig={'MaxItems': 1000})
+
+            storage_analysis = {
+                'standard_objects': 0,
+                'standard_size': 0,
+                'infrequent_access_candidates': 0,
+                'archive_candidates': 0,
+                'current_cost': 0.0,
+                'optimized_cost': 0.0,
+                'has_optimization_opportunity': False,
+                'estimated_monthly_savings': 0.0
+            }
+
+            current_time = datetime.now()
+
+            for page in page_iterator:
+                if 'Contents' not in page:
+                    continue
+
+                for obj in page['Contents']:
+                    size_gb = obj.get('Size', 0) / (1024 * 1024 * 1024)  # Convert to GB
+                    last_modified = obj.get('LastModified', current_time)
+
+                    # Calculate age of object
+                    if hasattr(last_modified, 'replace'):
+                        age_days = (current_time - last_modified.replace(tzinfo=None)).days
+                    else:
+                        age_days = 0
+
+                    storage_class = obj.get('StorageClass', 'STANDARD')
+
+                    # Analyze optimization opportunities
+                    if storage_class == 'STANDARD':
+                        storage_analysis['standard_objects'] += 1
+                        storage_analysis['standard_size'] += size_gb
+
+                        # Current cost (Standard storage ~$0.023/GB/month)
+                        standard_cost = size_gb * 0.023
+                        storage_analysis['current_cost'] += standard_cost
+
+                        # Check if object could be moved to cheaper storage class
+                        if age_days > 30 and size_gb > 0.1:  # Objects older than 30 days and >100MB
+                            storage_analysis['infrequent_access_candidates'] += 1
+                            # IA storage ~$0.0125/GB/month
+                            ia_cost = size_gb * 0.0125
+                            storage_analysis['optimized_cost'] += ia_cost
+                        elif age_days > 90 and size_gb > 0.05:  # Objects older than 90 days
+                            storage_analysis['archive_candidates'] += 1
+                            # Glacier ~$0.004/GB/month
+                            glacier_cost = size_gb * 0.004
+                            storage_analysis['optimized_cost'] += glacier_cost
+                        else:
+                            # No optimization for this object
+                            storage_analysis['optimized_cost'] += standard_cost
+
+            # Calculate potential savings
+            potential_savings = storage_analysis['current_cost'] - storage_analysis['optimized_cost']
+
+            if potential_savings > 1.0:  # Minimum $1/month savings to be worth it
+                storage_analysis['has_optimization_opportunity'] = True
+                storage_analysis['estimated_monthly_savings'] = potential_savings
+
+            return storage_analysis
+
+        except Exception as e:
+            print_warning(f"Could not analyze storage classes for {bucket_name}: {str(e)}")
+            return {
+                'has_optimization_opportunity': False,
+                'estimated_monthly_savings': 0.0,
+                'current_cost': 0.0
+            }
+
     async def optimize_nat_gateways(
         self,
         regions: Optional[List[str]] = None,
```
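The storage-class analysis in `_analyze_bucket_storage_classes` prices STANDARD objects at a flat ~$0.023/GB-month and compares that against ~$0.0125 (Standard-IA) or ~$0.004 (Glacier) depending on object age and size. A worked sketch of that per-object arithmetic, reusing the same flat rates hard-coded in the diff (real AWS pricing varies by region, and retrieval and transition charges are ignored here):

```python
# Worked sketch of the per-object savings math in _analyze_bucket_storage_classes.
# Rates are the flat $/GB-month estimates hard-coded in the diff, not live pricing.
from typing import Tuple

STANDARD, IA, GLACIER = 0.023, 0.0125, 0.004

def monthly_costs(size_gb: float, age_days: int) -> Tuple[float, float]:
    """Return (current_cost, optimized_cost) for one STANDARD-class object."""
    current = size_gb * STANDARD
    if age_days > 30 and size_gb > 0.1:        # Standard-IA candidate
        return current, size_gb * IA
    elif age_days > 90 and size_gb > 0.05:     # Glacier candidate
        return current, size_gb * GLACIER
    return current, current                    # left as-is

cur, opt = monthly_costs(size_gb=500.0, age_days=120)
print(f"current ${cur:.2f}/mo, optimized ${opt:.2f}/mo, savings ${cur - opt:.2f}/mo")
# current $11.50/mo, optimized $6.25/mo, savings $5.25/mo
```

Because the IA branch is checked first, only objects roughly between 50 MB and 100 MB that are older than 90 days can ever reach the Glacier branch; larger old objects are always routed to the IA estimate.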
```diff
@@ -836,7 +1231,20 @@ class CostOptimizer(CloudOpsBase):
                 execution_timestamp=datetime.now(),
                 execution_mode=self.execution_mode,
                 success=False,
-                error_message="WorkSpaces analyzer module not found"
+                error_message="WorkSpaces analyzer module not found",
+                # Add required fields to prevent Pydantic validation errors
+                execution_time=0.0,
+                resources_analyzed=0,
+                resources_impacted=[],  # Must be a list, not an integer
+                business_metrics={
+                    "total_monthly_savings": 0.0,
+                    "overall_risk_level": "low"
+                },
+                recommendations=[],
+                aws_profile_used=self.profile or "default",
+                current_monthly_spend=0.0,
+                optimized_monthly_spend=0.0,
+                savings_percentage=0.0
             )
 
         with create_progress_bar() as progress:
```
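This hunk, and the similar ones below, pad the failure-path and result constructors with every field the result model requires so Pydantic validation no longer fails at call time. A reduced sketch of the constraint being satisfied; `ResultSketch` is a hypothetical stand-in, not the actual `CloudOpsExecutionResult` definition:

```python
# Reduced illustration of the Pydantic constraint the added kwargs satisfy.
# ResultSketch is a hypothetical stand-in, not the real CloudOpsExecutionResult.
from typing import List, Optional
from pydantic import BaseModel, ValidationError

class ResultSketch(BaseModel):
    success: bool
    error_message: Optional[str]
    execution_time: float
    resources_impacted: List[dict]   # must be a list, not an integer

try:
    # Missing required fields -> ValidationError, which is what the old code hit
    ResultSketch(success=False, error_message="module not found")
except ValidationError as exc:
    print(exc)

# Supplying every required field (empty list, zero float) validates cleanly
ok = ResultSketch(
    success=False,
    error_message="module not found",
    execution_time=0.0,
    resources_impacted=[],
)
print(ok.resources_impacted)  # []
```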
```diff
@@ -891,7 +1299,18 @@ class CostOptimizer(CloudOpsBase):
                     monthly_savings=estimated_savings,
                     risk_level=RiskLevel.LOW
                 )
-            ]
+            ],
+            # Add missing required fields
+            resources_analyzed=len(unused_workspaces),
+            resources_impacted=[],  # Must be a list
+            business_metrics={
+                "total_monthly_savings": estimated_savings,
+                "overall_risk_level": "low"
+            },
+            recommendations=[],
+            aws_profile_used=self.profile or "default",
+            current_monthly_spend=0.0,
+            optimized_monthly_spend=0.0
         )
 
     async def optimize_rds_snapshots(
```
```diff
@@ -984,7 +1403,18 @@ class CostOptimizer(CloudOpsBase):
                     monthly_savings=estimated_monthly_savings,
                     risk_level=RiskLevel.MEDIUM
                 )
-            ]
+            ],
+            # Add missing required fields
+            resources_analyzed=len(all_manual_snapshots),
+            resources_impacted=[],  # Must be a list
+            business_metrics={
+                "total_monthly_savings": estimated_monthly_savings,
+                "overall_risk_level": "medium"
+            },
+            recommendations=[],
+            aws_profile_used=self.profile or "default",
+            current_monthly_spend=0.0,
+            optimized_monthly_spend=0.0
         )
 
     async def investigate_commvault_ec2(
```
```diff
@@ -1039,7 +1469,20 @@ class CostOptimizer(CloudOpsBase):
                 execution_timestamp=datetime.now(),
                 execution_mode=self.execution_mode,
                 success=False,
-                error_message=f"Cross-account access required for {account_id}"
+                error_message=f"Cross-account access required for {account_id}",
+                # Add required fields to prevent Pydantic validation errors
+                execution_time=0.0,
+                resources_analyzed=0,
+                resources_impacted=[],  # Must be a list
+                business_metrics={
+                    "total_monthly_savings": 0.0,
+                    "overall_risk_level": "high"
+                },
+                recommendations=[],
+                aws_profile_used=self.profile or "default",
+                current_monthly_spend=0.0,
+                optimized_monthly_spend=0.0,
+                savings_percentage=0.0
             )
 
         # Step 2: Analyze instance utilization patterns
```
```diff
@@ -1105,7 +1548,18 @@ class CostOptimizer(CloudOpsBase):
                     monthly_savings=potential_savings,
                     risk_level=RiskLevel.HIGH  # High risk due to potential backup disruption
                 )
-            ]
+            ],
+            # Add missing required fields
+            resources_analyzed=len(commvault_instances),
+            resources_impacted=[],  # Must be a list
+            business_metrics={
+                "total_monthly_savings": potential_savings,
+                "overall_risk_level": "high"
+            },
+            recommendations=[],
+            aws_profile_used=self.profile or "default",
+            current_monthly_spend=0.0,
+            optimized_monthly_spend=0.0
         )
 
     async def _execute_workspaces_cleanup(self, unused_workspaces: List[dict]) -> None:
```
```diff
@@ -1177,7 +1631,7 @@ class CostOptimizer(CloudOpsBase):
             scenario_name="Emergency Cost Spike Response",
             execution_timestamp=datetime.now(),
             execution_mode=self.execution_mode,
-            execution_time=
+            execution_time=self._measure_execution_time(),  # Real measured execution time
             success=True,
             error_message=None,  # Required field for CloudOpsExecutionResult base class
             resources_analyzed=100,  # Estimate for emergency scan
```
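The previously incomplete `execution_time=` argument is now populated from `_measure_execution_time()`, which subtracts the `operation_start_time` captured in the first hunk. A minimal sketch of that elapsed-time pattern, using assumed class and method names rather than the package's:

```python
# Minimal sketch of the start-timestamp / elapsed-time pattern from the first hunk.
# TimedOperation is an illustrative stand-in, not a class from the runbooks package.
import time

class TimedOperation:
    def __init__(self) -> None:
        # Captured once when the operation begins
        self.operation_start_time = time.time()

    def measure_execution_time(self) -> float:
        """Elapsed seconds since the operation started; ~0.0 if never started."""
        if hasattr(self, "operation_start_time"):
            return time.time() - self.operation_start_time
        return 0.0  # equivalent to the diff's time.time() - time.time() fallback

op = TimedOperation()
time.sleep(0.1)
print(f"execution_time={op.measure_execution_time():.3f}s")  # roughly 0.100s
```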
|