runbooks-0.7.9-py3-none-any.whl → runbooks-0.9.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/cfat/README.md +12 -1
- runbooks/cfat/__init__.py +1 -1
- runbooks/cfat/assessment/compliance.py +4 -1
- runbooks/cfat/assessment/runner.py +42 -34
- runbooks/cfat/models.py +1 -1
- runbooks/cloudops/__init__.py +123 -0
- runbooks/cloudops/base.py +385 -0
- runbooks/cloudops/cost_optimizer.py +811 -0
- runbooks/cloudops/infrastructure_optimizer.py +29 -0
- runbooks/cloudops/interfaces.py +828 -0
- runbooks/cloudops/lifecycle_manager.py +29 -0
- runbooks/cloudops/mcp_cost_validation.py +678 -0
- runbooks/cloudops/models.py +251 -0
- runbooks/cloudops/monitoring_automation.py +29 -0
- runbooks/cloudops/notebook_framework.py +676 -0
- runbooks/cloudops/security_enforcer.py +449 -0
- runbooks/common/__init__.py +152 -0
- runbooks/common/accuracy_validator.py +1039 -0
- runbooks/common/context_logger.py +440 -0
- runbooks/common/cross_module_integration.py +594 -0
- runbooks/common/enhanced_exception_handler.py +1108 -0
- runbooks/common/enterprise_audit_integration.py +634 -0
- runbooks/common/mcp_cost_explorer_integration.py +900 -0
- runbooks/common/mcp_integration.py +548 -0
- runbooks/common/performance_monitor.py +387 -0
- runbooks/common/profile_utils.py +216 -0
- runbooks/common/rich_utils.py +172 -1
- runbooks/feedback/user_feedback_collector.py +440 -0
- runbooks/finops/README.md +377 -458
- runbooks/finops/__init__.py +4 -21
- runbooks/finops/account_resolver.py +279 -0
- runbooks/finops/accuracy_cross_validator.py +638 -0
- runbooks/finops/aws_client.py +721 -36
- runbooks/finops/budget_integration.py +313 -0
- runbooks/finops/cli.py +59 -5
- runbooks/finops/cost_optimizer.py +1340 -0
- runbooks/finops/cost_processor.py +211 -37
- runbooks/finops/dashboard_router.py +900 -0
- runbooks/finops/dashboard_runner.py +990 -232
- runbooks/finops/embedded_mcp_validator.py +288 -0
- runbooks/finops/enhanced_dashboard_runner.py +8 -7
- runbooks/finops/enhanced_progress.py +327 -0
- runbooks/finops/enhanced_trend_visualization.py +423 -0
- runbooks/finops/finops_dashboard.py +184 -1829
- runbooks/finops/helpers.py +509 -196
- runbooks/finops/iam_guidance.py +400 -0
- runbooks/finops/markdown_exporter.py +466 -0
- runbooks/finops/multi_dashboard.py +1502 -0
- runbooks/finops/optimizer.py +15 -15
- runbooks/finops/profile_processor.py +2 -2
- runbooks/finops/runbooks.inventory.organizations_discovery.log +0 -0
- runbooks/finops/runbooks.security.report_generator.log +0 -0
- runbooks/finops/runbooks.security.run_script.log +0 -0
- runbooks/finops/runbooks.security.security_export.log +0 -0
- runbooks/finops/schemas.py +589 -0
- runbooks/finops/service_mapping.py +195 -0
- runbooks/finops/single_dashboard.py +710 -0
- runbooks/finops/tests/test_reference_images_validation.py +1 -1
- runbooks/inventory/README.md +12 -1
- runbooks/inventory/core/collector.py +157 -29
- runbooks/inventory/list_ec2_instances.py +9 -6
- runbooks/inventory/list_ssm_parameters.py +10 -10
- runbooks/inventory/organizations_discovery.py +210 -164
- runbooks/inventory/rich_inventory_display.py +74 -107
- runbooks/inventory/run_on_multi_accounts.py +13 -13
- runbooks/inventory/runbooks.inventory.organizations_discovery.log +0 -0
- runbooks/inventory/runbooks.security.security_export.log +0 -0
- runbooks/main.py +1371 -240
- runbooks/metrics/dora_metrics_engine.py +711 -17
- runbooks/monitoring/performance_monitor.py +433 -0
- runbooks/operate/README.md +394 -0
- runbooks/operate/base.py +215 -47
- runbooks/operate/ec2_operations.py +435 -5
- runbooks/operate/iam_operations.py +598 -3
- runbooks/operate/privatelink_operations.py +1 -1
- runbooks/operate/rds_operations.py +508 -0
- runbooks/operate/s3_operations.py +508 -0
- runbooks/operate/vpc_endpoints.py +1 -1
- runbooks/remediation/README.md +489 -13
- runbooks/remediation/base.py +5 -3
- runbooks/remediation/commons.py +8 -4
- runbooks/security/ENTERPRISE_SECURITY_FRAMEWORK.md +506 -0
- runbooks/security/README.md +12 -1
- runbooks/security/__init__.py +265 -33
- runbooks/security/cloudops_automation_security_validator.py +1164 -0
- runbooks/security/compliance_automation.py +12 -10
- runbooks/security/compliance_automation_engine.py +1021 -0
- runbooks/security/enterprise_security_framework.py +930 -0
- runbooks/security/enterprise_security_policies.json +293 -0
- runbooks/security/executive_security_dashboard.py +1247 -0
- runbooks/security/integration_test_enterprise_security.py +879 -0
- runbooks/security/module_security_integrator.py +641 -0
- runbooks/security/multi_account_security_controls.py +2254 -0
- runbooks/security/real_time_security_monitor.py +1196 -0
- runbooks/security/report_generator.py +1 -1
- runbooks/security/run_script.py +4 -8
- runbooks/security/security_baseline_tester.py +39 -52
- runbooks/security/security_export.py +99 -120
- runbooks/sre/README.md +472 -0
- runbooks/sre/__init__.py +33 -0
- runbooks/sre/mcp_reliability_engine.py +1049 -0
- runbooks/sre/performance_optimization_engine.py +1032 -0
- runbooks/sre/production_monitoring_framework.py +584 -0
- runbooks/sre/reliability_monitoring_framework.py +1011 -0
- runbooks/validation/__init__.py +2 -2
- runbooks/validation/benchmark.py +154 -149
- runbooks/validation/cli.py +159 -147
- runbooks/validation/mcp_validator.py +291 -248
- runbooks/vpc/README.md +478 -0
- runbooks/vpc/__init__.py +2 -2
- runbooks/vpc/manager_interface.py +366 -351
- runbooks/vpc/networking_wrapper.py +68 -36
- runbooks/vpc/rich_formatters.py +22 -8
- runbooks-0.9.1.dist-info/METADATA +308 -0
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/RECORD +120 -59
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/entry_points.txt +1 -1
- runbooks/finops/cross_validation.py +0 -375
- runbooks-0.7.9.dist-info/METADATA +0 -636
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/WHEEL +0 -0
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/top_level.txt +0 -0
runbooks/operate/ec2_operations.py

```diff
@@ -48,6 +48,9 @@ class EC2Operations(BaseOperation):
     supported_operations = {
         "start_instances",
         "stop_instances",
+        "list_unattached_elastic_ips",
+        "release_elastic_ip",
+        "get_elastic_ip_cost_impact",
         "terminate_instances",
         "run_instances",
         "copy_image",
```
```diff
@@ -59,6 +62,8 @@ class EC2Operations(BaseOperation):
         "analyze_rightsizing",
         "optimize_instance_types",
         "generate_cost_recommendations",
+        "get_ebs_volumes_with_low_usage",
+        "delete_volumes_by_id",
     }
     requires_confirmation = True
 
```
```diff
@@ -187,6 +192,14 @@ class EC2Operations(BaseOperation):
             return self.cleanup_unused_eips(context)
         elif operation_type == "reboot_instances":
             return self.reboot_instances(context, kwargs.get("instance_ids", []))
+        elif operation_type == "get_ebs_volumes_with_low_usage":
+            return self.get_ebs_volumes_with_low_usage(
+                context,
+                kwargs.get("threshold_days", 10),
+                kwargs.get("usage_threshold", 10.0)
+            )
+        elif operation_type == "delete_volumes_by_id":
+            return self.delete_volumes_by_id(context, kwargs.get("volume_data", []))
         else:
             raise ValueError(f"Unsupported operation: {operation_type}")
 
```
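The new dispatch branches fall back to defaults when the caller omits kwargs (`threshold_days=10`, `usage_threshold=10.0`, an empty `volume_data` list). A small, pure-Python mirror of that routing follows, useful only to make the defaults and expected kwargs explicit; `route_ebs_operation` is a hypothetical stand-in for whichever dispatcher method contains this `elif` chain, and nothing AWS-specific is invoked:

```python
# Pure-Python mirror of the dispatch branches above (illustration only, no AWS calls).
# route_ebs_operation is a hypothetical stand-in for the dispatcher method that
# contains the elif chain shown in the diff.
def route_ebs_operation(operation_type: str, **kwargs):
    if operation_type == "get_ebs_volumes_with_low_usage":
        return ("get_ebs_volumes_with_low_usage",
                kwargs.get("threshold_days", 10),      # days of CloudWatch history to scan
                kwargs.get("usage_threshold", 10.0))   # flag volumes below this usage %
    if operation_type == "delete_volumes_by_id":
        return ("delete_volumes_by_id", kwargs.get("volume_data", []))
    raise ValueError(f"Unsupported operation: {operation_type}")


print(route_ebs_operation("get_ebs_volumes_with_low_usage"))                       # defaults: 10 days, 10.0%
print(route_ebs_operation("get_ebs_volumes_with_low_usage", usage_threshold=5.0))  # caller override
```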
```diff
@@ -200,11 +213,13 @@ class EC2Operations(BaseOperation):
 
         try:
             if context.dry_run:
-                console.print(
-
-
-
-
+                console.print(
+                    Panel(
+                        f"[yellow]Would start instance {instance_id}[/yellow]",
+                        title="🏃 DRY-RUN MODE",
+                        border_style="yellow",
+                    )
+                )
                 result.mark_completed(OperationStatus.DRY_RUN)
             else:
                 response = self.execute_aws_call(ec2_client, "start_instances", InstanceIds=[instance_id])
```
```diff
@@ -682,6 +697,203 @@ class EC2Operations(BaseOperation):
 
         return [result]
 
+    def get_ebs_volumes_with_low_usage(
+        self, context: OperationContext, threshold_days: int = 10, usage_threshold: float = 10.0
+    ) -> List[OperationResult]:
+        """
+        Find EBS volumes with low usage based on CloudWatch VolumeUsage metric.
+
+        Migrated from unSkript notebook: AWS_Delete_EBS_Volumes_With_Low_Usage.ipynb
+        Function: aws_get_ebs_volume_for_low_usage()
+
+        Args:
+            context: Operation execution context
+            threshold_days: Number of days to analyze usage
+            usage_threshold: Usage percentage threshold (default: 10.0)
+
+        Returns:
+            List of OperationResults with low usage volumes found
+        """
+        ec2_client = self.get_client("ec2", context.region)
+        cloudwatch_client = self.get_client("cloudwatch", context.region)
+
+        result = self.create_operation_result(context, "get_ebs_volumes_with_low_usage", "ec2:volume", "analysis")
+
+        try:
+            console.print(f"[blue]🔍 Analyzing EBS volume usage over {threshold_days} days...[/blue]")
+
+            # Get all volumes - migrated logic from unSkript notebook
+            volumes_response = self.execute_aws_call(ec2_client, "describe_volumes")
+            low_usage_volumes = []
+
+            now = datetime.utcnow()
+            days_ago = now - timedelta(days=threshold_days)
+
+            with Progress(
+                SpinnerColumn(),
+                TextColumn("[progress.description]{task.description}"),
+                transient=True,
+            ) as progress:
+                task = progress.add_task(f"Analyzing {len(volumes_response['Volumes'])} volumes...", total=len(volumes_response['Volumes']))
+
+                for volume in volumes_response["Volumes"]:
+                    volume_id = volume["VolumeId"]
+
+                    try:
+                        # Get CloudWatch metrics for volume usage - exact logic from unSkript
+                        cloudwatch_response = cloudwatch_client.get_metric_statistics(
+                            Namespace='AWS/EBS',
+                            MetricName='VolumeUsage',
+                            Dimensions=[
+                                {
+                                    'Name': 'VolumeId',
+                                    'Value': volume_id
+                                }
+                            ],
+                            StartTime=days_ago,
+                            EndTime=now,
+                            Period=3600,
+                            Statistics=['Average']
+                        )
+
+                        # Analyze usage data - migrated from unSkript logic
+                        for datapoint in cloudwatch_response.get('Datapoints', []):
+                            if datapoint['Average'] < usage_threshold:
+                                ebs_volume = {
+                                    "volume_id": volume_id,
+                                    "region": context.region,
+                                    "size": volume["Size"],
+                                    "state": volume["State"],
+                                    "volume_type": volume.get("VolumeType", "unknown"),
+                                    "encrypted": volume.get("Encrypted", False),
+                                    "create_time": str(volume["CreateTime"]),
+                                    "average_usage": datapoint['Average'],
+                                    "timestamp": str(datapoint['Timestamp'])
+                                }
+                                low_usage_volumes.append(ebs_volume)
+                                logger.debug(f"Low usage volume found: {volume_id} (avg usage: {datapoint['Average']:.2f}%)")
+                                break
+
+                    except ClientError as e:
+                        # Handle individual volume metric errors gracefully
+                        logger.warning(f"Could not get metrics for volume {volume_id}: {e}")
+                        continue
+
+                    progress.update(task, advance=1)
+
+            result.response_data = {
+                "low_usage_volumes": low_usage_volumes,
+                "count": len(low_usage_volumes),
+                "total_scanned": len(volumes_response["Volumes"]),
+                "threshold_days": threshold_days,
+                "usage_threshold": usage_threshold
+            }
+            result.mark_completed(OperationStatus.SUCCESS)
+
+            if low_usage_volumes:
+                console.print(f"[yellow]⚠️ Found {len(low_usage_volumes)} volumes with usage < {usage_threshold}%[/yellow]")
+
+                # Create Rich table for display
+                table = Table(title=f"Low Usage EBS Volumes (< {usage_threshold}%)")
+                table.add_column("Volume ID", style="cyan")
+                table.add_column("Size (GB)", justify="right")
+                table.add_column("Type", style="green")
+                table.add_column("Usage %", justify="right", style="red")
+                table.add_column("State")
+
+                for vol in low_usage_volumes[:10]:  # Show first 10
+                    table.add_row(
+                        vol["volume_id"],
+                        str(vol["size"]),
+                        vol["volume_type"],
+                        f"{vol['average_usage']:.2f}%",
+                        vol["state"]
+                    )
+
+                console.print(table)
+
+                if len(low_usage_volumes) > 10:
+                    console.print(f"[dim]... and {len(low_usage_volumes) - 10} more volumes[/dim]")
+
+                # SNS notification
+                message = f"Found {len(low_usage_volumes)} EBS volumes with usage < {usage_threshold}% in {context.region}"
+                self.send_sns_notification("Low Usage EBS Volumes Detected", message)
+            else:
+                console.print(f"[green]✅ No volumes found with usage < {usage_threshold}%[/green]")
+
+        except Exception as e:
+            error_msg = f"Failed to analyze EBS volume usage: {e}"
+            logger.error(error_msg)
+            result.mark_completed(OperationStatus.FAILED, error_msg)
+
+        return [result]
+
+    def delete_volumes_by_id(self, context: OperationContext, volume_data: List[Dict[str, str]]) -> List[OperationResult]:
+        """
+        Delete EBS volumes by ID with safety checks and confirmation.
+
+        Migrated from unSkript notebook: AWS_Delete_EBS_Volumes_With_Low_Usage.ipynb
+        Function: aws_delete_volume_by_id()
+
+        Args:
+            context: Operation execution context
+            volume_data: List of dicts with 'volume_id' and 'region' keys
+
+        Returns:
+            List of OperationResults for each volume deletion attempt
+        """
+        results = []
+
+        for vol_data in volume_data:
+            volume_id = vol_data.get("volume_id")
+            region = vol_data.get("region", context.region)
+
+            ec2_client = self.get_client("ec2", region)
+            result = self.create_operation_result(context, "delete_volumes_by_id", "ec2:volume", volume_id)
+
+            try:
+                # Safety confirmation - enhanced from original
+                if not self.confirm_operation(context, volume_id, "delete EBS volume"):
+                    result.mark_completed(OperationStatus.CANCELLED, "Operation cancelled by user")
+                    results.append(result)
+                    continue
+
+                if context.dry_run:
+                    console.print(f"[yellow]🏃 DRY-RUN: Would delete volume {volume_id} in {region}[/yellow]")
+                    result.mark_completed(OperationStatus.DRY_RUN)
+                else:
+                    # Execute deletion - exact logic from unSkript
+                    delete_response = self.execute_aws_call(ec2_client, "delete_volume", VolumeId=volume_id)
+
+                    result.response_data = delete_response
+                    result.mark_completed(OperationStatus.SUCCESS)
+                    console.print(f"[green]✅ Successfully deleted volume {volume_id}[/green]")
+                    logger.info(f"Deleted EBS volume: {volume_id} in {region}")
+
+            except ClientError as e:
+                error_msg = f"Failed to delete volume {volume_id}: {e}"
+                console.print(f"[red]❌ {error_msg}[/red]")
+                logger.error(error_msg)
+                result.mark_completed(OperationStatus.FAILED, error_msg)
+            except Exception as e:
+                error_msg = f"Unexpected error deleting volume {volume_id}: {e}"
+                console.print(f"[red]❌ {error_msg}[/red]")
+                logger.error(error_msg)
+                result.mark_completed(OperationStatus.FAILED, error_msg)
+
+            results.append(result)
+
+        # Summary reporting
+        successful_deletions = [r.resource_id for r in results if r.success]
+        if successful_deletions:
+            message = f"Successfully deleted {len(successful_deletions)} EBS volumes: {', '.join(successful_deletions[:5])}"
+            if len(successful_deletions) > 5:
+                message += f" and {len(successful_deletions) - 5} more"
+            self.send_sns_notification("EBS Volumes Deleted", message)
+            console.print(f"[green]🎯 Deletion Summary: {len(successful_deletions)}/{len(results)} volumes deleted successfully[/green]")
+
+        return results
+
     def cleanup_unused_eips(self, context: OperationContext) -> List[OperationResult]:
         """
         Identify unused Elastic IPs with detailed reporting and SNS notifications.
```
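Taken together, the two new methods form an analyze-then-delete workflow: `get_ebs_volumes_with_low_usage` returns a single result whose `response_data["low_usage_volumes"]` entries already carry the `volume_id` and `region` keys that `delete_volumes_by_id` expects. A hedged sketch of that chaining follows; it assumes an already constructed `EC2Operations` instance and `OperationContext`, since neither constructor appears in this diff:

```python
# Sketch only: chain the CloudWatch analysis into the deletion step.
# `ec2_ops` and `context` must be built by the caller; their constructors are
# not shown in this diff and are not assumed here.
from typing import List

from runbooks.operate.ec2_operations import EC2Operations


def purge_low_usage_volumes(ec2_ops: EC2Operations, context,
                            threshold_days: int = 10,
                            usage_threshold: float = 10.0) -> List:
    # get_ebs_volumes_with_low_usage always returns a one-element list.
    [analysis] = ec2_ops.get_ebs_volumes_with_low_usage(context, threshold_days, usage_threshold)
    candidates = (analysis.response_data or {}).get("low_usage_volumes", [])
    # Each candidate dict already holds the "volume_id" and "region" keys that
    # delete_volumes_by_id expects, so the hand-off needs no reshaping.
    return ec2_ops.delete_volumes_by_id(context, candidates) if candidates else []
```

Running with `context.dry_run` set keeps both steps read-only: the analysis never mutates anything, and the deletion path short-circuits to `OperationStatus.DRY_RUN` per volume.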
```diff
@@ -1176,6 +1388,224 @@ def lambda_handler_run_instances(event, context):
 
 
 # CLI Support
+def list_unattached_elastic_ips(self, context: OperationContext) -> List[OperationResult]:
+    """
+    Find all unattached Elastic IPs across regions.
+
+    Extracted from: AWS_Release_Unattached_Elastic_IPs.ipynb
+
+    Args:
+        context: Operation execution context
+
+    Returns:
+        List of OperationResults with unattached Elastic IPs
+    """
+    console.print("[bold cyan]Scanning for unattached Elastic IPs...[/bold cyan]")
+    results = []
+
+    # Get all regions to check
+    regions_to_check = [context.region] if context.region else self._get_all_regions()
+
+    for region in regions_to_check:
+        result = OperationResult(
+            operation_id=f"list_unattached_eips_{region}",
+            operation_name="list_unattached_elastic_ips",
+            resource_id=f"region:{region}",
+            resource_type="elastic_ip",
+        )
+
+        try:
+            # Create EC2 client for specific region
+            ec2_client = boto3.client('ec2', region_name=region)
+
+            # Get all Elastic IPs in region
+            response = ec2_client.describe_addresses()
+            unattached_eips = []
+
+            for eip in response.get('Addresses', []):
+                # Check if EIP is not attached (no AssociationId)
+                if 'AssociationId' not in eip:
+                    eip_info = {
+                        'public_ip': eip.get('PublicIp'),
+                        'allocation_id': eip.get('AllocationId'),
+                        'region': region,
+                        'domain': eip.get('Domain', 'vpc'),
+                        'network_interface_id': eip.get('NetworkInterfaceId'),
+                        'private_ip': eip.get('PrivateIpAddress'),
+                        'tags': eip.get('Tags', [])
+                    }
+                    unattached_eips.append(eip_info)
+
+            if unattached_eips:
+                result.add_output("unattached_eips", unattached_eips)
+                result.add_output("count", len(unattached_eips))
+                result.add_output("monthly_cost", len(unattached_eips) * 3.60)  # $3.60/month per EIP
+                result.mark_completed(
+                    OperationStatus.SUCCESS,
+                    f"Found {len(unattached_eips)} unattached Elastic IPs in {region}"
+                )
+                console.print(f"[yellow]Found {len(unattached_eips)} unattached EIPs in {region}[/yellow]")
+            else:
+                result.mark_completed(
+                    OperationStatus.SUCCESS,
+                    f"No unattached Elastic IPs found in {region}"
+                )
+
+        except ClientError as e:
+            error_msg = f"Failed to list Elastic IPs in {region}: {e}"
+            logger.error(error_msg)
+            result.mark_completed(OperationStatus.FAILED, error_msg)
+            console.print(f"[red]Error scanning {region}: {e}[/red]")
+
+        results.append(result)
+
+    return results
+
+def release_elastic_ip(self, context: OperationContext, allocation_id: str, region: str) -> OperationResult:
+    """
+    Release (delete) an unattached Elastic IP.
+
+    Extracted from: AWS_Release_Unattached_Elastic_IPs.ipynb
+
+    Args:
+        context: Operation execution context
+        allocation_id: Allocation ID of the Elastic IP
+        region: AWS region where the EIP exists
+
+    Returns:
+        OperationResult with release status
+    """
+    result = OperationResult(
+        operation_id=f"release_eip_{allocation_id}",
+        operation_name="release_elastic_ip",
+        resource_id=allocation_id,
+        resource_type="elastic_ip",
+    )
+
+    try:
+        ec2_client = boto3.client('ec2', region_name=region)
+
+        if context.dry_run:
+            result.add_output("action", "DRY_RUN")
+            result.add_output("would_release", allocation_id)
+            result.add_output("monthly_savings", 3.60)
+            result.mark_completed(
+                OperationStatus.SUCCESS,
+                f"DRY RUN: Would release Elastic IP {allocation_id}"
+            )
+            console.print(f"[yellow]DRY RUN: Would release EIP {allocation_id}[/yellow]")
+        else:
+            # Actually release the Elastic IP
+            response = ec2_client.release_address(AllocationId=allocation_id)
+            result.add_output("response", response)
+            result.add_output("released", True)
+            result.add_output("monthly_savings", 3.60)
+            result.mark_completed(
+                OperationStatus.SUCCESS,
+                f"Successfully released Elastic IP {allocation_id}"
+            )
+            console.print(f"[green]✅ Released Elastic IP {allocation_id}[/green]")
+
+    except ClientError as e:
+        error_msg = f"Failed to release Elastic IP {allocation_id}: {e}"
+        logger.error(error_msg)
+        result.mark_completed(OperationStatus.FAILED, error_msg)
+        console.print(f"[red]❌ Failed to release {allocation_id}: {e}[/red]")
+
+    return result
+
+def get_elastic_ip_cost_impact(self, context: OperationContext) -> OperationResult:
+    """
+    Calculate cost impact of unattached Elastic IPs.
+
+    Args:
+        context: Operation execution context
+
+    Returns:
+        OperationResult with cost analysis
+    """
+    result = OperationResult(
+        operation_id=f"eip_cost_analysis_{context.account_id}",
+        operation_name="get_elastic_ip_cost_impact",
+        resource_id=f"account:{context.account_id}",
+        resource_type="cost_analysis",
+    )
+
+    try:
+        # Get all unattached EIPs
+        eip_results = self.list_unattached_elastic_ips(context)
+
+        total_unattached = 0
+        total_monthly_cost = 0.0
+        regions_with_waste = []
+
+        for eip_result in eip_results:
+            if eip_result.status == OperationStatus.SUCCESS and eip_result.outputs:
+                count = eip_result.outputs.get('count', 0)
+                if count > 0:
+                    total_unattached += count
+                    monthly_cost = eip_result.outputs.get('monthly_cost', 0)
+                    total_monthly_cost += monthly_cost
+                    regions_with_waste.append({
+                        'region': eip_result.resource_id.split(':')[1],
+                        'count': count,
+                        'monthly_cost': monthly_cost
+                    })
+
+        # Create cost analysis summary
+        cost_summary = {
+            'total_unattached_eips': total_unattached,
+            'total_monthly_cost': total_monthly_cost,
+            'total_annual_cost': total_monthly_cost * 12,
+            'regions_affected': len(regions_with_waste),
+            'regions_detail': regions_with_waste,
+            'cost_per_eip_monthly': 3.60,
+            'recommendation': 'Release unattached Elastic IPs to save costs'
+        }
+
+        result.add_output("cost_analysis", cost_summary)
+        result.mark_completed(
+            OperationStatus.SUCCESS,
+            f"Cost analysis complete: ${total_monthly_cost:.2f}/month waste from {total_unattached} unattached EIPs"
+        )
+
+        # Display cost impact table
+        if total_unattached > 0:
+            table = Table(title="Elastic IP Cost Impact Analysis")
+            table.add_column("Metric", style="cyan")
+            table.add_column("Value", style="yellow")
+
+            table.add_row("Unattached EIPs", str(total_unattached))
+            table.add_row("Monthly Cost", f"${total_monthly_cost:.2f}")
+            table.add_row("Annual Cost", f"${total_monthly_cost * 12:.2f}")
+            table.add_row("Regions Affected", str(len(regions_with_waste)))
+
+            console.print(table)
+            console.print(f"[bold red]💰 Potential savings: ${total_monthly_cost:.2f}/month[/bold red]")
+        else:
+            console.print("[green]✅ No unattached Elastic IPs found - no waste![/green]")
+
+    except Exception as e:
+        error_msg = f"Failed to analyze Elastic IP costs: {e}"
+        logger.error(error_msg)
+        result.mark_completed(OperationStatus.FAILED, error_msg)
+
+    return result
+
+def _get_all_regions(self) -> List[str]:
+    """Get all available AWS regions for EC2."""
+    try:
+        ec2_client = boto3.client('ec2', region_name='us-east-1')
+        response = ec2_client.describe_regions()
+        return [region['RegionName'] for region in response['Regions']]
+    except Exception:
+        # Fallback to common regions if API call fails
+        return [
+            'us-east-1', 'us-west-2', 'eu-west-1', 'ap-southeast-1',
+            'us-west-1', 'eu-central-1', 'ap-southeast-2'
+        ]
+
+
 def main():
     """Main entry point for standalone execution."""
     import sys
```