runbooks 0.7.0__py3-none-any.whl → 0.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +87 -37
- runbooks/cfat/README.md +300 -49
- runbooks/cfat/__init__.py +2 -2
- runbooks/finops/__init__.py +1 -1
- runbooks/finops/cli.py +1 -1
- runbooks/inventory/collectors/__init__.py +8 -0
- runbooks/inventory/collectors/aws_management.py +791 -0
- runbooks/inventory/collectors/aws_networking.py +3 -3
- runbooks/main.py +3389 -782
- runbooks/operate/__init__.py +207 -0
- runbooks/operate/base.py +311 -0
- runbooks/operate/cloudformation_operations.py +619 -0
- runbooks/operate/cloudwatch_operations.py +496 -0
- runbooks/operate/dynamodb_operations.py +812 -0
- runbooks/operate/ec2_operations.py +926 -0
- runbooks/operate/iam_operations.py +569 -0
- runbooks/operate/s3_operations.py +1211 -0
- runbooks/operate/tagging_operations.py +655 -0
- runbooks/remediation/CLAUDE.md +100 -0
- runbooks/remediation/DOME9.md +218 -0
- runbooks/remediation/README.md +26 -0
- runbooks/remediation/Tests/__init__.py +0 -0
- runbooks/remediation/Tests/update_policy.py +74 -0
- runbooks/remediation/__init__.py +95 -0
- runbooks/remediation/acm_cert_expired_unused.py +98 -0
- runbooks/remediation/acm_remediation.py +875 -0
- runbooks/remediation/api_gateway_list.py +167 -0
- runbooks/remediation/base.py +643 -0
- runbooks/remediation/cloudtrail_remediation.py +908 -0
- runbooks/remediation/cloudtrail_s3_modifications.py +296 -0
- runbooks/remediation/cognito_active_users.py +78 -0
- runbooks/remediation/cognito_remediation.py +856 -0
- runbooks/remediation/cognito_user_password_reset.py +163 -0
- runbooks/remediation/commons.py +455 -0
- runbooks/remediation/dynamodb_optimize.py +155 -0
- runbooks/remediation/dynamodb_remediation.py +744 -0
- runbooks/remediation/dynamodb_server_side_encryption.py +108 -0
- runbooks/remediation/ec2_public_ips.py +134 -0
- runbooks/remediation/ec2_remediation.py +892 -0
- runbooks/remediation/ec2_subnet_disable_auto_ip_assignment.py +72 -0
- runbooks/remediation/ec2_unattached_ebs_volumes.py +448 -0
- runbooks/remediation/ec2_unused_security_groups.py +202 -0
- runbooks/remediation/kms_enable_key_rotation.py +651 -0
- runbooks/remediation/kms_remediation.py +717 -0
- runbooks/remediation/lambda_list.py +243 -0
- runbooks/remediation/lambda_remediation.py +971 -0
- runbooks/remediation/multi_account.py +569 -0
- runbooks/remediation/rds_instance_list.py +199 -0
- runbooks/remediation/rds_remediation.py +873 -0
- runbooks/remediation/rds_snapshot_list.py +192 -0
- runbooks/remediation/requirements.txt +118 -0
- runbooks/remediation/s3_block_public_access.py +159 -0
- runbooks/remediation/s3_bucket_public_access.py +143 -0
- runbooks/remediation/s3_disable_static_website_hosting.py +74 -0
- runbooks/remediation/s3_downloader.py +215 -0
- runbooks/remediation/s3_enable_access_logging.py +562 -0
- runbooks/remediation/s3_encryption.py +526 -0
- runbooks/remediation/s3_force_ssl_secure_policy.py +143 -0
- runbooks/remediation/s3_list.py +141 -0
- runbooks/remediation/s3_object_search.py +201 -0
- runbooks/remediation/s3_remediation.py +816 -0
- runbooks/remediation/scan_for_phrase.py +425 -0
- runbooks/remediation/workspaces_list.py +220 -0
- runbooks/security/__init__.py +9 -10
- runbooks/security/security_baseline_tester.py +4 -2
- runbooks-0.7.6.dist-info/METADATA +608 -0
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/RECORD +84 -76
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/entry_points.txt +0 -1
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/top_level.txt +0 -1
- jupyter-agent/.env +0 -2
- jupyter-agent/.env.template +0 -2
- jupyter-agent/.gitattributes +0 -35
- jupyter-agent/.gradio/certificate.pem +0 -31
- jupyter-agent/README.md +0 -16
- jupyter-agent/__main__.log +0 -8
- jupyter-agent/app.py +0 -256
- jupyter-agent/cloudops-agent.png +0 -0
- jupyter-agent/ds-system-prompt.txt +0 -154
- jupyter-agent/jupyter-agent.png +0 -0
- jupyter-agent/llama3_template.jinja +0 -123
- jupyter-agent/requirements.txt +0 -9
- jupyter-agent/tmp/4ojbs8a02ir/jupyter-agent.ipynb +0 -68
- jupyter-agent/tmp/cm5iasgpm3p/jupyter-agent.ipynb +0 -91
- jupyter-agent/tmp/crqbsseag5/jupyter-agent.ipynb +0 -91
- jupyter-agent/tmp/hohanq1u097/jupyter-agent.ipynb +0 -57
- jupyter-agent/tmp/jns1sam29wm/jupyter-agent.ipynb +0 -53
- jupyter-agent/tmp/jupyter-agent.ipynb +0 -27
- jupyter-agent/utils.py +0 -409
- runbooks/aws/__init__.py +0 -58
- runbooks/aws/dynamodb_operations.py +0 -231
- runbooks/aws/ec2_copy_image_cross-region.py +0 -195
- runbooks/aws/ec2_describe_instances.py +0 -202
- runbooks/aws/ec2_ebs_snapshots_delete.py +0 -186
- runbooks/aws/ec2_run_instances.py +0 -213
- runbooks/aws/ec2_start_stop_instances.py +0 -212
- runbooks/aws/ec2_terminate_instances.py +0 -143
- runbooks/aws/ec2_unused_eips.py +0 -196
- runbooks/aws/ec2_unused_volumes.py +0 -188
- runbooks/aws/s3_create_bucket.py +0 -142
- runbooks/aws/s3_list_buckets.py +0 -152
- runbooks/aws/s3_list_objects.py +0 -156
- runbooks/aws/s3_object_operations.py +0 -183
- runbooks/aws/tagging_lambda_handler.py +0 -183
- runbooks/inventory/FAILED_SCRIPTS_TROUBLESHOOTING.md +0 -619
- runbooks/inventory/PASSED_SCRIPTS_GUIDE.md +0 -738
- runbooks/inventory/aws_organization.png +0 -0
- runbooks/inventory/cfn_move_stack_instances.py +0 -1526
- runbooks/inventory/delete_s3_buckets_objects.py +0 -169
- runbooks/inventory/lockdown_cfn_stackset_role.py +0 -224
- runbooks/inventory/update_aws_actions.py +0 -173
- runbooks/inventory/update_cfn_stacksets.py +0 -1215
- runbooks/inventory/update_cloudwatch_logs_retention_policy.py +0 -294
- runbooks/inventory/update_iam_roles_cross_accounts.py +0 -478
- runbooks/inventory/update_s3_public_access_block.py +0 -539
- runbooks/organizations/__init__.py +0 -12
- runbooks/organizations/manager.py +0 -374
- runbooks-0.7.0.dist-info/METADATA +0 -375
- /runbooks/inventory/{tests → Tests}/common_test_data.py +0 -0
- /runbooks/inventory/{tests → Tests}/common_test_functions.py +0 -0
- /runbooks/inventory/{tests → Tests}/script_test_data.py +0 -0
- /runbooks/inventory/{tests → Tests}/setup.py +0 -0
- /runbooks/inventory/{tests → Tests}/src.py +0 -0
- /runbooks/inventory/{tests/test_inventory_modules.py → Tests/test_Inventory_Modules.py} +0 -0
- /runbooks/inventory/{tests → Tests}/test_cfn_describe_stacks.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_ec2_describe_instances.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_lambda_list_functions.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_moto_integration_example.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_org_list_accounts.py +0 -0
- /runbooks/inventory/{Inventory_Modules.py → inventory_modules.py} +0 -0
- /runbooks/{aws → operate}/tags.json +0 -0
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/WHEEL +0 -0
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,72 @@
|
|
1
|
+
"""
|
2
|
+
EC2 Subnet Security - Disable automatic public IP assignment for enhanced security.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import logging
|
6
|
+
|
7
|
+
import click
|
8
|
+
from botocore.exceptions import ClientError
|
9
|
+
|
10
|
+
from .commons import display_aws_account_info, get_client
|
11
|
+
|
12
|
+
logger = logging.getLogger(__name__)
|
13
|
+
|
14
|
+
|
15
|
+
@click.command()
@click.option(
    "--dry-run/--no-dry-run",
    default=True,
    help="Preview mode - show actions without making changes",
)
def disable_auto_public_ips(dry_run: bool = True):
    """Disable automatic public IP assignment on VPC subnets.

    Scans the account's subnets for ``MapPublicIpOnLaunch=true`` and, when run
    with ``--no-dry-run``, disables the setting on each one.  Defaults to a
    safe preview mode that only reports findings.

    Args:
        dry_run: When True (default), report findings without modifying subnets.

    Raises:
        ClientError: If the EC2 API calls fail (permissions, throttling, etc.).
    """
    logger.info(f"Checking subnet auto-assign public IP in {display_aws_account_info()}")

    try:
        ec2 = get_client("ec2")

        # Find subnets with auto-assign public IP enabled.
        # NOTE(review): describe_subnets is not paginated here; accounts with a
        # very large number of matching subnets may need a paginator.
        response = ec2.describe_subnets(Filters=[{"Name": "mapPublicIpOnLaunch", "Values": ["true"]}])

        subnets_with_auto_ip = response.get("Subnets", [])

        if not subnets_with_auto_ip:
            logger.info("✓ No subnets found with automatic public IP assignment enabled")
            return

        logger.info(f"Found {len(subnets_with_auto_ip)} subnets with auto-assign public IP enabled")

        # Track which subnets were actually modified (cleanup mode only)
        subnets_modified = []

        # Process each subnet
        for subnet in subnets_with_auto_ip:
            subnet_id = subnet["SubnetId"]
            vpc_id = subnet.get("VpcId", "Unknown")
            az = subnet.get("AvailabilityZone", "Unknown")

            logger.info(f"Subnet: {subnet_id} (VPC: {vpc_id}, AZ: {az})")
            logger.info(f" ✗ Auto-assign public IP is enabled")

            # Disable auto-assign if not in dry-run mode; a failure on one
            # subnet is logged and does not abort the remaining subnets.
            if not dry_run:
                try:
                    logger.info(f" → Disabling auto-assign public IP...")
                    ec2.modify_subnet_attribute(SubnetId=subnet_id, MapPublicIpOnLaunch={"Value": False})
                    subnets_modified.append(subnet_id)
                    logger.info(f" ✓ Successfully disabled auto-assign public IP")

                except ClientError as e:
                    logger.error(f" ✗ Failed to modify subnet {subnet_id}: {e}")

        # Summary
        logger.info("\n=== SUMMARY ===")
        logger.info(f"Subnets with auto-assign public IP: {len(subnets_with_auto_ip)}")

        if dry_run and subnets_with_auto_ip:
            logger.info(f"To disable auto-assign on {len(subnets_with_auto_ip)} subnets, run with --no-dry-run")
        elif not dry_run:
            logger.info(f"Successfully modified {len(subnets_modified)} subnets")

    except ClientError as e:
        logger.error(f"Failed to process subnet auto-assign settings: {e}")
        raise
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        raise
|
@@ -0,0 +1,448 @@
|
|
1
|
+
"""
|
2
|
+
Enterprise EBS Volume Cleanup - Unattached Volume Detection and Management
|
3
|
+
|
4
|
+
## Overview
|
5
|
+
|
6
|
+
This module provides capabilities for detecting and optionally cleaning up unattached
|
7
|
+
EBS volumes in AWS accounts. Unattached volumes incur unnecessary costs and represent
|
8
|
+
potential security risks if they contain sensitive data.
|
9
|
+
|
10
|
+
## Key Features
|
11
|
+
|
12
|
+
- **Safe Detection**: Identifies unattached EBS volumes with comprehensive metadata
|
13
|
+
- **CloudTrail Integration**: Tracks last attachment times for informed decisions
|
14
|
+
- **Dry-Run Support**: Safe preview mode before any destructive operations
|
15
|
+
- **Cost Optimization**: Helps reduce unnecessary EBS storage costs
|
16
|
+
- **Audit Trail**: Comprehensive logging of all detection and cleanup operations
|
17
|
+
|
18
|
+
## Usage Examples
|
19
|
+
|
20
|
+
```python
|
21
|
+
# Detection only (safe)
|
22
|
+
python ec2_unattached_ebs_volumes.py --dry-run
|
23
|
+
|
24
|
+
# Cleanup unattached volumes (destructive)
|
25
|
+
python ec2_unattached_ebs_volumes.py
|
26
|
+
```
|
27
|
+
|
28
|
+
## Important Safety Notes
|
29
|
+
|
30
|
+
⚠️ **WARNING**: This script can DELETE EBS volumes permanently
|
31
|
+
⚠️ **DATA LOSS**: Deleted volumes cannot be recovered
|
32
|
+
⚠️ **COST IMPACT**: Verify volumes are truly unused before deletion
|
33
|
+
|
34
|
+
Version: 0.7.6 - Enterprise Production Ready
|
35
|
+
Compliance: CIS AWS Foundations, Cost Optimization Best Practices
|
36
|
+
"""
|
37
|
+
|
38
|
+
import datetime
|
39
|
+
import logging
|
40
|
+
from typing import Any, Dict, List, Optional, Union
|
41
|
+
|
42
|
+
import click
|
43
|
+
from botocore.exceptions import BotoCoreError, ClientError
|
44
|
+
|
45
|
+
from .commons import get_client, write_to_csv
|
46
|
+
|
47
|
+
# Configure enterprise logging
|
48
|
+
logger = logging.getLogger(__name__)
|
49
|
+
logger.setLevel(logging.INFO)
|
50
|
+
|
51
|
+
|
52
|
+
def get_last_volume_attachment_time(volume_id: str) -> Optional[datetime.datetime]:
    """
    Retrieve the last attachment time of an EBS volume from CloudTrail audit logs.

    Queries CloudTrail ``lookup_events`` for events referencing the specified
    volume over the past 365 days and returns the timestamp of the most recent
    'AttachVolume' event.  CloudTrail returns events newest-first, so the first
    matching event encountered while paginating is the latest attachment.

    ## Security Considerations

    - Requires CloudTrail to be enabled in the AWS account
    - Requires appropriate IAM permissions for CloudTrail access
    - May not find events if CloudTrail logging was disabled

    Args:
        volume_id (str): The EBS volume identifier (e.g., 'vol-1234567890abcdef0')
            Must be a valid AWS EBS volume ID format

    Returns:
        Optional[datetime.datetime]: The UTC timestamp of the last attachment event,
            or None if no attachment history is found

    Raises:
        ClientError: If CloudTrail API access fails due to permissions or service issues
        ValueError: If volume_id format is invalid

    Example:
        >>> last_attached = get_last_volume_attachment_time('vol-1234567890abcdef0')
        >>> if last_attached:
        ...     print(f"Volume was last attached on {last_attached}")
        ... else:
        ...     print("No attachment history found")
    """
    # Input validation
    if not volume_id or not isinstance(volume_id, str):
        raise ValueError(f"Invalid volume_id: {volume_id}. Must be a non-empty string.")

    if not volume_id.startswith("vol-"):
        raise ValueError(f"Invalid volume_id format: {volume_id}. Must start with 'vol-'.")

    logger.debug(f"Querying CloudTrail for attachment history of volume: {volume_id}")

    try:
        # Initialize CloudTrail client with error handling
        cloudtrail = get_client("cloudtrail")

        # Timezone-aware "now"; datetime.utcnow() is deprecated and naive.
        search_start_time = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=365)

        logger.debug(f"Searching CloudTrail from {search_start_time} to present for volume {volume_id}")

        # Paginate through the events for this volume.  The previous single
        # MaxResults=1 call only inspected the newest event of *any* type, so
        # e.g. a later DetachVolume event would hide real attachment history.
        paginator = cloudtrail.get_paginator("lookup_events")
        for page in paginator.paginate(
            LookupAttributes=[{"AttributeKey": "ResourceName", "AttributeValue": volume_id}],
            StartTime=search_start_time,
        ):
            for event in page.get("Events", []):
                if event.get("EventName", "") != "AttachVolume":
                    continue

                timestamp = event.get("EventTime")
                if timestamp:
                    logger.info(f"Volume {volume_id} was last attached on {timestamp}")
                    return timestamp

                logger.warning(f"AttachVolume event found for {volume_id} but no timestamp available")

        logger.info(f"No CloudTrail events found for volume {volume_id} in the past 365 days")
        return None

    except ClientError as e:
        error_code = e.response.get("Error", {}).get("Code", "Unknown")
        error_message = e.response.get("Error", {}).get("Message", str(e))

        logger.error(f"CloudTrail API error while querying volume {volume_id}: {error_code} - {error_message}")

        # Handle specific CloudTrail errors gracefully
        if error_code in ["AccessDenied", "UnauthorizedOperation"]:
            logger.warning(f"Insufficient permissions to access CloudTrail for volume {volume_id}")
            return None
        elif error_code == "TrailNotFoundException":
            logger.warning(f"CloudTrail not configured - cannot determine attachment history for volume {volume_id}")
            return None
        else:
            # Re-raise unexpected errors
            raise

    except BotoCoreError as e:
        logger.error(f"AWS service error while querying CloudTrail for volume {volume_id}: {e}")
        return None

    except Exception as e:
        logger.error(f"Unexpected error while querying attachment history for volume {volume_id}: {e}")
        raise
|
166
|
+
|
167
|
+
|
168
|
+
@click.command()
@click.option(
    "--dry-run/--no-dry-run",
    default=True,
    help="Preview mode - show volumes that would be deleted without actually deleting them",
)
@click.option(
    "--max-age-days", type=int, default=30, help="Only consider volumes unattached for this many days or more"
)
@click.option("--output-file", type=str, help="Save results to CSV file")
@click.option("--region", type=str, help="AWS region to scan (defaults to current region)")
def detect_and_delete_volumes(dry_run: bool, max_age_days: int, output_file: Optional[str], region: Optional[str]):
    """
    Enterprise EBS Volume Cleanup - Detect and optionally remove unattached volumes.

    This command provides comprehensive detection and optional cleanup of unattached
    EBS volumes in your AWS account. Unattached volumes incur unnecessary costs and
    may represent security risks if they contain sensitive data.

    ## Operation Modes

    **Dry-Run Mode (Default - SAFE):**
    - Detects and reports unattached volumes
    - No destructive operations performed
    - Safe for production environments

    **Cleanup Mode (DESTRUCTIVE):**
    - Actually deletes identified unattached volumes
    - Requires explicit --no-dry-run flag
    - ⚠️ **WARNING: Data loss is permanent**

    ## Safety Features

    - CloudTrail integration for attachment history analysis
    - Configurable age thresholds for volume consideration
    - Comprehensive logging and audit trails
    - CSV export for external analysis
    - Input validation and error recovery

    Args:
        dry_run (bool): When True (default), only reports findings without deletion
        max_age_days (int): Minimum age in days before considering volumes for cleanup
        output_file (str): Optional CSV file path for saving detailed results
        region (str): AWS region to scan (defaults to configured region)

    Returns:
        None: Results are logged and optionally saved to CSV

    Raises:
        ClientError: If AWS API access fails due to permissions or service issues
        ValueError: If invalid parameters are provided

    Examples:
        # Safe detection only (recommended first step)
        python ec2_unattached_ebs_volumes.py --dry-run

        # Detection with custom age threshold and output
        python ec2_unattached_ebs_volumes.py --dry-run --max-age-days 7 --output-file volumes.csv

        # Actual cleanup (DESTRUCTIVE - use with extreme caution)
        python ec2_unattached_ebs_volumes.py --no-dry-run --max-age-days 90
    """
    # Input validation and configuration
    if max_age_days < 0:
        raise ValueError(f"max_age_days must be non-negative, got: {max_age_days}")

    # Enhanced logging for operation start
    operation_mode = "DRY-RUN (Safe Preview)" if dry_run else "CLEANUP (Destructive)"
    logger.info(f"🚀 Starting EBS Volume Analysis - Mode: {operation_mode}")
    logger.info(f"📊 Configuration: max_age_days={max_age_days}, region={region or 'default'}")

    if not dry_run:
        logger.warning("⚠️ DESTRUCTIVE MODE ENABLED - Volumes will be permanently deleted!")
        logger.warning("⚠️ Ensure you have verified these volumes are truly unused!")

    try:
        # Initialize EC2 client with region support
        ec2_client = get_client("ec2", region_name=region)
        logger.debug(f"Initialized EC2 client for region: {region or 'default'}")

        # Query for unattached volumes; paginate so accounts with more volumes
        # than one describe_volumes page returns are fully covered.
        logger.info("🔍 Querying for unattached EBS volumes...")

        volumes = []
        paginator = ec2_client.get_paginator("describe_volumes")
        for page in paginator.paginate(
            Filters=[
                {"Name": "status", "Values": ["available"]}  # Only unattached volumes
            ]
        ):
            volumes.extend(page.get("Volumes", []))

        total_volumes_found = len(volumes)

        logger.info(f"📋 Found {total_volumes_found} unattached volumes for analysis")

        if total_volumes_found == 0:
            logger.info("✅ No unattached volumes found - account is optimized!")
            return

        # Analyze each volume with comprehensive metadata collection
        analysis_results = []
        deletion_candidates = []
        total_cost_gb_month = 0.0

        logger.info("🔬 Analyzing volume metadata and attachment history...")

        for volume_index, volume in enumerate(volumes, 1):
            volume_id = volume["VolumeId"]
            volume_size = volume["Size"]
            volume_type = volume["VolumeType"]
            create_time = volume["CreateTime"]

            logger.debug(f"Processing volume {volume_index}/{total_volumes_found}: {volume_id}")

            try:
                # Calculate volume age (CreateTime from boto3 is tz-aware)
                volume_age_days = (datetime.datetime.now(datetime.timezone.utc) - create_time).days

                # Get CloudTrail attachment history
                last_attachment_time = get_last_volume_attachment_time(volume_id)

                # Determine time since last use
                if last_attachment_time:
                    # boto3 returns aware timestamps; only attach UTC when the
                    # value is naive (blindly replacing tzinfo on an aware
                    # non-UTC timestamp would mislabel the wall-clock time).
                    if last_attachment_time.tzinfo is None:
                        last_attachment_time = last_attachment_time.replace(tzinfo=datetime.timezone.utc)
                    days_since_detached = (
                        datetime.datetime.now(datetime.timezone.utc) - last_attachment_time
                    ).days
                else:
                    days_since_detached = volume_age_days  # Never attached

                # Estimate monthly cost (rough approximation)
                # GP3: $0.08/GB/month, GP2: $0.10/GB/month, IO1/IO2: varies
                cost_per_gb = 0.08 if volume_type == "gp3" else 0.10
                monthly_cost = volume_size * cost_per_gb
                total_cost_gb_month += monthly_cost

                # Comprehensive volume analysis data
                volume_analysis = {
                    "VolumeId": volume_id,
                    "Size": volume_size,
                    "VolumeType": volume_type,
                    "CreateTime": create_time.isoformat(),
                    "VolumeAgeDays": volume_age_days,
                    "LastAttachmentTime": last_attachment_time.isoformat()
                    if last_attachment_time
                    else "Never attached",
                    "DaysSinceDetached": days_since_detached,
                    "EstimatedMonthlyCost": f"${monthly_cost:.2f}",
                    "EligibleForCleanup": days_since_detached >= max_age_days,
                    "AvailabilityZone": volume.get("AvailabilityZone", "Unknown"),
                    "State": volume.get("State", "Unknown"),
                    "Encrypted": volume.get("Encrypted", False),
                    "Tags": volume.get("Tags", []),
                }

                analysis_results.append(volume_analysis)

                # Log volume findings with appropriate level
                if days_since_detached >= max_age_days:
                    logger.info(
                        f"🎯 CLEANUP CANDIDATE: {volume_id} ({volume_size}GB {volume_type}) - "
                        f"unattached for {days_since_detached} days, cost: ${monthly_cost:.2f}/month"
                    )
                    deletion_candidates.append(volume_analysis)
                else:
                    logger.debug(
                        f"📅 TOO RECENT: {volume_id} - unattached for only {days_since_detached} days "
                        f"(threshold: {max_age_days} days)"
                    )

            except Exception as e:
                logger.error(f"❌ Error analyzing volume {volume_id}: {e}")
                # Continue processing other volumes
                continue

        # Generate comprehensive summary report
        eligible_count = len(deletion_candidates)
        eligible_size_gb = sum(vol["Size"] for vol in deletion_candidates)
        eligible_monthly_cost = sum(float(vol["EstimatedMonthlyCost"].replace("$", "")) for vol in deletion_candidates)

        logger.info("📊 ANALYSIS SUMMARY:")
        logger.info(f" 📋 Total unattached volumes: {total_volumes_found}")
        logger.info(f" 🎯 Eligible for cleanup: {eligible_count}")
        logger.info(f" 💾 Total eligible storage: {eligible_size_gb} GB")
        logger.info(f" 💰 Potential monthly savings: ${eligible_monthly_cost:.2f}")
        logger.info(f" 📈 Total monthly EBS cost: ${total_cost_gb_month:.2f}")

        # Execute cleanup operations if not in dry-run mode
        if not dry_run and deletion_candidates:
            logger.warning(f"🗑️ EXECUTING CLEANUP: Deleting {eligible_count} volumes...")

            successful_deletions = 0
            failed_deletions = []

            for volume_data in deletion_candidates:
                volume_id = volume_data["VolumeId"]

                try:
                    logger.info(f"🗑️ Deleting volume: {volume_id}")

                    ec2_client.delete_volume(VolumeId=volume_id)
                    successful_deletions += 1

                    logger.info(f"✅ Successfully deleted volume: {volume_id}")

                except ClientError as e:
                    error_code = e.response.get("Error", {}).get("Code", "Unknown")
                    error_message = e.response.get("Error", {}).get("Message", str(e))

                    logger.error(f"❌ Failed to delete volume {volume_id}: {error_code} - {error_message}")
                    failed_deletions.append({"volume_id": volume_id, "error": error_message})

                    # Handle specific deletion errors
                    if error_code == "VolumeInUse":
                        logger.warning(f"⚠️ Volume {volume_id} is now in use - skipping deletion")
                    elif error_code in ["AccessDenied", "UnauthorizedOperation"]:
                        logger.error(f"🔒 Insufficient permissions to delete volume {volume_id}")

                except Exception as e:
                    logger.error(f"❌ Unexpected error deleting volume {volume_id}: {e}")
                    failed_deletions.append({"volume_id": volume_id, "error": str(e)})

            # Cleanup summary
            logger.info("🏁 CLEANUP OPERATION COMPLETE:")
            logger.info(f" ✅ Successfully deleted: {successful_deletions} volumes")
            logger.info(f" ❌ Failed deletions: {len(failed_deletions)} volumes")

            if failed_deletions:
                logger.warning("❌ Failed deletions details:")
                for failure in failed_deletions:
                    logger.warning(f" - {failure['volume_id']}: {failure['error']}")

        # Save results to CSV if requested
        if output_file and analysis_results:
            try:
                write_to_csv(analysis_results, output_file)
                logger.info(f"💾 Results saved to: {output_file}")
            except Exception as e:
                logger.error(f"❌ Failed to save results to {output_file}: {e}")

        # Final operation summary
        if dry_run:
            logger.info("✅ DRY-RUN COMPLETE - No volumes were deleted")
            if eligible_count > 0:
                logger.info(f"💡 To proceed with cleanup, run with --no-dry-run flag")
                logger.info(f"💰 Potential monthly savings: ${eligible_monthly_cost:.2f}")
        else:
            logger.info("✅ CLEANUP OPERATION COMPLETE")

    except ClientError as e:
        error_code = e.response.get("Error", {}).get("Code", "Unknown")
        error_message = e.response.get("Error", {}).get("Message", str(e))

        logger.error(f"❌ AWS API error during volume analysis: {error_code} - {error_message}")

        # Handle specific AWS errors gracefully
        if error_code in ["AccessDenied", "UnauthorizedOperation"]:
            logger.error("🔒 Insufficient IAM permissions for EC2 volume operations")
            logger.error(" Required permissions: ec2:DescribeVolumes, ec2:DeleteVolume, cloudtrail:LookupEvents")
        elif error_code == "InvalidRegion":
            logger.error(f"🌍 Invalid AWS region specified: {region}")
        else:
            raise

    except BotoCoreError as e:
        logger.error(f"❌ AWS service error during volume analysis: {e}")
        raise

    except Exception as e:
        logger.error(f"❌ Unexpected error during volume analysis: {e}")
        raise
|