runbooks-0.7.6-py3-none-any.whl → runbooks-0.7.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/base.py +5 -1
- runbooks/cfat/__init__.py +8 -4
- runbooks/cfat/assessment/collectors.py +171 -14
- runbooks/cfat/assessment/compliance.py +871 -0
- runbooks/cfat/assessment/runner.py +122 -11
- runbooks/cfat/models.py +6 -2
- runbooks/common/logger.py +14 -0
- runbooks/common/rich_utils.py +451 -0
- runbooks/enterprise/__init__.py +68 -0
- runbooks/enterprise/error_handling.py +411 -0
- runbooks/enterprise/logging.py +439 -0
- runbooks/enterprise/multi_tenant.py +583 -0
- runbooks/finops/README.md +468 -241
- runbooks/finops/__init__.py +39 -3
- runbooks/finops/cli.py +83 -18
- runbooks/finops/cross_validation.py +375 -0
- runbooks/finops/dashboard_runner.py +812 -164
- runbooks/finops/enhanced_dashboard_runner.py +525 -0
- runbooks/finops/finops_dashboard.py +1892 -0
- runbooks/finops/helpers.py +485 -51
- runbooks/finops/optimizer.py +823 -0
- runbooks/finops/tests/__init__.py +19 -0
- runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
- runbooks/finops/tests/run_comprehensive_tests.py +421 -0
- runbooks/finops/tests/run_tests.py +305 -0
- runbooks/finops/tests/test_finops_dashboard.py +705 -0
- runbooks/finops/tests/test_integration.py +477 -0
- runbooks/finops/tests/test_performance.py +380 -0
- runbooks/finops/tests/test_performance_benchmarks.py +500 -0
- runbooks/finops/tests/test_reference_images_validation.py +867 -0
- runbooks/finops/tests/test_single_account_features.py +715 -0
- runbooks/finops/tests/validate_test_suite.py +220 -0
- runbooks/finops/types.py +1 -1
- runbooks/hitl/enhanced_workflow_engine.py +725 -0
- runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
- runbooks/inventory/collectors/aws_comprehensive.py +442 -0
- runbooks/inventory/collectors/enterprise_scale.py +281 -0
- runbooks/inventory/core/collector.py +172 -13
- runbooks/inventory/discovery.md +1 -1
- runbooks/inventory/list_ec2_instances.py +18 -20
- runbooks/inventory/list_ssm_parameters.py +31 -3
- runbooks/inventory/organizations_discovery.py +1269 -0
- runbooks/inventory/rich_inventory_display.py +393 -0
- runbooks/inventory/run_on_multi_accounts.py +35 -19
- runbooks/inventory/runbooks.security.report_generator.log +0 -0
- runbooks/inventory/runbooks.security.run_script.log +0 -0
- runbooks/inventory/vpc_flow_analyzer.py +1030 -0
- runbooks/main.py +2215 -119
- runbooks/metrics/dora_metrics_engine.py +599 -0
- runbooks/operate/__init__.py +2 -2
- runbooks/operate/base.py +122 -10
- runbooks/operate/deployment_framework.py +1032 -0
- runbooks/operate/deployment_validator.py +853 -0
- runbooks/operate/dynamodb_operations.py +10 -6
- runbooks/operate/ec2_operations.py +319 -11
- runbooks/operate/executive_dashboard.py +779 -0
- runbooks/operate/mcp_integration.py +750 -0
- runbooks/operate/nat_gateway_operations.py +1120 -0
- runbooks/operate/networking_cost_heatmap.py +685 -0
- runbooks/operate/privatelink_operations.py +940 -0
- runbooks/operate/s3_operations.py +10 -6
- runbooks/operate/vpc_endpoints.py +644 -0
- runbooks/operate/vpc_operations.py +1038 -0
- runbooks/remediation/__init__.py +2 -2
- runbooks/remediation/acm_remediation.py +1 -1
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/cloudtrail_remediation.py +1 -1
- runbooks/remediation/cognito_remediation.py +1 -1
- runbooks/remediation/dynamodb_remediation.py +1 -1
- runbooks/remediation/ec2_remediation.py +1 -1
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
- runbooks/remediation/kms_enable_key_rotation.py +1 -1
- runbooks/remediation/kms_remediation.py +1 -1
- runbooks/remediation/lambda_remediation.py +1 -1
- runbooks/remediation/multi_account.py +1 -1
- runbooks/remediation/rds_remediation.py +1 -1
- runbooks/remediation/s3_block_public_access.py +1 -1
- runbooks/remediation/s3_enable_access_logging.py +1 -1
- runbooks/remediation/s3_encryption.py +1 -1
- runbooks/remediation/s3_remediation.py +1 -1
- runbooks/remediation/vpc_remediation.py +475 -0
- runbooks/security/__init__.py +3 -1
- runbooks/security/compliance_automation.py +632 -0
- runbooks/security/report_generator.py +10 -0
- runbooks/security/run_script.py +31 -5
- runbooks/security/security_baseline_tester.py +169 -30
- runbooks/security/security_export.py +477 -0
- runbooks/validation/__init__.py +10 -0
- runbooks/validation/benchmark.py +484 -0
- runbooks/validation/cli.py +356 -0
- runbooks/validation/mcp_validator.py +768 -0
- runbooks/vpc/__init__.py +38 -0
- runbooks/vpc/config.py +212 -0
- runbooks/vpc/cost_engine.py +347 -0
- runbooks/vpc/heatmap_engine.py +605 -0
- runbooks/vpc/manager_interface.py +634 -0
- runbooks/vpc/networking_wrapper.py +1260 -0
- runbooks/vpc/rich_formatters.py +679 -0
- runbooks/vpc/tests/__init__.py +5 -0
- runbooks/vpc/tests/conftest.py +356 -0
- runbooks/vpc/tests/test_cli_integration.py +530 -0
- runbooks/vpc/tests/test_config.py +458 -0
- runbooks/vpc/tests/test_cost_engine.py +479 -0
- runbooks/vpc/tests/test_networking_wrapper.py +512 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
runbooks/remediation/__init__.py
CHANGED
@@ -41,7 +41,7 @@ automation lifecycle by bridging assessment findings to automated fixes.
 - CloudTrail Policy Analysis & Reversion, Resource Scanning
 - Workspace Management, Cross-Service Utilities
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compatibility: AWS SDK v3, Python 3.8+, Multi-deployment ready
 """
 
@@ -66,7 +66,7 @@ from runbooks.remediation.rds_remediation import RDSSecurityRemediation
 from runbooks.remediation.s3_remediation import S3SecurityRemediation
 
 # Version info
-__version__ = "0.7.
+__version__ = "0.7.8"
 __author__ = "CloudOps Runbooks Team"
 
 # Public API exports
runbooks/remediation/base.py
CHANGED
@@ -60,7 +60,7 @@ Each remediation operation includes compliance framework mapping:
 - **AWS Well-Architected Framework**: Pillar and principle mapping
 - **CheckPoint CloudGuard/Dome9**: Rule-by-rule remediation mapping
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compatibility: AWS SDK v3, Python 3.8+, Multi-deployment ready
 """
 
runbooks/remediation/ec2_unattached_ebs_volumes.py
CHANGED
@@ -31,7 +31,7 @@ python ec2_unattached_ebs_volumes.py
 ⚠️ **DATA LOSS**: Deleted volumes cannot be recovered
 ⚠️ **COST IMPACT**: Verify volumes are truly unused before deletion
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compliance: CIS AWS Foundations, Cost Optimization Best Practices
 """
 
runbooks/remediation/kms_enable_key_rotation.py
CHANGED
@@ -43,7 +43,7 @@ python kms_enable_key_rotation.py --rotation-days 365
 ⚠️ **COST IMPACT**: Key rotation may impact application performance
 ⚠️ **TESTING**: Verify applications handle key rotation gracefully
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compliance: CIS AWS Foundations 3.8, NIST SP 800-57
 """
 
runbooks/remediation/s3_block_public_access.py
CHANGED
@@ -47,7 +47,7 @@ python s3_block_public_access.py --block
 ⚠️ **WEBSITE HOSTING**: Will disable S3 static website hosting features
 ⚠️ **CDN INTEGRATION**: May affect CloudFront and other CDN configurations
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compliance: CIS AWS Foundations 2.1.5, NIST SP 800-53
 """
 
runbooks/remediation/s3_enable_access_logging.py
CHANGED
@@ -53,7 +53,7 @@ python s3_enable_access_logging.py --log-bucket audit-logs --log-prefix access-l
 ⚠️ **COST IMPACT**: Access logging incurs additional storage costs
 ⚠️ **RETENTION**: Consider lifecycle policies for log management
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compliance: CIS AWS Foundations 3.1, SOC2 A1.1, PCI DSS 10.2
 """
 
runbooks/remediation/s3_encryption.py
CHANGED
@@ -53,7 +53,7 @@ python s3_encryption.py --encryption-type sse-kms --create-kms-key
 ⚠️ **KEY MANAGEMENT**: Customer-managed keys require proper lifecycle management
 ⚠️ **COMPLIANCE**: Some regulations require specific encryption types
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 Compliance: CIS AWS Foundations 2.1.1, SOC2 A1.2, PCI DSS 3.4
 """
 
runbooks/remediation/s3_remediation.py
CHANGED
@@ -46,7 +46,7 @@ Migrated and enhanced from these original remediation scripts:
 - **D9.AWS.S3.02**: S3 bucket public access prevention
 - **D9.AWS.S3.03**: S3 bucket encryption enforcement
 
-Version: 0.7.
+Version: 0.7.8 - Enterprise Production Ready
 """
 
 import json
runbooks/remediation/vpc_remediation.py
ADDED
@@ -0,0 +1,475 @@
+"""
+VPC Security and Cost Optimization Remediation Module.
+
+This module provides automated remediation capabilities for VPC-related
+security findings and cost optimization opportunities identified during
+CFAT assessments.
+
+Key Features:
+- NAT Gateway cost optimization (GitHub Issue #96)
+- VPC Flow Log enablement for security monitoring
+- Security Group hardening
+- Network ACL compliance enforcement
+- Subnet auto-assign public IP remediation
+
+Integration:
+- Works with CFAT VPC assessments
+- Leverages VPC Operations module
+- Enterprise safety features (dry-run, confirmation, rollback)
+"""
+
+from typing import Any, Dict, List, Optional
+
+from loguru import logger
+
+from runbooks.remediation.base import (
+    BaseRemediation,
+    RemediationContext,
+    RemediationResult,
+    RemediationStatus,
+)
+
+
+class VPCRemediation(BaseRemediation):
+    """
+    VPC security and cost optimization remediation with GitHub Issue #96 integration.
+
+    Handles automated fixes for:
+    - NAT Gateway cost optimization
+    - VPC Flow Log configuration
+    - Security Group hardening
+    - Network security compliance
+    """
+
+    supported_operations = [
+        "optimize_nat_gateways",
+        "enable_vpc_flow_logs",
+        "disable_subnet_auto_assign_public_ip",
+        "remediate_open_security_groups",
+    ]
+
+    def optimize_nat_gateways(
+        self, context: RemediationContext, vpc_id: Optional[str] = None, max_nat_per_az: int = 1
+    ) -> List[RemediationResult]:
+        """
+        Optimize NAT Gateway placement for cost reduction ($45/month per gateway).
+
+        This remediation addresses GitHub Issue #96 by:
+        - Analyzing NAT Gateway distribution across AZs
+        - Identifying consolidation opportunities
+        - Recommending cost-effective placement strategies
+        - Providing estimated monthly savings
+
+        Args:
+            context: Remediation execution context
+            vpc_id: Specific VPC to optimize (None for all VPCs)
+            max_nat_per_az: Maximum NAT Gateways per Availability Zone
+
+        Returns:
+            List of remediation results with cost savings information
+        """
+        logger.info(f"Starting NAT Gateway cost optimization (target: {max_nat_per_az} per AZ)")
+
+        results = []
+
+        try:
+            ec2_client = self.get_client("ec2")
+
+            # Get all NAT Gateways (or for specific VPC)
+            describe_params = {"MaxResults": 100}
+            if vpc_id:
+                # Filter by VPC using subnet filter
+                subnets_response = ec2_client.describe_subnets(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
+                subnet_ids = [s["SubnetId"] for s in subnets_response.get("Subnets", [])]
+                if subnet_ids:
+                    describe_params["Filter"] = [{"Name": "subnet-id", "Values": subnet_ids}]
+
+            nat_response = ec2_client.describe_nat_gateways(**describe_params)
+            nat_gateways = nat_response.get("NatGateways", [])
+
+            # Filter to active NAT Gateways only
+            active_nats = [nat for nat in nat_gateways if nat.get("State") == "available"]
+
+            if not active_nats:
+                result = self.create_remediation_result(
+                    context, "optimize_nat_gateways", "ec2:nat_gateway", "no-nat-gateways"
+                )
+                result.status = RemediationStatus.SKIPPED
+                result.error_message = "No active NAT Gateways found for optimization"
+                results.append(result)
+                return results
+
+            # Get subnet information for AZ mapping
+            subnet_ids = [nat.get("SubnetId") for nat in active_nats if nat.get("SubnetId")]
+            subnets_response = ec2_client.describe_subnets(SubnetIds=subnet_ids)
+            subnets = subnets_response.get("Subnets", [])
+
+            # Create AZ to NAT Gateway mapping
+            az_nat_mapping = {}
+            for nat in active_nats:
+                subnet_id = nat.get("SubnetId")
+                nat_id = nat.get("NatGatewayId")
+
+                # Find AZ for this NAT Gateway
+                nat_az = None
+                for subnet in subnets:
+                    if subnet.get("SubnetId") == subnet_id:
+                        nat_az = subnet.get("AvailabilityZone")
+                        break
+
+                if nat_az:
+                    if nat_az not in az_nat_mapping:
+                        az_nat_mapping[nat_az] = []
+                    az_nat_mapping[nat_az].append(
+                        {"nat_id": nat_id, "subnet_id": subnet_id, "created_time": nat.get("CreateTime")}
+                    )
+
+            # Analyze optimization opportunities
+            total_current_cost = len(active_nats) * 45
+            potential_savings = 0
+
+            for az, nat_list in az_nat_mapping.items():
+                if len(nat_list) > max_nat_per_az:
+                    excess_nats = len(nat_list) - max_nat_per_az
+                    potential_savings += excess_nats * 45  # $45/month per NAT Gateway
+
+                    # Sort by creation time to keep the oldest (most stable)
+                    sorted_nats = sorted(nat_list, key=lambda x: x["created_time"])
+                    nats_to_remove = sorted_nats[max_nat_per_az:]  # Remove excess
+
+                    for nat_info in nats_to_remove:
+                        nat_id = nat_info["nat_id"]
+
+                        result = self.create_remediation_result(
+                            context, "optimize_nat_gateways", "ec2:nat_gateway", nat_id
+                        )
+
+                        result.response_data = {
+                            "availability_zone": az,
+                            "current_az_nat_count": len(nat_list),
+                            "target_az_nat_count": max_nat_per_az,
+                            "monthly_savings": 45,
+                            "optimization_reason": "excess_nat_gateway",
+                        }
+
+                        if not context.dry_run:
+                            try:
+                                # Create backup information
+                                if context.backup_enabled:
+                                    backup_info = {
+                                        "nat_gateway_info": nat_info,
+                                        "availability_zone": az,
+                                        "removal_reason": "cost_optimization",
+                                    }
+                                    result.backup_locations["nat_gateway"] = f"backup-{nat_id}"
+
+                                # Delete NAT Gateway
+                                delete_response = self.execute_aws_call(
+                                    ec2_client, "delete_nat_gateway", NatGatewayId=nat_id
+                                )
+
+                                result.mark_completed(RemediationStatus.SUCCESS)
+                                result.response_data.update(delete_response)
+
+                                logger.info(f"Deleted NAT Gateway {nat_id} for cost optimization")
+
+                            except Exception as e:
+                                result.mark_completed(RemediationStatus.FAILED, str(e))
+                                logger.error(f"Failed to delete NAT Gateway {nat_id}: {e}")
+                        else:
+                            result.mark_completed(RemediationStatus.DRY_RUN)
+
+                        results.append(result)
+
+            # Create summary result
+            summary_result = self.create_remediation_result(
+                context, "optimize_nat_gateways", "ec2:vpc", "nat-optimization-summary"
+            )
+            summary_result.response_data = {
+                "total_nat_gateways": len(active_nats),
+                "current_monthly_cost": total_current_cost,
+                "potential_monthly_savings": potential_savings,
+                "optimization_percentage": round((potential_savings / total_current_cost) * 100, 1)
+                if total_current_cost > 0
+                else 0,
+                "availability_zones_analyzed": len(az_nat_mapping),
+                "github_issue": "#96",
+            }
+            summary_result.mark_completed(RemediationStatus.SUCCESS)
+            results.append(summary_result)
+
+        except Exception as e:
+            error_result = self.create_remediation_result(
+                context, "optimize_nat_gateways", "ec2:vpc", "nat-optimization-error"
+            )
+            error_result.mark_completed(RemediationStatus.FAILED, f"NAT Gateway optimization failed: {str(e)}")
+            results.append(error_result)
+            logger.error(f"NAT Gateway optimization failed: {e}")
+
+        return results
+
+    def enable_vpc_flow_logs(
+        self,
+        context: RemediationContext,
+        vpc_ids: Optional[List[str]] = None,
+        log_destination_type: str = "cloud-watch-logs",
+    ) -> List[RemediationResult]:
+        """
+        Enable VPC Flow Logs for security monitoring and compliance.
+
+        Args:
+            context: Remediation execution context
+            vpc_ids: List of VPC IDs to enable flow logs for (None for all VPCs)
+            log_destination_type: Destination type ('cloud-watch-logs', 's3', 'kinesis-data-firehose')
+
+        Returns:
+            List of remediation results
+        """
+        logger.info("Enabling VPC Flow Logs for security monitoring")
+
+        results = []
+
+        try:
+            ec2_client = self.get_client("ec2")
+
+            # Get VPCs to process
+            if vpc_ids:
+                vpcs_response = ec2_client.describe_vpcs(VpcIds=vpc_ids)
+            else:
+                vpcs_response = ec2_client.describe_vpcs()
+
+            vpcs = vpcs_response.get("Vpcs", [])
+
+            # Get existing flow logs
+            flow_logs_response = ec2_client.describe_flow_logs()
+            existing_flow_logs = flow_logs_response.get("FlowLogs", [])
+            existing_vpc_ids = {fl.get("ResourceId") for fl in existing_flow_logs if fl.get("ResourceType") == "VPC"}
+
+            for vpc in vpcs:
+                vpc_id = vpc.get("VpcId")
+                vpc_name = next(
+                    (tag.get("Value", "") for tag in vpc.get("Tags", []) if tag.get("Key") == "Name"), vpc_id
+                )
+
+                result = self.create_remediation_result(context, "enable_vpc_flow_logs", "ec2:vpc", vpc_id)
+
+                if vpc_id in existing_vpc_ids:
+                    result.mark_completed(RemediationStatus.SKIPPED)
+                    result.error_message = f"VPC Flow Logs already enabled for {vpc_name}"
+                else:
+                    try:
+                        if not context.dry_run:
+                            # Create CloudWatch Logs group if using CloudWatch destination
+                            if log_destination_type == "cloud-watch-logs":
+                                logs_client = self.get_client("logs")
+                                log_group_name = f"/aws/vpc/flowlogs/{vpc_id}"
+
+                                try:
+                                    logs_client.create_log_group(logGroupName=log_group_name)
+                                    logger.info(f"Created CloudWatch log group: {log_group_name}")
+                                except logs_client.exceptions.ResourceAlreadyExistsException:
+                                    logger.info(f"CloudWatch log group already exists: {log_group_name}")
+
+                            # Create flow log
+                            flow_log_response = self.execute_aws_call(
+                                ec2_client,
+                                "create_flow_logs",
+                                ResourceIds=[vpc_id],
+                                ResourceType="VPC",
+                                TrafficType="ALL",
+                                LogDestinationType=log_destination_type,
+                                LogGroupName=log_group_name,
+                                Tags=[
+                                    {"Key": "Name", "Value": f"FlowLog-{vpc_name}"},
+                                    {"Key": "Purpose", "Value": "SecurityMonitoring"},
+                                    {"Key": "CreatedBy", "Value": "CloudOps-Runbooks-VPC-Remediation"},
+                                ],
+                            )
+
+                            result.mark_completed(RemediationStatus.SUCCESS)
+                            result.response_data = flow_log_response
+
+                        else:
+                            result.mark_completed(RemediationStatus.DRY_RUN)
+
+                    except Exception as e:
+                        result.mark_completed(RemediationStatus.FAILED, f"Failed to enable Flow Logs: {str(e)}")
+                        logger.error(f"Failed to enable VPC Flow Logs for {vpc_id}: {e}")
+
+                results.append(result)
+
+        except Exception as e:
+            error_result = self.create_remediation_result(
+                context, "enable_vpc_flow_logs", "ec2:vpc", "vpc-flow-logs-error"
+            )
+            error_result.mark_completed(RemediationStatus.FAILED, f"VPC Flow Logs remediation failed: {str(e)}")
+            results.append(error_result)
+            logger.error(f"VPC Flow Logs remediation failed: {e}")
+
+        return results
+
+    def disable_subnet_auto_assign_public_ip(
+        self, context: RemediationContext, vpc_ids: Optional[List[str]] = None
+    ) -> List[RemediationResult]:
+        """
+        Disable auto-assign public IP for subnets to improve security posture.
+
+        Args:
+            context: Remediation execution context
+            vpc_ids: List of VPC IDs to process (None for all VPCs)
+
+        Returns:
+            List of remediation results
+        """
+        logger.info("Disabling subnet auto-assign public IP for security hardening")
+
+        results = []
+
+        try:
+            ec2_client = self.get_client("ec2")
+
+            # Get subnets to process
+            describe_params = {}
+            if vpc_ids:
+                describe_params["Filters"] = [{"Name": "vpc-id", "Values": vpc_ids}]
+
+            subnets_response = ec2_client.describe_subnets(**describe_params)
+            subnets = subnets_response.get("Subnets", [])
+
+            for subnet in subnets:
+                subnet_id = subnet.get("SubnetId")
+                subnet_name = next(
+                    (tag.get("Value", "") for tag in subnet.get("Tags", []) if tag.get("Key") == "Name"), subnet_id
+                )
+                auto_assign_public_ip = subnet.get("MapPublicIpOnLaunch", False)
+
+                result = self.create_remediation_result(
+                    context, "disable_subnet_auto_assign_public_ip", "ec2:subnet", subnet_id
+                )
+
+                if not auto_assign_public_ip:
+                    result.mark_completed(RemediationStatus.SKIPPED)
+                    result.error_message = f"Subnet {subnet_name} already has auto-assign public IP disabled"
+                else:
+                    try:
+                        if not context.dry_run:
+                            # Disable auto-assign public IP
+                            self.execute_aws_call(
+                                ec2_client,
+                                "modify_subnet_attribute",
+                                SubnetId=subnet_id,
+                                MapPublicIpOnLaunch={"Value": False},
+                            )
+
+                            result.mark_completed(RemediationStatus.SUCCESS)
+                        else:
+                            result.mark_completed(RemediationStatus.DRY_RUN)
+
+                    except Exception as e:
+                        result.mark_completed(RemediationStatus.FAILED, f"Failed to modify subnet attribute: {str(e)}")
+                        logger.error(f"Failed to disable auto-assign public IP for {subnet_id}: {e}")
+
+                results.append(result)
+
+        except Exception as e:
+            error_result = self.create_remediation_result(
+                context, "disable_subnet_auto_assign_public_ip", "ec2:subnet", "subnet-auto-ip-error"
+            )
+            error_result.mark_completed(RemediationStatus.FAILED, f"Subnet auto-assign IP remediation failed: {str(e)}")
+            results.append(error_result)
+            logger.error(f"Subnet auto-assign IP remediation failed: {e}")
+
+        return results
+
+    def remediate_open_security_groups(
+        self, context: RemediationContext, security_group_ids: Optional[List[str]] = None
+    ) -> List[RemediationResult]:
+        """
+        Remediate security groups with overly permissive rules (0.0.0.0/0).
+
+        Args:
+            context: Remediation execution context
+            security_group_ids: Specific security group IDs to remediate (None for all)
+
+        Returns:
+            List of remediation results
+        """
+        logger.info("Remediating overly permissive security groups")
+
+        results = []
+
+        try:
+            ec2_client = self.get_client("ec2")
+
+            # Get security groups to analyze
+            describe_params = {}
+            if security_group_ids:
+                describe_params["GroupIds"] = security_group_ids
+
+            sg_response = ec2_client.describe_security_groups(**describe_params)
+            security_groups = sg_response.get("SecurityGroups", [])
+
+            for sg in security_groups:
+                sg_id = sg.get("GroupId")
+                sg_name = sg.get("GroupName", sg_id)
+
+                result = self.create_remediation_result(
+                    context, "remediate_open_security_groups", "ec2:security_group", sg_id
+                )
+
+                # Check for overly permissive inbound rules
+                risky_inbound_rules = []
+                for rule in sg.get("IpPermissions", []):
+                    for ip_range in rule.get("IpRanges", []):
+                        if ip_range.get("CidrIp") == "0.0.0.0/0":
+                            risky_inbound_rules.append(rule)
+                            break
+
+                if not risky_inbound_rules:
+                    result.mark_completed(RemediationStatus.SKIPPED)
+                    result.error_message = f"Security group {sg_name} has no overly permissive rules"
+                else:
+                    try:
+                        if not context.dry_run:
+                            # Create backup
+                            if context.backup_enabled:
+                                result.backup_locations["security_group"] = f"backup-{sg_id}"
+
+                            rules_modified = 0
+                            for rule in risky_inbound_rules:
+                                # Remove the overly permissive rule
+                                try:
+                                    self.execute_aws_call(
+                                        ec2_client, "revoke_security_group_ingress", GroupId=sg_id, IpPermissions=[rule]
+                                    )
+                                    rules_modified += 1
+                                    logger.info(f"Removed permissive rule from security group {sg_id}")
+                                except Exception as rule_error:
+                                    logger.error(f"Failed to remove rule from {sg_id}: {rule_error}")
+
+                            if rules_modified > 0:
+                                result.mark_completed(RemediationStatus.SUCCESS)
+                                result.response_data = {"rules_modified": rules_modified}
+                            else:
+                                result.mark_completed(
+                                    RemediationStatus.FAILED, f"Failed to remove any rules from {sg_name}"
+                                )
+                        else:
+                            result.mark_completed(RemediationStatus.DRY_RUN)
+                            result.response_data = {"risky_rules_count": len(risky_inbound_rules)}
+
+                    except Exception as e:
+                        result.mark_completed(RemediationStatus.FAILED, f"Failed to remediate security group: {str(e)}")
+                        logger.error(f"Failed to remediate security group {sg_id}: {e}")
+
+                results.append(result)
+
+        except Exception as e:
+            error_result = self.create_remediation_result(
+                context, "remediate_open_security_groups", "ec2:security_group", "security-group-error"
+            )
+            error_result.mark_completed(RemediationStatus.FAILED, f"Security group remediation failed: {str(e)}")
+            results.append(error_result)
+            logger.error(f"Security group remediation failed: {e}")
+
+        return results
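For orientation, the sketch below shows how the new VPCRemediation operations might be driven in dry-run mode. It is a minimal illustration only: the constructor signatures of BaseRemediation and RemediationContext are not part of this diff, so the profile, region, dry_run, and backup_enabled arguments are assumptions to adjust against the actual base classes.

```python
# Hypothetical dry-run usage of the new module; constructor arguments are
# assumptions, only the method names and result fields appear in this diff.
from runbooks.remediation.base import RemediationContext
from runbooks.remediation.vpc_remediation import VPCRemediation

remediator = VPCRemediation(profile="default", region="us-east-1")  # assumed signature
context = RemediationContext(dry_run=True, backup_enabled=True)  # assumed signature

# Analyze NAT Gateway placement without deleting anything; per-gateway and
# summary results carry estimated monthly savings in response_data.
for result in remediator.optimize_nat_gateways(context, max_nat_per_az=1):
    print(result.status, result.response_data or result.error_message)

# Flow-log enablement follows the same pattern; VPCs that already have flow
# logs come back as SKIPPED.
for result in remediator.enable_vpc_flow_logs(context, log_destination_type="cloud-watch-logs"):
    print(result.status, result.error_message or result.response_data)
```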
runbooks/security/__init__.py
CHANGED
@@ -50,15 +50,17 @@ from .report_generator import ReportGenerator, generate_html_report
 from .run_script import main as run_security_script
 from .run_script import parse_arguments
 from .security_baseline_tester import SecurityBaselineTester
+from .security_export import SecurityExporter
 
 # Version info
-__version__ = "0.7.
+__version__ = "0.7.8"
 __author__ = "CloudOps Runbooks Team"
 
 # Public API
 __all__ = [
     # Core functionality
     "SecurityBaselineTester",
+    "SecurityExporter",
     "ReportGenerator",
     "generate_html_report",
     # CLI functions
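As a quick check of the widened public API, the names added in this hunk can be imported from the package root of runbooks.security. Note that the in-file version string is 0.7.8 even though the wheel itself is 0.7.9; the SecurityExporter constructor and methods are not shown in this diff, so only the import is illustrated.

```python
# Import check against the names added in this hunk; nothing beyond the
# import itself is guaranteed by the diff.
from runbooks.security import SecurityBaselineTester, SecurityExporter, __version__

print(__version__)  # "0.7.8" per this hunk, while the wheel is versioned 0.7.9
```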