runbooks 0.7.0__py3-none-any.whl → 0.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +87 -37
- runbooks/cfat/README.md +300 -49
- runbooks/cfat/__init__.py +2 -2
- runbooks/finops/__init__.py +1 -1
- runbooks/finops/cli.py +1 -1
- runbooks/inventory/collectors/__init__.py +8 -0
- runbooks/inventory/collectors/aws_management.py +791 -0
- runbooks/inventory/collectors/aws_networking.py +3 -3
- runbooks/main.py +3389 -782
- runbooks/operate/__init__.py +207 -0
- runbooks/operate/base.py +311 -0
- runbooks/operate/cloudformation_operations.py +619 -0
- runbooks/operate/cloudwatch_operations.py +496 -0
- runbooks/operate/dynamodb_operations.py +812 -0
- runbooks/operate/ec2_operations.py +926 -0
- runbooks/operate/iam_operations.py +569 -0
- runbooks/operate/s3_operations.py +1211 -0
- runbooks/operate/tagging_operations.py +655 -0
- runbooks/remediation/CLAUDE.md +100 -0
- runbooks/remediation/DOME9.md +218 -0
- runbooks/remediation/README.md +26 -0
- runbooks/remediation/Tests/__init__.py +0 -0
- runbooks/remediation/Tests/update_policy.py +74 -0
- runbooks/remediation/__init__.py +95 -0
- runbooks/remediation/acm_cert_expired_unused.py +98 -0
- runbooks/remediation/acm_remediation.py +875 -0
- runbooks/remediation/api_gateway_list.py +167 -0
- runbooks/remediation/base.py +643 -0
- runbooks/remediation/cloudtrail_remediation.py +908 -0
- runbooks/remediation/cloudtrail_s3_modifications.py +296 -0
- runbooks/remediation/cognito_active_users.py +78 -0
- runbooks/remediation/cognito_remediation.py +856 -0
- runbooks/remediation/cognito_user_password_reset.py +163 -0
- runbooks/remediation/commons.py +455 -0
- runbooks/remediation/dynamodb_optimize.py +155 -0
- runbooks/remediation/dynamodb_remediation.py +744 -0
- runbooks/remediation/dynamodb_server_side_encryption.py +108 -0
- runbooks/remediation/ec2_public_ips.py +134 -0
- runbooks/remediation/ec2_remediation.py +892 -0
- runbooks/remediation/ec2_subnet_disable_auto_ip_assignment.py +72 -0
- runbooks/remediation/ec2_unattached_ebs_volumes.py +448 -0
- runbooks/remediation/ec2_unused_security_groups.py +202 -0
- runbooks/remediation/kms_enable_key_rotation.py +651 -0
- runbooks/remediation/kms_remediation.py +717 -0
- runbooks/remediation/lambda_list.py +243 -0
- runbooks/remediation/lambda_remediation.py +971 -0
- runbooks/remediation/multi_account.py +569 -0
- runbooks/remediation/rds_instance_list.py +199 -0
- runbooks/remediation/rds_remediation.py +873 -0
- runbooks/remediation/rds_snapshot_list.py +192 -0
- runbooks/remediation/requirements.txt +118 -0
- runbooks/remediation/s3_block_public_access.py +159 -0
- runbooks/remediation/s3_bucket_public_access.py +143 -0
- runbooks/remediation/s3_disable_static_website_hosting.py +74 -0
- runbooks/remediation/s3_downloader.py +215 -0
- runbooks/remediation/s3_enable_access_logging.py +562 -0
- runbooks/remediation/s3_encryption.py +526 -0
- runbooks/remediation/s3_force_ssl_secure_policy.py +143 -0
- runbooks/remediation/s3_list.py +141 -0
- runbooks/remediation/s3_object_search.py +201 -0
- runbooks/remediation/s3_remediation.py +816 -0
- runbooks/remediation/scan_for_phrase.py +425 -0
- runbooks/remediation/workspaces_list.py +220 -0
- runbooks/security/__init__.py +9 -10
- runbooks/security/security_baseline_tester.py +4 -2
- runbooks-0.7.6.dist-info/METADATA +608 -0
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/RECORD +84 -76
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/entry_points.txt +0 -1
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/top_level.txt +0 -1
- jupyter-agent/.env +0 -2
- jupyter-agent/.env.template +0 -2
- jupyter-agent/.gitattributes +0 -35
- jupyter-agent/.gradio/certificate.pem +0 -31
- jupyter-agent/README.md +0 -16
- jupyter-agent/__main__.log +0 -8
- jupyter-agent/app.py +0 -256
- jupyter-agent/cloudops-agent.png +0 -0
- jupyter-agent/ds-system-prompt.txt +0 -154
- jupyter-agent/jupyter-agent.png +0 -0
- jupyter-agent/llama3_template.jinja +0 -123
- jupyter-agent/requirements.txt +0 -9
- jupyter-agent/tmp/4ojbs8a02ir/jupyter-agent.ipynb +0 -68
- jupyter-agent/tmp/cm5iasgpm3p/jupyter-agent.ipynb +0 -91
- jupyter-agent/tmp/crqbsseag5/jupyter-agent.ipynb +0 -91
- jupyter-agent/tmp/hohanq1u097/jupyter-agent.ipynb +0 -57
- jupyter-agent/tmp/jns1sam29wm/jupyter-agent.ipynb +0 -53
- jupyter-agent/tmp/jupyter-agent.ipynb +0 -27
- jupyter-agent/utils.py +0 -409
- runbooks/aws/__init__.py +0 -58
- runbooks/aws/dynamodb_operations.py +0 -231
- runbooks/aws/ec2_copy_image_cross-region.py +0 -195
- runbooks/aws/ec2_describe_instances.py +0 -202
- runbooks/aws/ec2_ebs_snapshots_delete.py +0 -186
- runbooks/aws/ec2_run_instances.py +0 -213
- runbooks/aws/ec2_start_stop_instances.py +0 -212
- runbooks/aws/ec2_terminate_instances.py +0 -143
- runbooks/aws/ec2_unused_eips.py +0 -196
- runbooks/aws/ec2_unused_volumes.py +0 -188
- runbooks/aws/s3_create_bucket.py +0 -142
- runbooks/aws/s3_list_buckets.py +0 -152
- runbooks/aws/s3_list_objects.py +0 -156
- runbooks/aws/s3_object_operations.py +0 -183
- runbooks/aws/tagging_lambda_handler.py +0 -183
- runbooks/inventory/FAILED_SCRIPTS_TROUBLESHOOTING.md +0 -619
- runbooks/inventory/PASSED_SCRIPTS_GUIDE.md +0 -738
- runbooks/inventory/aws_organization.png +0 -0
- runbooks/inventory/cfn_move_stack_instances.py +0 -1526
- runbooks/inventory/delete_s3_buckets_objects.py +0 -169
- runbooks/inventory/lockdown_cfn_stackset_role.py +0 -224
- runbooks/inventory/update_aws_actions.py +0 -173
- runbooks/inventory/update_cfn_stacksets.py +0 -1215
- runbooks/inventory/update_cloudwatch_logs_retention_policy.py +0 -294
- runbooks/inventory/update_iam_roles_cross_accounts.py +0 -478
- runbooks/inventory/update_s3_public_access_block.py +0 -539
- runbooks/organizations/__init__.py +0 -12
- runbooks/organizations/manager.py +0 -374
- runbooks-0.7.0.dist-info/METADATA +0 -375
- /runbooks/inventory/{tests → Tests}/common_test_data.py +0 -0
- /runbooks/inventory/{tests → Tests}/common_test_functions.py +0 -0
- /runbooks/inventory/{tests → Tests}/script_test_data.py +0 -0
- /runbooks/inventory/{tests → Tests}/setup.py +0 -0
- /runbooks/inventory/{tests → Tests}/src.py +0 -0
- /runbooks/inventory/{tests/test_inventory_modules.py → Tests/test_Inventory_Modules.py} +0 -0
- /runbooks/inventory/{tests → Tests}/test_cfn_describe_stacks.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_ec2_describe_instances.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_lambda_list_functions.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_moto_integration_example.py +0 -0
- /runbooks/inventory/{tests → Tests}/test_org_list_accounts.py +0 -0
- /runbooks/inventory/{Inventory_Modules.py → inventory_modules.py} +0 -0
- /runbooks/{aws → operate}/tags.json +0 -0
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/WHEEL +0 -0
- {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,908 @@
|
|
1
|
+
"""
|
2
|
+
Enterprise CloudTrail Security Remediation - Production-Ready Audit Trail Management
|
3
|
+
|
4
|
+
## CRITICAL WARNING
|
5
|
+
|
6
|
+
This module contains DESTRUCTIVE OPERATIONS that can revert S3 bucket policies and
|
7
|
+
modify security configurations. These operations can EXPOSE DATA PUBLICLY or BREAK
|
8
|
+
APPLICATION ACCESS if used incorrectly. EXTREME CAUTION must be exercised.
|
9
|
+
|
10
|
+
## Overview
|
11
|
+
|
12
|
+
This module provides comprehensive AWS CloudTrail security remediation capabilities,
|
13
|
+
migrating and enhancing the critical policy reversion functionality from the original
|
14
|
+
remediation scripts with enterprise-grade safety features.
|
15
|
+
|
16
|
+
## Original Scripts Enhanced
|
17
|
+
|
18
|
+
Migrated and enhanced from these CRITICAL original remediation scripts:
|
19
|
+
- cloudtrail_s3_modifications.py - S3 policy change tracking and reversion
|
20
|
+
|
21
|
+
## Enterprise Safety Enhancements
|
22
|
+
|
23
|
+
- **CRITICAL SAFETY CHECKS**: Multi-level verification before policy reverts
|
24
|
+
- **Policy Impact Assessment**: Analysis of policy changes and security implications
|
25
|
+
- **Backup Creation**: Complete policy state backup before any modifications
|
26
|
+
- **Dry-Run Mandatory**: All destructive operations require explicit confirmation
|
27
|
+
- **Rollback Capability**: Policy restoration and recovery procedures
|
28
|
+
- **Audit Logging**: Comprehensive logging of all policy operations
|
29
|
+
- **Security Validation**: Policy security analysis before and after changes
|
30
|
+
|
31
|
+
## Compliance Framework Mapping
|
32
|
+
|
33
|
+
### CIS AWS Foundations Benchmark
|
34
|
+
- **CIS 3.1**: CloudTrail configuration and monitoring
|
35
|
+
- **CIS 3.6**: S3 bucket access logging and policy management
|
36
|
+
|
37
|
+
### NIST Cybersecurity Framework
|
38
|
+
- **PR.PT-1**: Audit and log records are maintained
|
39
|
+
- **DE.AE-3**: Event data are collected and correlated
|
40
|
+
|
41
|
+
### SOC2 Security Framework
|
42
|
+
- **CC7.2**: System monitoring and logging controls
|
43
|
+
- **CC6.1**: Logical access security controls
|
44
|
+
|
45
|
+
## CRITICAL USAGE WARNINGS
|
46
|
+
|
47
|
+
⚠️ **PRODUCTION IMPACT WARNING**: These operations can expose data or break access
|
48
|
+
⚠️ **VERIFICATION REQUIRED**: Always verify policy impact before reversion
|
49
|
+
⚠️ **DRY-RUN FIRST**: Always test with --dry-run before actual execution
|
50
|
+
⚠️ **BACKUP ENABLED**: Ensure backup_enabled=True for all operations
|
51
|
+
|
52
|
+
## Example Usage
|
53
|
+
|
54
|
+
```python
|
55
|
+
from runbooks.remediation import CloudTrailRemediation, RemediationContext
|
56
|
+
|
57
|
+
# Initialize with MAXIMUM SAFETY settings
|
58
|
+
cloudtrail_remediation = CloudTrailRemediation(
|
59
|
+
profile="production",
|
60
|
+
backup_enabled=True, # MANDATORY
|
61
|
+
impact_verification=True, # MANDATORY
|
62
|
+
require_confirmation=True # MANDATORY
|
63
|
+
)
|
64
|
+
|
65
|
+
# ALWAYS start with dry-run
|
66
|
+
results = cloudtrail_remediation.analyze_s3_policy_changes(
|
67
|
+
context,
|
68
|
+
user_email="user@example.com",
|
69
|
+
dry_run=True, # MANDATORY for first run
|
70
|
+
verify_impact=True
|
71
|
+
)
|
72
|
+
```
|
73
|
+
|
74
|
+
Version: 0.7.6 - Enterprise Production Ready with CRITICAL SAFETY FEATURES
|
75
|
+
"""
|
76
|
+
|
77
|
+
import json
|
78
|
+
import os
|
79
|
+
import time
|
80
|
+
from datetime import datetime, timedelta, timezone
|
81
|
+
from typing import Any, Dict, List, Optional
|
82
|
+
|
83
|
+
import boto3
|
84
|
+
from botocore.exceptions import BotoCoreError, ClientError
|
85
|
+
from loguru import logger
|
86
|
+
|
87
|
+
from runbooks.remediation.base import (
|
88
|
+
BaseRemediation,
|
89
|
+
ComplianceMapping,
|
90
|
+
RemediationContext,
|
91
|
+
RemediationResult,
|
92
|
+
RemediationStatus,
|
93
|
+
)
|
94
|
+
|
95
|
+
|
96
|
+
class CloudTrailRemediation(BaseRemediation):
|
97
|
+
"""
|
98
|
+
Enterprise CloudTrail Security Remediation Operations.
|
99
|
+
|
100
|
+
⚠️ CRITICAL WARNING: This class contains DESTRUCTIVE policy operations
|
101
|
+
that can EXPOSE DATA PUBLICLY or break application access patterns.
|
102
|
+
|
103
|
+
Provides comprehensive CloudTrail analysis and S3 policy management including
|
104
|
+
safe policy change tracking, analysis, and selective reversion with extensive
|
105
|
+
safety verification.
|
106
|
+
|
107
|
+
## Key Safety Features
|
108
|
+
|
109
|
+
- **Policy Impact Assessment**: Analyzes security implications of policy changes
|
110
|
+
- **Multi-Service Verification**: Checks policy usage across all AWS services
|
111
|
+
- **Historical Analysis**: Complete audit trail of policy modifications
|
112
|
+
- **Selective Reversion**: Granular control over which policies to revert
|
113
|
+
- **Confirmation Prompts**: Multiple confirmation levels for destructive operations
|
114
|
+
- **Rollback Support**: Policy restoration and recovery procedures
|
115
|
+
|
116
|
+
## CRITICAL USAGE REQUIREMENTS
|
117
|
+
|
118
|
+
1. **ALWAYS** use dry_run=True for initial testing
|
119
|
+
2. **ALWAYS** enable backup_enabled=True
|
120
|
+
3. **VERIFY** policy impact and active usage before reversion
|
121
|
+
4. **TEST** in non-production environment first
|
122
|
+
5. **HAVE** rollback plan before executing
|
123
|
+
|
124
|
+
## Example Usage
|
125
|
+
|
126
|
+
```python
|
127
|
+
# SAFE initialization
|
128
|
+
cloudtrail_remediation = CloudTrailRemediation(
|
129
|
+
profile="production",
|
130
|
+
backup_enabled=True, # CRITICAL
|
131
|
+
impact_verification=True, # CRITICAL
|
132
|
+
require_confirmation=True # CRITICAL
|
133
|
+
)
|
134
|
+
|
135
|
+
# MANDATORY dry-run first
|
136
|
+
results = cloudtrail_remediation.analyze_s3_policy_changes(
|
137
|
+
context,
|
138
|
+
user_email="user@example.com",
|
139
|
+
dry_run=True, # CRITICAL
|
140
|
+
verify_impact=True
|
141
|
+
)
|
142
|
+
```
|
143
|
+
"""
|
144
|
+
|
145
|
+
supported_operations = [
|
146
|
+
"analyze_s3_policy_changes",
|
147
|
+
"revert_s3_policy_changes",
|
148
|
+
"audit_cloudtrail_events",
|
149
|
+
"verify_policy_security",
|
150
|
+
"comprehensive_cloudtrail_security",
|
151
|
+
]
|
152
|
+
|
153
|
+
def __init__(self, **kwargs):
    """
    Initialize CloudTrail remediation with mandatory safety settings.

    The base class is initialized first; safety-related flags are then read
    from **kwargs** (defaulting to the safe value ``True``), and finally the
    CloudTrail-specific tuning knobs are applied.

    Args:
        **kwargs: Configuration parameters. Recognized keys:
            impact_verification (bool): verify policy impact before changes.
            require_confirmation (bool): prompt before destructive operations.
            check_config_history (bool): consult AWS Config history.
            validate_policy_security (bool): run policy security analysis.
            max_events_per_lookup (int): CloudTrail lookup page size.
            default_lookback_days (int): default analysis window in days.

    Note:
        ``backup_enabled`` is unconditionally forced to ``True`` here —
        callers cannot opt out, because policy operations are destructive.
    """
    super().__init__(**kwargs)

    # Safety flags: default to the safest option when not supplied.
    self.impact_verification = kwargs.get("impact_verification", True)  # MANDATORY
    self.require_confirmation = kwargs.get("require_confirmation", True)  # MANDATORY
    # FORCE ENABLE - CRITICAL for policy operations (ignores any kwarg).
    self.backup_enabled = True

    # CloudTrail-specific tuning.
    self.check_config_history = kwargs.get("check_config_history", True)
    self.validate_policy_security = kwargs.get("validate_policy_security", True)
    self.max_events_per_lookup = kwargs.get("max_events_per_lookup", 50)
    self.default_lookback_days = kwargs.get("default_lookback_days", 7)

    # Loudly announce that destructive operations are now possible.
    logger.warning("CloudTrail Remediation initialized - DESTRUCTIVE operations enabled")
    logger.warning(
        f"Safety settings: backup_enabled={self.backup_enabled}, "
        f"impact_verification={self.impact_verification}, "
        f"require_confirmation={self.require_confirmation}"
    )
|
179
|
+
|
180
|
+
def _create_resource_backup(self, resource_id: str, backup_key: str, backup_type: str) -> str:
    """
    Create a backup snapshot of an S3 bucket's access configuration.

    Gathers the bucket policy, bucket ACL, and public-access-block settings
    into a single in-memory structure before any destructive policy change.

    Args:
        resource_id: S3 bucket name (for ``backup_type == "bucket_policy"``)
            or another policy identifier.
        backup_key: Backup identifier; its last ``_``-separated segment is
            treated as the timestamp.
        backup_type: Type of backup; only ``"bucket_policy"`` triggers data
            collection in this implementation.

    Returns:
        Backup location identifier (a synthetic
        ``cloudtrail-backup-CRITICAL://`` URI).

    Raises:
        ClientError: If the bucket policy cannot be read (other than the
            bucket simply having no policy).
        Exception: Any other failure is logged at CRITICAL level and
            re-raised — backup failure must abort the remediation.

    NOTE(review): ``backup_data`` is assembled but never written anywhere;
    the returned URI is synthetic (the code comments call this an MVP
    simplification). Confirm that a persistent store (e.g. S3) is wired in
    before relying on restore.
    """
    try:
        s3_client = self.get_client("s3")

        # Assemble the backup envelope; configurations are filled in below.
        backup_data = {
            "resource_id": resource_id,
            "backup_key": backup_key,
            "backup_type": backup_type,
            # Timestamp is encoded as the trailing segment of the backup key.
            "timestamp": backup_key.split("_")[-1],
            "backup_critical": True,  # Mark as critical backup
            "configurations": {},
        }

        if backup_type == "bucket_policy":
            bucket_name = resource_id

            # Capture policy, ACL, and public-access-block together so the
            # full access posture can be restored as one unit.
            try:
                # Current bucket policy; a missing policy is a valid state
                # and is recorded as None rather than treated as an error.
                try:
                    policy_response = self.execute_aws_call(s3_client, "get_bucket_policy", Bucket=bucket_name)
                    current_policy = json.loads(policy_response["Policy"])
                    backup_data["configurations"]["current_policy"] = current_policy
                except ClientError as e:
                    if "NoSuchBucketPolicy" in str(e):
                        backup_data["configurations"]["current_policy"] = None
                    else:
                        raise

                # Bucket ACL — best effort; failures degrade to None.
                try:
                    acl_response = self.execute_aws_call(s3_client, "get_bucket_acl", Bucket=bucket_name)
                    backup_data["configurations"]["bucket_acl"] = acl_response
                except ClientError:
                    backup_data["configurations"]["bucket_acl"] = None

                # Public access block settings — best effort as well.
                try:
                    pab_response = self.execute_aws_call(s3_client, "get_public_access_block", Bucket=bucket_name)
                    backup_data["configurations"]["public_access_block"] = pab_response[
                        "PublicAccessBlockConfiguration"
                    ]
                except ClientError:
                    backup_data["configurations"]["public_access_block"] = None

            except ClientError as e:
                logger.error(f"Could not backup bucket policy for {resource_id}: {e}")
                raise

        # Store backup with CRITICAL flag (simplified for MVP - would use S3 in production)
        backup_location = f"cloudtrail-backup-CRITICAL://{backup_key}.json"
        logger.critical(f"CRITICAL BACKUP created for S3 policy {resource_id}: {backup_location}")

        return backup_location

    except Exception as e:
        # Backup failure is fatal: without a backup, no policy operation
        # may proceed, so re-raise after logging.
        logger.critical(f"FAILED to create CRITICAL backup for S3 policy {resource_id}: {e}")
        raise
|
253
|
+
|
254
|
+
def _verify_policy_impact(
    self, bucket_name: str, old_policy: Dict[str, Any], new_policy: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Assess the security impact of replacing one bucket policy with another.

    Compares public-access exposure, principals, and actions between the
    two policies and flags the change as high risk when the NEW policy
    broadens access (public read/write enabled, wildcard principal added,
    dangerous action added, or security issues found in the new policy).

    Args:
        bucket_name: S3 bucket name (used for reporting only).
        old_policy: Previous policy document (may be None/empty).
        new_policy: Candidate policy document (may be None/empty).

    Returns:
        Dict with keys ``bucket_name``, ``high_risk`` (bool),
        ``impact_details``, ``security_changes`` (list of change codes),
        ``verification_timestamp``, and — only on internal failure —
        ``verification_error``.

    Note:
        This method never raises: any analysis error is caught and the
        result is forced to ``high_risk=True`` (fail-safe), so callers can
        rely on a conservative answer.
    """
    impact_analysis = {
        "bucket_name": bucket_name,
        "high_risk": False,
        "impact_details": {},
        "security_changes": [],
        # Timezone-aware UTC timestamp of this verification run.
        "verification_timestamp": datetime.now(tz=timezone.utc).isoformat(),
    }

    try:
        # A missing policy is treated as "no statements".
        old_statements = old_policy.get("Statement", []) if old_policy else []
        new_statements = new_policy.get("Statement", []) if new_policy else []

        # Compare public-access exposure before and after.
        old_public_access = self._analyze_public_access(old_statements)
        new_public_access = self._analyze_public_access(new_statements)

        impact_analysis["impact_details"]["old_public_access"] = old_public_access
        impact_analysis["impact_details"]["new_public_access"] = new_public_access

        # Record direction of each change; only the ENABLED direction
        # (access being opened up) escalates to high risk.
        if old_public_access["allows_public_read"] != new_public_access["allows_public_read"]:
            change = "PUBLIC_READ_ENABLED" if new_public_access["allows_public_read"] else "PUBLIC_READ_DISABLED"
            impact_analysis["security_changes"].append(change)
            if new_public_access["allows_public_read"]:
                impact_analysis["high_risk"] = True

        if old_public_access["allows_public_write"] != new_public_access["allows_public_write"]:
            change = "PUBLIC_WRITE_ENABLED" if new_public_access["allows_public_write"] else "PUBLIC_WRITE_DISABLED"
            impact_analysis["security_changes"].append(change)
            if new_public_access["allows_public_write"]:
                impact_analysis["high_risk"] = True

        # A wildcard principal appearing only in the new policy is always
        # high risk, regardless of which actions it covers.
        old_principals = self._extract_principals(old_statements)
        new_principals = self._extract_principals(new_statements)

        if "*" in new_principals and "*" not in old_principals:
            impact_analysis["security_changes"].append("WILDCARD_PRINCIPAL_ADDED")
            impact_analysis["high_risk"] = True

        # Newly granted dangerous actions escalate risk as well. Note this
        # looks at actions policy-wide, not per statement/principal.
        old_actions = self._extract_actions(old_statements)
        new_actions = self._extract_actions(new_statements)

        dangerous_actions = ["s3:*", "s3:GetObject", "s3:PutObject", "s3:DeleteObject"]
        for action in dangerous_actions:
            if action in new_actions and action not in old_actions:
                impact_analysis["security_changes"].append(f"DANGEROUS_ACTION_ADDED: {action}")
                impact_analysis["high_risk"] = True

        # Optional deep security analysis of the new policy document.
        if self.validate_policy_security:
            security_analysis = self._analyze_policy_security(new_policy)
            impact_analysis["impact_details"]["security_analysis"] = security_analysis
            if security_analysis.get("has_security_issues", False):
                impact_analysis["high_risk"] = True

        logger.info(
            f"Policy impact verification completed for {bucket_name}: High risk: {impact_analysis['high_risk']}"
        )

    except Exception as e:
        logger.error(f"Error during policy impact verification: {e}")
        # FAIL SAFE: If verification fails, assume high risk
        impact_analysis["high_risk"] = True
        impact_analysis["verification_error"] = str(e)

    return impact_analysis
|
340
|
+
|
341
|
+
def _analyze_public_access(self, statements: List[Dict[str, Any]]) -> Dict[str, Any]:
|
342
|
+
"""Analyze policy statements for public access patterns."""
|
343
|
+
public_access = {"allows_public_read": False, "allows_public_write": False, "public_statements": []}
|
344
|
+
|
345
|
+
for statement in statements:
|
346
|
+
effect = statement.get("Effect", "")
|
347
|
+
principals = statement.get("Principal", {})
|
348
|
+
actions = statement.get("Action", [])
|
349
|
+
|
350
|
+
if effect == "Allow":
|
351
|
+
# Check for wildcard principals
|
352
|
+
if principals == "*" or (isinstance(principals, dict) and principals.get("AWS") == "*"):
|
353
|
+
if isinstance(actions, str):
|
354
|
+
actions = [actions]
|
355
|
+
|
356
|
+
read_actions = ["s3:GetObject", "s3:ListBucket", "s3:GetBucketLocation"]
|
357
|
+
write_actions = ["s3:PutObject", "s3:DeleteObject", "s3:PutObjectAcl"]
|
358
|
+
|
359
|
+
if any(action in actions or action == "s3:*" for action in read_actions):
|
360
|
+
public_access["allows_public_read"] = True
|
361
|
+
|
362
|
+
if any(action in actions or action == "s3:*" for action in write_actions):
|
363
|
+
public_access["allows_public_write"] = True
|
364
|
+
|
365
|
+
public_access["public_statements"].append(statement)
|
366
|
+
|
367
|
+
return public_access
|
368
|
+
|
369
|
+
def _extract_principals(self, statements: List[Dict[str, Any]]) -> List[str]:
|
370
|
+
"""Extract all principals from policy statements."""
|
371
|
+
principals = set()
|
372
|
+
|
373
|
+
for statement in statements:
|
374
|
+
principal = statement.get("Principal", {})
|
375
|
+
|
376
|
+
if isinstance(principal, str):
|
377
|
+
principals.add(principal)
|
378
|
+
elif isinstance(principal, dict):
|
379
|
+
for key, value in principal.items():
|
380
|
+
if isinstance(value, list):
|
381
|
+
principals.update(value)
|
382
|
+
else:
|
383
|
+
principals.add(value)
|
384
|
+
|
385
|
+
return list(principals)
|
386
|
+
|
387
|
+
def _extract_actions(self, statements: List[Dict[str, Any]]) -> List[str]:
|
388
|
+
"""Extract all actions from policy statements."""
|
389
|
+
actions = set()
|
390
|
+
|
391
|
+
for statement in statements:
|
392
|
+
action = statement.get("Action", [])
|
393
|
+
|
394
|
+
if isinstance(action, str):
|
395
|
+
actions.add(action)
|
396
|
+
elif isinstance(action, list):
|
397
|
+
actions.update(action)
|
398
|
+
|
399
|
+
return list(actions)
|
400
|
+
|
401
|
+
def _analyze_policy_security(self, policy: Dict[str, Any]) -> Dict[str, Any]:
|
402
|
+
"""Analyze policy for security issues and best practices."""
|
403
|
+
security_analysis = {
|
404
|
+
"has_security_issues": False,
|
405
|
+
"security_issues": [],
|
406
|
+
"best_practice_violations": [],
|
407
|
+
"recommendations": [],
|
408
|
+
}
|
409
|
+
|
410
|
+
if not policy:
|
411
|
+
return security_analysis
|
412
|
+
|
413
|
+
statements = policy.get("Statement", [])
|
414
|
+
|
415
|
+
for i, statement in enumerate(statements):
|
416
|
+
statement_id = f"Statement_{i}"
|
417
|
+
|
418
|
+
# Check for overly permissive principals
|
419
|
+
principal = statement.get("Principal", {})
|
420
|
+
if principal == "*":
|
421
|
+
security_analysis["has_security_issues"] = True
|
422
|
+
security_analysis["security_issues"].append(
|
423
|
+
f"{statement_id}: Wildcard principal (*) allows public access"
|
424
|
+
)
|
425
|
+
|
426
|
+
# Check for overly permissive actions
|
427
|
+
actions = statement.get("Action", [])
|
428
|
+
if isinstance(actions, str):
|
429
|
+
actions = [actions]
|
430
|
+
|
431
|
+
if "s3:*" in actions:
|
432
|
+
security_analysis["best_practice_violations"].append(
|
433
|
+
f"{statement_id}: Wildcard action (s3:*) is overly permissive"
|
434
|
+
)
|
435
|
+
|
436
|
+
# Check for missing conditions
|
437
|
+
conditions = statement.get("Condition", {})
|
438
|
+
if not conditions and statement.get("Effect") == "Allow":
|
439
|
+
security_analysis["best_practice_violations"].append(
|
440
|
+
f"{statement_id}: No conditions specified for Allow statement"
|
441
|
+
)
|
442
|
+
|
443
|
+
# Generate recommendations
|
444
|
+
if security_analysis["security_issues"]:
|
445
|
+
security_analysis["recommendations"].append("Review and restrict wildcard principals")
|
446
|
+
|
447
|
+
if security_analysis["best_practice_violations"]:
|
448
|
+
security_analysis["recommendations"].append("Add conditions and restrict overly permissive actions")
|
449
|
+
|
450
|
+
return security_analysis
|
451
|
+
|
452
|
+
def execute_remediation(self, context: RemediationContext, **kwargs) -> List[RemediationResult]:
    """
    Execute a CloudTrail remediation operation with critical safety checks.

    Resolves the operation name (``kwargs["operation_type"]`` overriding
    ``context.operation_type``) and dispatches to the matching handler.

    Args:
        context: Remediation execution context.
        **kwargs: Operation-specific parameters, forwarded to the handler.

    Returns:
        List of remediation results from the dispatched handler.

    Raises:
        ValueError: If the operation type has no handler here.
    """
    operation_type = kwargs.get("operation_type", context.operation_type)

    # Table-driven dispatch to the concrete operation handlers.
    # NOTE(review): "verify_policy_security" is listed in
    # supported_operations but has no handler here — confirm intent.
    handlers = {
        "analyze_s3_policy_changes": self.analyze_s3_policy_changes,
        "revert_s3_policy_changes": self.revert_s3_policy_changes,
        "audit_cloudtrail_events": self.audit_cloudtrail_events,
        "comprehensive_cloudtrail_security": self.comprehensive_cloudtrail_security,
    }

    handler = handlers.get(operation_type)
    if handler is None:
        raise ValueError(f"Unsupported CloudTrail remediation operation: {operation_type}")
    return handler(context, **kwargs)
|
475
|
+
|
476
|
+
def analyze_s3_policy_changes(self, context: RemediationContext, **kwargs) -> List[RemediationResult]:
    """
    Analyze S3 bucket-policy changes made by a specific user via CloudTrail.

    Looks up policy-modification events for the user, runs a security
    impact assessment on each change, and records the findings (plus
    compliance evidence) in a single RemediationResult.

    Args:
        context: Remediation execution context (region, operation type).
        **kwargs: Keyword parameters:
            user_email (str): REQUIRED — user whose changes are analyzed.
            start_time (datetime): analysis window start; defaults to
                ``end_time - default_lookback_days``.
            end_time (datetime): analysis window end; defaults to now (UTC).

    Returns:
        Single-element list containing the RemediationResult. The result
        is marked FAILED (never raised to the caller) on any error,
        including a missing ``user_email``.
    """
    result = self.create_remediation_result(
        context, "analyze_s3_policy_changes", "cloudtrail:events", kwargs.get("user_email", "unknown")
    )

    # Tag the result with the compliance controls this analysis supports.
    result.context.compliance_mapping = ComplianceMapping(
        cis_controls=["CIS 3.1", "CIS 3.6"], nist_categories=["PR.PT-1", "DE.AE-3"], severity="medium"
    )

    try:
        user_email = kwargs.get("user_email")
        if not user_email:
            raise ValueError("user_email is required")

        # Default analysis window: the configured lookback ending now (UTC).
        end_time = kwargs.get("end_time", datetime.now(tz=timezone.utc))
        start_time = kwargs.get("start_time", end_time - timedelta(days=self.default_lookback_days))

        cloudtrail_client = self.get_client("cloudtrail", context.region)
        config_client = self.get_client("config", context.region)

        # Fetch PutBucketPolicy-style events attributed to this user.
        policy_modifications = self._get_s3_policy_modifications(
            cloudtrail_client, config_client, user_email, start_time, end_time
        )

        # Assess each modification; failures on individual items are logged
        # and skipped so one bad record cannot abort the whole analysis.
        analysis_results = []
        high_risk_changes = []

        for modification in policy_modifications:
            try:
                # _verify_policy_impact is fail-safe: it returns high_risk=True
                # rather than raising when its own analysis fails.
                impact_analysis = self._verify_policy_impact(
                    modification["BucketName"], modification["OldPolicy"], modification["NewPolicy"]
                )

                modification["impact_analysis"] = impact_analysis
                analysis_results.append(modification)

                if impact_analysis["high_risk"]:
                    high_risk_changes.append(modification)

            except Exception as e:
                logger.warning(
                    f"Could not analyze modification for bucket {modification.get('BucketName', 'unknown')}: {e}"
                )

        # Roll up per-change findings into an overall assessment summary.
        security_assessment = {
            "total_modifications": len(policy_modifications),
            "analyzed_modifications": len(analysis_results),
            "high_risk_changes": len(high_risk_changes),
            "user_email": user_email,
            "analysis_period": {"start_time": start_time.isoformat(), "end_time": end_time.isoformat()},
        }

        result.response_data = {
            "policy_modifications": analysis_results,
            "security_assessment": security_assessment,
            "high_risk_changes": high_risk_changes,
            "analysis_timestamp": result.start_time.isoformat(),
        }

        # Attach compliance evidence for audit reporting (CIS 3.1 / 3.6).
        result.add_compliance_evidence(
            "cis_aws",
            {
                "controls": ["3.1", "3.6"],
                "policy_changes_analyzed": len(analysis_results),
                "security_issues_identified": len(high_risk_changes),
                "remediation_timestamp": result.start_time.isoformat(),
            },
        )

        result.mark_completed(RemediationStatus.SUCCESS)
        logger.info(
            f"S3 policy analysis completed: {len(analysis_results)} modifications analyzed, "
            f"{len(high_risk_changes)} high-risk changes identified"
        )

    except ClientError as e:
        # AWS API failure: record on the result instead of propagating.
        error_msg = f"Failed to analyze S3 policy changes: {e}"
        logger.error(error_msg)
        result.mark_completed(RemediationStatus.FAILED, error_msg)
    except Exception as e:
        # Catch-all boundary: the result always carries the outcome.
        error_msg = f"Unexpected error during policy analysis: {e}"
        logger.error(error_msg)
        result.mark_completed(RemediationStatus.FAILED, error_msg)

    return [result]
|
584
|
+
|
585
|
+
def _get_s3_policy_modifications(
    self, cloudtrail_client: Any, config_client: Any, user_email: str, start_time: datetime, end_time: datetime
) -> List[Dict[str, Any]]:
    """
    Get S3 policy modifications from CloudTrail events.

    Looks up ``PutBucketPolicy`` events in the given time window, filters
    them to events attributable to ``user_email`` (matched against the
    principal ID, user name, and ARN of the event's ``userIdentity``), and
    pairs each new policy with the previous policy recovered from AWS
    Config history.

    Enhanced from original get_s3_policy_modifications function.

    Args:
        cloudtrail_client: boto3 CloudTrail client used for ``lookup_events``.
        config_client: boto3 Config client, passed through to
            ``_get_previous_policy_from_config``.
        user_email: Identifier fragment to match against the event's
            user identity fields.
        start_time: Inclusive start of the CloudTrail lookup window.
        end_time: Inclusive end of the CloudTrail lookup window.

    Returns:
        List of modification records, each containing the bucket name,
        new/old policies, event time, user identity and event metadata.

    Raises:
        Exception: Re-raises any failure of the CloudTrail lookup itself;
            per-event processing errors are logged and skipped.
    """
    modifications = []

    try:
        # Define CloudTrail lookup parameters.
        # BUGFIX: lookup_events takes "MaxResults"; "MaxItems" is only valid
        # in paginator config and causes a ParamValidationError on a direct call.
        lookup_params = {
            "LookupAttributes": [{"AttributeKey": "EventName", "AttributeValue": "PutBucketPolicy"}],
            "StartTime": start_time,
            "EndTime": end_time,
            "MaxResults": self.max_events_per_lookup,
        }

        # Get events from CloudTrail (single page, bounded by MaxResults)
        response = self.execute_aws_call(cloudtrail_client, "lookup_events", **lookup_params)
        events = response.get("Events", [])

        for event in events:
            try:
                cloudtrail_event = json.loads(event["CloudTrailEvent"])

                # Check if modification was made by the specified user
                user_identity = cloudtrail_event.get("userIdentity", {})
                principal_id = user_identity.get("principalId", "")
                user_name = user_identity.get("userName", "")
                arn = user_identity.get("arn", "")

                # Check multiple fields for user identification
                if user_email in principal_id or user_email in user_name or user_email in arn:
                    # Extract bucket and policy information
                    request_params = cloudtrail_event.get("requestParameters", {})
                    bucket_name = request_params.get("bucketName")
                    new_policy_raw = request_params.get("bucketPolicy")

                    if bucket_name and new_policy_raw:
                        # CloudTrail may record bucketPolicy either as a JSON
                        # string or as an already-parsed object; accept both.
                        if isinstance(new_policy_raw, dict):
                            new_policy = new_policy_raw
                        else:
                            try:
                                new_policy = json.loads(new_policy_raw)
                            except (json.JSONDecodeError, TypeError):
                                logger.warning(f"Could not parse new policy for bucket {bucket_name}")
                                continue

                        # Get previous policy from AWS Config
                        old_policy = self._get_previous_policy_from_config(
                            config_client, bucket_name, event["EventTime"]
                        )

                        modifications.append(
                            {
                                "BucketName": bucket_name,
                                "NewPolicy": new_policy,
                                "OldPolicy": old_policy,
                                "EventTime": event["EventTime"],
                                "UserIdentity": user_identity,
                                "EventId": event.get("EventId"),
                                "EventSource": cloudtrail_event.get("eventSource"),
                                "SourceIPAddress": cloudtrail_event.get("sourceIPAddress"),
                            }
                        )

            except Exception as e:
                # Best-effort: a malformed event must not abort the whole scan.
                logger.debug(f"Could not process CloudTrail event: {e}")
                continue

    except Exception as e:
        logger.error(f"Error retrieving CloudTrail events: {e}")
        raise

    return modifications
|
659
|
+
|
660
|
+
def _get_previous_policy_from_config(
    self, config_client: Any, bucket_name: str, event_time: datetime
) -> Optional[Dict[str, Any]]:
    """Get previous bucket policy from AWS Config history.

    Queries the most recent configuration item recorded at or before
    ``event_time`` (Config returns items in reverse chronological order by
    default, so ``laterTime`` + ``limit=1`` yields the latest item before
    the event) and extracts the bucket policy from its supplementary
    configuration.

    Args:
        config_client: boto3 Config client.
        bucket_name: S3 bucket whose policy history is queried.
        event_time: Upper bound for the configuration snapshot.

    Returns:
        The previous policy as a dict, or None if the bucket is not tracked
        by Config, no prior policy exists, or the stored policy text cannot
        be parsed. All failures are deliberately non-fatal (best-effort).
    """
    try:
        # Query AWS Config for the previous policy
        config_response = self.execute_aws_call(
            config_client,
            "get_resource_config_history",
            resourceType="AWS::S3::Bucket",
            resourceId=bucket_name,
            laterTime=event_time,
            limit=1,
        )

        config_items = config_response.get("configurationItems", [])
        if config_items:
            old_config = config_items[0]
            supplementary_config = old_config.get("supplementaryConfiguration", {})
            bucket_policy_config = supplementary_config.get("BucketPolicy")

            if bucket_policy_config and bucket_policy_config.get("policyText"):
                try:
                    return json.loads(bucket_policy_config["policyText"])
                except json.JSONDecodeError:
                    logger.warning(f"Could not parse old policy for bucket {bucket_name}")
                    return None

        return None  # No previous policy found

    except ClientError as e:
        # BUGFIX: classify by the API error code instead of substring-matching
        # str(e), which could false-positive on unrelated error messages.
        error_code = e.response.get("Error", {}).get("Code", "")
        if error_code == "ResourceNotDiscoveredException":
            logger.debug(f"Bucket {bucket_name} not tracked in AWS Config")
            return None
        else:
            logger.warning(f"Error retrieving previous policy from Config: {e}")
            return None
    except Exception as e:
        logger.warning(f"Unexpected error retrieving previous policy: {e}")
        return None
|
700
|
+
|
701
|
+
def revert_s3_policy_changes(self, context: RemediationContext, **kwargs) -> List[RemediationResult]:
    """
    CRITICAL OPERATION: Revert S3 bucket policy changes.

    ⚠️ WARNING: This operation can EXPOSE DATA PUBLICLY or break application access
    if policies are reverted incorrectly.

    Enhanced from original apply_policy function with enterprise safety features:
    impact verification (gated by ``self.impact_verification``), dry-run support,
    explicit-confirmation requirement (``self.require_confirmation``), and a
    policy backup taken before any mutation.

    Args:
        context: Remediation execution context
        bucket_name: S3 bucket name to revert policy for (required; a missing
            value is reported as a FAILED result rather than raised to callers)
        target_policy: Policy to revert to (if not provided, removes policy)
        force_high_risk: Set True to proceed past a high-risk impact analysis
        force_revert: Set True to bypass the interactive-confirmation gate
        **kwargs: Additional parameters

    Returns:
        List of remediation results (always exactly one result)
    """
    result = self.create_remediation_result(
        context, "revert_s3_policy_changes", "s3:bucket", kwargs.get("bucket_name", "unknown")
    )

    # Add compliance mapping
    result.context.compliance_mapping = ComplianceMapping(
        cis_controls=["CIS 3.6"], nist_categories=["PR.PT-1"], severity="high"
    )

    try:
        bucket_name = kwargs.get("bucket_name")
        target_policy = kwargs.get("target_policy")

        if not bucket_name:
            raise ValueError("bucket_name is required")

        s3_client = self.get_client("s3", context.region)

        # Get current bucket policy for comparison
        current_policy = None
        try:
            policy_response = self.execute_aws_call(s3_client, "get_bucket_policy", Bucket=bucket_name)
            current_policy = json.loads(policy_response["Policy"])
        except ClientError as e:
            # BUGFIX: check the API error code instead of substring-matching
            # str(e); a bucket with no policy is expected, anything else is not.
            if e.response.get("Error", {}).get("Code") != "NoSuchBucketPolicy":
                raise

        # CRITICAL: Verify policy impact before reversion
        if self.impact_verification and target_policy:
            impact_analysis = self._verify_policy_impact(bucket_name, current_policy, target_policy)
            result.response_data = {"impact_analysis": impact_analysis}

            if impact_analysis["high_risk"] and not kwargs.get("force_high_risk", False):
                result.mark_completed(
                    RemediationStatus.REQUIRES_MANUAL, "High risk policy change detected - manual approval required"
                )
                return [result]

        if context.dry_run:
            logger.info(f"[DRY-RUN] Would revert policy for bucket {bucket_name}")
            result.response_data.update(
                {
                    "bucket_name": bucket_name,
                    "current_policy": current_policy,
                    "target_policy": target_policy,
                    "action": "dry_run",
                }
            )
            result.mark_completed(RemediationStatus.DRY_RUN)
            return [result]

        # CRITICAL SAFETY CHECK: Require explicit confirmation for policy reversion
        if self.require_confirmation:
            logger.critical(f"ABOUT TO REVERT POLICY for bucket {bucket_name}!")
            logger.critical("This can EXPOSE DATA or BREAK APPLICATION ACCESS!")

            # In a real implementation, this would prompt for confirmation
            # For now, we'll skip reversion unless explicitly forced
            if not kwargs.get("force_revert", False):
                result.response_data.update(
                    {
                        "bucket_name": bucket_name,
                        "action": "confirmation_required",
                        "warning": "Policy reversion requires explicit confirmation with force_revert=True",
                    }
                )
                result.mark_completed(
                    RemediationStatus.REQUIRES_MANUAL, "Policy reversion requires explicit confirmation"
                )
                return [result]

        # CRITICAL: Create backup before policy reversion
        backup_location = self.create_backup(context, bucket_name, "bucket_policy")
        result.backup_locations[bucket_name] = backup_location

        # Execute policy reversion
        try:
            if target_policy:
                # Apply the target policy
                policy_json_str = json.dumps(target_policy)
                self.execute_aws_call(s3_client, "put_bucket_policy", Bucket=bucket_name, Policy=policy_json_str)
                logger.critical(f"Reverted policy for bucket {bucket_name}")
                action_taken = "policy_reverted"
            else:
                # Remove the bucket policy entirely
                self.execute_aws_call(s3_client, "delete_bucket_policy", Bucket=bucket_name)
                logger.critical(f"Removed policy for bucket {bucket_name}")
                action_taken = "policy_removed"

            # Add to affected resources
            result.affected_resources.append(f"s3:bucket:{bucket_name}")

            result.response_data.update(
                {
                    "bucket_name": bucket_name,
                    "action_taken": action_taken,
                    "previous_policy": current_policy,
                    "applied_policy": target_policy,
                }
            )

        except ClientError as e:
            error_msg = f"Failed to revert policy for bucket {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(RemediationStatus.FAILED, error_msg)
            return [result]

        # Add compliance evidence
        result.add_compliance_evidence(
            "cis_aws",
            {
                "controls": ["3.6"],
                "policy_reversion_completed": True,
                "bucket_secured": True,
                "remediation_timestamp": result.start_time.isoformat(),
            },
        )

        result.mark_completed(RemediationStatus.SUCCESS)
        logger.critical(f"Policy reversion completed successfully for bucket {bucket_name}")

    except ClientError as e:
        error_msg = f"Failed to revert S3 policy: {e}"
        logger.error(error_msg)
        result.mark_completed(RemediationStatus.FAILED, error_msg)
    except Exception as e:
        # Catches the missing-bucket_name ValueError as well, so the caller
        # always receives a result object rather than an exception.
        error_msg = f"Unexpected error during policy reversion: {e}"
        logger.error(error_msg)
        result.mark_completed(RemediationStatus.FAILED, error_msg)

    return [result]
|
850
|
+
|
851
|
+
def comprehensive_cloudtrail_security(self, context: RemediationContext, **kwargs) -> List[RemediationResult]:
    """
    Apply comprehensive CloudTrail security analysis.

    Runs the S3 policy-change analysis, optionally followed by policy
    reversion (only when ``include_policy_reversion=True``), collecting
    every operation's results. ``fail_fast=True`` stops at the first
    operation that fails or raises.

    Args:
        context: Remediation execution context
        **kwargs: Additional parameters (forwarded to each operation)

    Returns:
        List of remediation results from all operations
    """
    logger.info("Starting comprehensive CloudTrail security remediation")

    results: List[RemediationResult] = []
    fail_fast = kwargs.get("fail_fast", False)

    # Build the operation pipeline; reversion is opt-in because it mutates state.
    pipeline = [("analyze_s3_policy_changes", self.analyze_s3_policy_changes)]
    if kwargs.get("include_policy_reversion", False):
        pipeline.append(("revert_s3_policy_changes", self.revert_s3_policy_changes))

    for op_name, op_fn in pipeline:
        try:
            logger.info(f"Executing {op_name}")
            op_results = op_fn(context, **kwargs)
            results.extend(op_results)

            # A failed result doesn't raise; detect it explicitly.
            if any(r.failed for r in op_results):
                logger.warning(f"Operation {op_name} failed")
                if fail_fast:
                    break

        except Exception as e:
            logger.error(f"Error in {op_name}: {e}")
            # Record the crash as a synthetic failed result.
            crash_result = self.create_remediation_result(context, op_name, "cloudtrail:events", "comprehensive")
            crash_result.mark_completed(RemediationStatus.FAILED, str(e))
            results.append(crash_result)

            if fail_fast:
                break

    # Generate comprehensive summary
    success_count = sum(1 for r in results if r.success)
    failure_count = sum(1 for r in results if r.failed)

    logger.info(
        f"Comprehensive CloudTrail security remediation completed: "
        f"{success_count} successful, {failure_count} failed"
    )

    return results
|