runbooks 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +31 -2
- runbooks/__init___optimized.py +18 -4
- runbooks/_platform/__init__.py +1 -5
- runbooks/_platform/core/runbooks_wrapper.py +141 -138
- runbooks/aws2/accuracy_validator.py +812 -0
- runbooks/base.py +7 -0
- runbooks/cfat/assessment/compliance.py +1 -1
- runbooks/cfat/assessment/runner.py +1 -0
- runbooks/cfat/cloud_foundations_assessment.py +227 -239
- runbooks/cli/__init__.py +1 -1
- runbooks/cli/commands/cfat.py +64 -23
- runbooks/cli/commands/finops.py +1005 -54
- runbooks/cli/commands/inventory.py +138 -35
- runbooks/cli/commands/operate.py +9 -36
- runbooks/cli/commands/security.py +42 -18
- runbooks/cli/commands/validation.py +432 -18
- runbooks/cli/commands/vpc.py +81 -17
- runbooks/cli/registry.py +22 -10
- runbooks/cloudops/__init__.py +20 -27
- runbooks/cloudops/base.py +96 -107
- runbooks/cloudops/cost_optimizer.py +544 -542
- runbooks/cloudops/infrastructure_optimizer.py +5 -4
- runbooks/cloudops/interfaces.py +224 -225
- runbooks/cloudops/lifecycle_manager.py +5 -4
- runbooks/cloudops/mcp_cost_validation.py +252 -235
- runbooks/cloudops/models.py +78 -53
- runbooks/cloudops/monitoring_automation.py +5 -4
- runbooks/cloudops/notebook_framework.py +177 -213
- runbooks/cloudops/security_enforcer.py +125 -159
- runbooks/common/accuracy_validator.py +11 -0
- runbooks/common/aws_pricing.py +349 -326
- runbooks/common/aws_pricing_api.py +211 -212
- runbooks/common/aws_profile_manager.py +40 -36
- runbooks/common/aws_utils.py +74 -79
- runbooks/common/business_logic.py +126 -104
- runbooks/common/cli_decorators.py +36 -60
- runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
- runbooks/common/cross_account_manager.py +197 -204
- runbooks/common/date_utils.py +27 -39
- runbooks/common/decorators.py +29 -19
- runbooks/common/dry_run_examples.py +173 -208
- runbooks/common/dry_run_framework.py +157 -155
- runbooks/common/enhanced_exception_handler.py +15 -4
- runbooks/common/enhanced_logging_example.py +50 -64
- runbooks/common/enhanced_logging_integration_example.py +65 -37
- runbooks/common/env_utils.py +16 -16
- runbooks/common/error_handling.py +40 -38
- runbooks/common/lazy_loader.py +41 -23
- runbooks/common/logging_integration_helper.py +79 -86
- runbooks/common/mcp_cost_explorer_integration.py +476 -493
- runbooks/common/mcp_integration.py +63 -74
- runbooks/common/memory_optimization.py +140 -118
- runbooks/common/module_cli_base.py +37 -58
- runbooks/common/organizations_client.py +175 -193
- runbooks/common/patterns.py +23 -25
- runbooks/common/performance_monitoring.py +67 -71
- runbooks/common/performance_optimization_engine.py +283 -274
- runbooks/common/profile_utils.py +111 -37
- runbooks/common/rich_utils.py +201 -141
- runbooks/common/sre_performance_suite.py +177 -186
- runbooks/enterprise/__init__.py +1 -1
- runbooks/enterprise/logging.py +144 -106
- runbooks/enterprise/security.py +187 -204
- runbooks/enterprise/validation.py +43 -56
- runbooks/finops/__init__.py +26 -30
- runbooks/finops/account_resolver.py +1 -1
- runbooks/finops/advanced_optimization_engine.py +980 -0
- runbooks/finops/automation_core.py +268 -231
- runbooks/finops/business_case_config.py +184 -179
- runbooks/finops/cli.py +660 -139
- runbooks/finops/commvault_ec2_analysis.py +157 -164
- runbooks/finops/compute_cost_optimizer.py +336 -320
- runbooks/finops/config.py +20 -20
- runbooks/finops/cost_optimizer.py +484 -618
- runbooks/finops/cost_processor.py +332 -214
- runbooks/finops/dashboard_runner.py +1006 -172
- runbooks/finops/ebs_cost_optimizer.py +991 -657
- runbooks/finops/elastic_ip_optimizer.py +317 -257
- runbooks/finops/enhanced_mcp_integration.py +340 -0
- runbooks/finops/enhanced_progress.py +32 -29
- runbooks/finops/enhanced_trend_visualization.py +3 -2
- runbooks/finops/enterprise_wrappers.py +223 -285
- runbooks/finops/executive_export.py +203 -160
- runbooks/finops/helpers.py +130 -288
- runbooks/finops/iam_guidance.py +1 -1
- runbooks/finops/infrastructure/__init__.py +80 -0
- runbooks/finops/infrastructure/commands.py +506 -0
- runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
- runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
- runbooks/finops/markdown_exporter.py +337 -174
- runbooks/finops/mcp_validator.py +1952 -0
- runbooks/finops/nat_gateway_optimizer.py +1512 -481
- runbooks/finops/network_cost_optimizer.py +657 -587
- runbooks/finops/notebook_utils.py +226 -188
- runbooks/finops/optimization_engine.py +1136 -0
- runbooks/finops/optimizer.py +19 -23
- runbooks/finops/rds_snapshot_optimizer.py +367 -411
- runbooks/finops/reservation_optimizer.py +427 -363
- runbooks/finops/scenario_cli_integration.py +64 -65
- runbooks/finops/scenarios.py +1277 -438
- runbooks/finops/schemas.py +218 -182
- runbooks/finops/snapshot_manager.py +2289 -0
- runbooks/finops/types.py +3 -3
- runbooks/finops/validation_framework.py +259 -265
- runbooks/finops/vpc_cleanup_exporter.py +189 -144
- runbooks/finops/vpc_cleanup_optimizer.py +591 -573
- runbooks/finops/workspaces_analyzer.py +171 -182
- runbooks/integration/__init__.py +89 -0
- runbooks/integration/mcp_integration.py +1920 -0
- runbooks/inventory/CLAUDE.md +816 -0
- runbooks/inventory/__init__.py +2 -2
- runbooks/inventory/cloud_foundations_integration.py +144 -149
- runbooks/inventory/collectors/aws_comprehensive.py +1 -1
- runbooks/inventory/collectors/aws_networking.py +109 -99
- runbooks/inventory/collectors/base.py +4 -0
- runbooks/inventory/core/collector.py +495 -313
- runbooks/inventory/drift_detection_cli.py +69 -96
- runbooks/inventory/inventory_mcp_cli.py +48 -46
- runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
- runbooks/inventory/mcp_inventory_validator.py +549 -465
- runbooks/inventory/mcp_vpc_validator.py +359 -442
- runbooks/inventory/organizations_discovery.py +55 -51
- runbooks/inventory/rich_inventory_display.py +33 -32
- runbooks/inventory/unified_validation_engine.py +278 -251
- runbooks/inventory/vpc_analyzer.py +732 -695
- runbooks/inventory/vpc_architecture_validator.py +293 -348
- runbooks/inventory/vpc_dependency_analyzer.py +382 -378
- runbooks/inventory/vpc_flow_analyzer.py +1 -1
- runbooks/main.py +49 -34
- runbooks/main_final.py +91 -60
- runbooks/main_minimal.py +22 -10
- runbooks/main_optimized.py +131 -100
- runbooks/main_ultra_minimal.py +7 -2
- runbooks/mcp/__init__.py +36 -0
- runbooks/mcp/integration.py +679 -0
- runbooks/monitoring/performance_monitor.py +9 -4
- runbooks/operate/dynamodb_operations.py +3 -1
- runbooks/operate/ec2_operations.py +145 -137
- runbooks/operate/iam_operations.py +146 -152
- runbooks/operate/networking_cost_heatmap.py +29 -8
- runbooks/operate/rds_operations.py +223 -254
- runbooks/operate/s3_operations.py +107 -118
- runbooks/operate/vpc_operations.py +646 -616
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/commons.py +10 -7
- runbooks/remediation/commvault_ec2_analysis.py +70 -66
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
- runbooks/remediation/multi_account.py +24 -21
- runbooks/remediation/rds_snapshot_list.py +86 -60
- runbooks/remediation/remediation_cli.py +92 -146
- runbooks/remediation/universal_account_discovery.py +83 -79
- runbooks/remediation/workspaces_list.py +46 -41
- runbooks/security/__init__.py +19 -0
- runbooks/security/assessment_runner.py +1150 -0
- runbooks/security/baseline_checker.py +812 -0
- runbooks/security/cloudops_automation_security_validator.py +509 -535
- runbooks/security/compliance_automation_engine.py +17 -17
- runbooks/security/config/__init__.py +2 -2
- runbooks/security/config/compliance_config.py +50 -50
- runbooks/security/config_template_generator.py +63 -76
- runbooks/security/enterprise_security_framework.py +1 -1
- runbooks/security/executive_security_dashboard.py +519 -508
- runbooks/security/multi_account_security_controls.py +959 -1210
- runbooks/security/real_time_security_monitor.py +422 -444
- runbooks/security/security_baseline_tester.py +1 -1
- runbooks/security/security_cli.py +143 -112
- runbooks/security/test_2way_validation.py +439 -0
- runbooks/security/two_way_validation_framework.py +852 -0
- runbooks/sre/production_monitoring_framework.py +167 -177
- runbooks/tdd/__init__.py +15 -0
- runbooks/tdd/cli.py +1071 -0
- runbooks/utils/__init__.py +14 -17
- runbooks/utils/logger.py +7 -2
- runbooks/utils/version_validator.py +50 -47
- runbooks/validation/__init__.py +6 -6
- runbooks/validation/cli.py +9 -3
- runbooks/validation/comprehensive_2way_validator.py +745 -704
- runbooks/validation/mcp_validator.py +906 -228
- runbooks/validation/terraform_citations_validator.py +104 -115
- runbooks/validation/terraform_drift_detector.py +447 -451
- runbooks/vpc/README.md +617 -0
- runbooks/vpc/__init__.py +8 -1
- runbooks/vpc/analyzer.py +577 -0
- runbooks/vpc/cleanup_wrapper.py +476 -413
- runbooks/vpc/cli_cloudtrail_commands.py +339 -0
- runbooks/vpc/cli_mcp_validation_commands.py +480 -0
- runbooks/vpc/cloudtrail_audit_integration.py +717 -0
- runbooks/vpc/config.py +92 -97
- runbooks/vpc/cost_engine.py +411 -148
- runbooks/vpc/cost_explorer_integration.py +553 -0
- runbooks/vpc/cross_account_session.py +101 -106
- runbooks/vpc/enhanced_mcp_validation.py +917 -0
- runbooks/vpc/eni_gate_validator.py +961 -0
- runbooks/vpc/heatmap_engine.py +185 -160
- runbooks/vpc/mcp_no_eni_validator.py +680 -639
- runbooks/vpc/nat_gateway_optimizer.py +358 -0
- runbooks/vpc/networking_wrapper.py +15 -8
- runbooks/vpc/pdca_remediation_planner.py +528 -0
- runbooks/vpc/performance_optimized_analyzer.py +219 -231
- runbooks/vpc/runbooks_adapter.py +1167 -241
- runbooks/vpc/tdd_red_phase_stubs.py +601 -0
- runbooks/vpc/test_data_loader.py +358 -0
- runbooks/vpc/tests/conftest.py +314 -4
- runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
- runbooks/vpc/tests/test_cost_engine.py +0 -2
- runbooks/vpc/topology_generator.py +326 -0
- runbooks/vpc/unified_scenarios.py +1297 -1124
- runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
- runbooks-1.1.5.dist-info/METADATA +328 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/RECORD +214 -193
- runbooks/finops/README.md +0 -414
- runbooks/finops/accuracy_cross_validator.py +0 -647
- runbooks/finops/business_cases.py +0 -950
- runbooks/finops/dashboard_router.py +0 -922
- runbooks/finops/ebs_optimizer.py +0 -973
- runbooks/finops/embedded_mcp_validator.py +0 -1629
- runbooks/finops/enhanced_dashboard_runner.py +0 -527
- runbooks/finops/finops_dashboard.py +0 -584
- runbooks/finops/finops_scenarios.py +0 -1218
- runbooks/finops/legacy_migration.py +0 -730
- runbooks/finops/multi_dashboard.py +0 -1519
- runbooks/finops/single_dashboard.py +0 -1113
- runbooks/finops/unlimited_scenarios.py +0 -393
- runbooks-1.1.4.dist-info/METADATA +0 -800
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/WHEEL +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/entry_points.txt +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/top_level.txt +0 -0
@@ -5,7 +5,7 @@ Real-Time AWS Security Monitoring with MCP Integration
|
|
5
5
|
Enterprise-grade real-time security monitoring framework integrated with MCP servers
|
6
6
|
for continuous security validation and automated threat response.
|
7
7
|
|
8
|
-
Author: DevOps Security Engineer (Claude Code Enterprise Team)
|
8
|
+
Author: DevOps Security Engineer (Claude Code Enterprise Team)
|
9
9
|
Framework: Real-time security validation with 61-account support
|
10
10
|
Status: Production-ready with MCP integration and automated remediation
|
11
11
|
|
@@ -48,17 +48,17 @@ from runbooks.common.rich_utils import (
|
|
48
48
|
|
49
49
|
class ThreatLevel(Enum):
|
50
50
|
"""Real-time threat severity levels."""
|
51
|
-
|
52
|
-
CRITICAL = "CRITICAL"
|
53
|
-
HIGH = "HIGH"
|
54
|
-
MEDIUM = "MEDIUM"
|
55
|
-
LOW = "LOW"
|
56
|
-
INFO = "INFO"
|
51
|
+
|
52
|
+
CRITICAL = "CRITICAL" # Immediate response required
|
53
|
+
HIGH = "HIGH" # Response within 1 hour
|
54
|
+
MEDIUM = "MEDIUM" # Response within 4 hours
|
55
|
+
LOW = "LOW" # Response within 24 hours
|
56
|
+
INFO = "INFO" # Informational only
|
57
57
|
|
58
58
|
|
59
59
|
class SecurityEventType(Enum):
|
60
60
|
"""Types of security events monitored in real-time."""
|
61
|
-
|
61
|
+
|
62
62
|
UNAUTHORIZED_ACCESS = "UNAUTHORIZED_ACCESS"
|
63
63
|
PRIVILEGE_ESCALATION = "PRIVILEGE_ESCALATION"
|
64
64
|
DATA_EXFILTRATION = "DATA_EXFILTRATION"
|
@@ -72,7 +72,7 @@ class SecurityEventType(Enum):
|
|
72
72
|
@dataclass
|
73
73
|
class SecurityEvent:
|
74
74
|
"""Real-time security event with automated response capability."""
|
75
|
-
|
75
|
+
|
76
76
|
event_id: str
|
77
77
|
timestamp: datetime
|
78
78
|
event_type: SecurityEventType
|
@@ -95,7 +95,7 @@ class SecurityEvent:
|
|
95
95
|
@dataclass
|
96
96
|
class SecurityDashboard:
|
97
97
|
"""Executive security dashboard with business metrics."""
|
98
|
-
|
98
|
+
|
99
99
|
dashboard_id: str
|
100
100
|
timestamp: datetime
|
101
101
|
accounts_monitored: int
|
@@ -116,10 +116,10 @@ class RealTimeSecurityMonitor:
|
|
116
116
|
"""
|
117
117
|
Real-Time AWS Security Monitoring Framework
|
118
118
|
===========================================
|
119
|
-
|
119
|
+
|
120
120
|
Provides continuous security monitoring across multi-account AWS environments
|
121
121
|
with real-time threat detection, automated response, and executive reporting.
|
122
|
-
|
122
|
+
|
123
123
|
Enterprise Features:
|
124
124
|
- 61-account concurrent monitoring via AWS Organizations
|
125
125
|
- Real-time event processing with <30 second detection time
|
@@ -130,31 +130,31 @@ class RealTimeSecurityMonitor:
|
|
130
130
|
"""
|
131
131
|
|
132
132
|
def __init__(
|
133
|
-
self,
|
134
|
-
profile: str = "default",
|
133
|
+
self,
|
134
|
+
profile: str = "default",
|
135
135
|
output_dir: str = "./artifacts/security-monitoring",
|
136
|
-
max_concurrent_accounts: int = 61
|
136
|
+
max_concurrent_accounts: int = 61,
|
137
137
|
):
|
138
138
|
self.profile = profile
|
139
139
|
self.output_dir = Path(output_dir)
|
140
140
|
self.output_dir.mkdir(parents=True, exist_ok=True)
|
141
141
|
self.max_concurrent_accounts = max_concurrent_accounts
|
142
|
-
|
142
|
+
|
143
143
|
# Initialize secure session
|
144
144
|
self.session = self._create_secure_session()
|
145
|
-
|
145
|
+
|
146
146
|
# Real-time monitoring components
|
147
147
|
self.event_processor = SecurityEventProcessor(self.session, self.output_dir)
|
148
148
|
self.threat_detector = ThreatDetectionEngine(self.session)
|
149
149
|
self.response_engine = AutomatedResponseEngine(self.session, self.output_dir)
|
150
150
|
self.mcp_connector = MCPSecurityConnector()
|
151
|
-
|
151
|
+
|
152
152
|
# Monitoring state
|
153
153
|
self.monitoring_active = False
|
154
154
|
self.monitored_accounts = []
|
155
155
|
self.event_queue = asyncio.Queue()
|
156
156
|
self.response_queue = asyncio.Queue()
|
157
|
-
|
157
|
+
|
158
158
|
print_header("Real-Time Security Monitor", "1.0.0")
|
159
159
|
print_info(f"Profile: {profile}")
|
160
160
|
print_info(f"Max concurrent accounts: {max_concurrent_accounts}")
|
@@ -163,46 +163,46 @@ class RealTimeSecurityMonitor:
|
|
163
163
|
def _create_secure_session(self) -> boto3.Session:
|
164
164
|
"""Create secure AWS session for monitoring."""
|
165
165
|
try:
|
166
|
-
session = create_management_session(
|
167
|
-
|
166
|
+
session = create_management_session(profile_name=self.profile)
|
167
|
+
|
168
168
|
# Validate session credentials
|
169
169
|
sts_client = session.client("sts")
|
170
170
|
identity = sts_client.get_caller_identity()
|
171
|
-
|
171
|
+
|
172
172
|
print_info(f"Secure monitoring session established for: {identity.get('Arn', 'Unknown')}")
|
173
173
|
return session
|
174
|
-
|
174
|
+
|
175
175
|
except (ClientError, NoCredentialsError) as e:
|
176
176
|
print_error(f"Failed to establish secure session: {str(e)}")
|
177
177
|
raise
|
178
178
|
|
179
179
|
async def start_real_time_monitoring(
|
180
|
-
self,
|
180
|
+
self,
|
181
181
|
target_accounts: Optional[List[str]] = None,
|
182
|
-
monitoring_duration: Optional[int] = None # minutes, None for continuous
|
182
|
+
monitoring_duration: Optional[int] = None, # minutes, None for continuous
|
183
183
|
) -> SecurityDashboard:
|
184
184
|
"""
|
185
185
|
Start real-time security monitoring across organization accounts.
|
186
|
-
|
186
|
+
|
187
187
|
Args:
|
188
188
|
target_accounts: Specific accounts to monitor (None for all organization accounts)
|
189
189
|
monitoring_duration: Duration in minutes (None for continuous monitoring)
|
190
|
-
|
190
|
+
|
191
191
|
Returns:
|
192
192
|
SecurityDashboard with real-time security metrics
|
193
193
|
"""
|
194
|
-
|
194
|
+
|
195
195
|
if not target_accounts:
|
196
196
|
target_accounts = await self._discover_organization_accounts()
|
197
|
-
|
197
|
+
|
198
198
|
# Limit to max concurrent accounts
|
199
199
|
if len(target_accounts) > self.max_concurrent_accounts:
|
200
200
|
print_warning(f"Limiting monitoring to {self.max_concurrent_accounts} accounts")
|
201
|
-
target_accounts = target_accounts[:self.max_concurrent_accounts]
|
202
|
-
|
201
|
+
target_accounts = target_accounts[: self.max_concurrent_accounts]
|
202
|
+
|
203
203
|
self.monitored_accounts = target_accounts
|
204
204
|
self.monitoring_active = True
|
205
|
-
|
205
|
+
|
206
206
|
console.print(
|
207
207
|
create_panel(
|
208
208
|
f"[bold cyan]Real-Time Security Monitoring Activated[/bold cyan]\n\n"
|
@@ -213,462 +213,458 @@ class RealTimeSecurityMonitor:
|
|
213
213
|
border_style="cyan",
|
214
214
|
)
|
215
215
|
)
|
216
|
-
|
216
|
+
|
217
217
|
# Start monitoring tasks
|
218
218
|
monitoring_tasks = [
|
219
|
-
asyncio.create_task(self._monitor_account_security(account_id))
|
220
|
-
for account_id in target_accounts
|
219
|
+
asyncio.create_task(self._monitor_account_security(account_id)) for account_id in target_accounts
|
221
220
|
]
|
222
|
-
|
221
|
+
|
223
222
|
# Start event processing
|
224
223
|
event_processing_task = asyncio.create_task(self._process_security_events())
|
225
|
-
|
224
|
+
|
226
225
|
# Start response engine
|
227
226
|
response_task = asyncio.create_task(self._execute_automated_responses())
|
228
|
-
|
227
|
+
|
229
228
|
# Start dashboard updates
|
230
229
|
dashboard_task = asyncio.create_task(self._update_security_dashboard())
|
231
|
-
|
230
|
+
|
232
231
|
all_tasks = monitoring_tasks + [event_processing_task, response_task, dashboard_task]
|
233
|
-
|
232
|
+
|
234
233
|
try:
|
235
234
|
if monitoring_duration:
|
236
235
|
# Run for specified duration
|
237
236
|
await asyncio.wait_for(
|
238
|
-
asyncio.gather(*all_tasks, return_exceptions=True),
|
239
|
-
timeout=monitoring_duration * 60
|
237
|
+
asyncio.gather(*all_tasks, return_exceptions=True), timeout=monitoring_duration * 60
|
240
238
|
)
|
241
239
|
else:
|
242
240
|
# Run continuously
|
243
241
|
await asyncio.gather(*all_tasks, return_exceptions=True)
|
244
|
-
|
242
|
+
|
245
243
|
except asyncio.TimeoutError:
|
246
244
|
print_info(f"Monitoring duration completed: {monitoring_duration} minutes")
|
247
245
|
except KeyboardInterrupt:
|
248
246
|
print_warning("Monitoring interrupted by user")
|
249
247
|
finally:
|
250
248
|
self.monitoring_active = False
|
251
|
-
|
249
|
+
|
252
250
|
# Cancel all tasks
|
253
251
|
for task in all_tasks:
|
254
252
|
if not task.done():
|
255
253
|
task.cancel()
|
256
|
-
|
254
|
+
|
257
255
|
# Generate final dashboard
|
258
256
|
final_dashboard = await self._generate_final_dashboard()
|
259
|
-
|
257
|
+
|
260
258
|
print_success("Real-time monitoring session completed")
|
261
259
|
return final_dashboard
|
262
260
|
|
263
261
|
async def _monitor_account_security(self, account_id: str):
|
264
262
|
"""Monitor security events for a specific account."""
|
265
|
-
|
263
|
+
|
266
264
|
print_info(f"Starting security monitoring for account: {account_id}")
|
267
|
-
|
265
|
+
|
268
266
|
while self.monitoring_active:
|
269
267
|
try:
|
270
268
|
# Monitor multiple security event sources
|
271
|
-
|
269
|
+
|
272
270
|
# 1. CloudTrail events for API activity
|
273
271
|
cloudtrail_events = await self._monitor_cloudtrail_events(account_id)
|
274
272
|
for event in cloudtrail_events:
|
275
273
|
await self.event_queue.put(event)
|
276
|
-
|
274
|
+
|
277
275
|
# 2. Config compliance changes
|
278
276
|
config_events = await self._monitor_config_compliance(account_id)
|
279
277
|
for event in config_events:
|
280
278
|
await self.event_queue.put(event)
|
281
|
-
|
279
|
+
|
282
280
|
# 3. Security Hub findings
|
283
281
|
security_hub_events = await self._monitor_security_hub(account_id)
|
284
282
|
for event in security_hub_events:
|
285
283
|
await self.event_queue.put(event)
|
286
|
-
|
284
|
+
|
287
285
|
# 4. Real-time resource changes
|
288
286
|
resource_events = await self._monitor_resource_changes(account_id)
|
289
287
|
for event in resource_events:
|
290
288
|
await self.event_queue.put(event)
|
291
|
-
|
289
|
+
|
292
290
|
# Monitor every 30 seconds for real-time detection
|
293
291
|
await asyncio.sleep(30)
|
294
|
-
|
292
|
+
|
295
293
|
except Exception as e:
|
296
294
|
print_error(f"Error monitoring account {account_id}: {str(e)}")
|
297
295
|
await asyncio.sleep(60) # Back off on errors
|
298
296
|
|
299
297
|
async def _monitor_cloudtrail_events(self, account_id: str) -> List[SecurityEvent]:
|
300
298
|
"""Monitor CloudTrail for security-relevant API events."""
|
301
|
-
|
299
|
+
|
302
300
|
events = []
|
303
|
-
|
301
|
+
|
304
302
|
try:
|
305
303
|
# Assume cross-account role if needed
|
306
304
|
session = await self._get_account_session(account_id)
|
307
|
-
cloudtrail = session.client(
|
308
|
-
|
305
|
+
cloudtrail = session.client("cloudtrail")
|
306
|
+
|
309
307
|
# Look for events in the last minute
|
310
308
|
end_time = datetime.utcnow()
|
311
309
|
start_time = end_time - timedelta(minutes=1)
|
312
|
-
|
310
|
+
|
313
311
|
# Get recent events
|
314
312
|
response = cloudtrail.lookup_events(
|
315
313
|
LookupAttributes=[
|
316
|
-
{
|
317
|
-
'AttributeKey': 'EventTime',
|
318
|
-
'AttributeValue': start_time.strftime('%Y-%m-%d %H:%M:%S')
|
319
|
-
}
|
314
|
+
{"AttributeKey": "EventTime", "AttributeValue": start_time.strftime("%Y-%m-%d %H:%M:%S")}
|
320
315
|
],
|
321
316
|
StartTime=start_time,
|
322
317
|
EndTime=end_time,
|
323
|
-
MaxItems=50 # Limit for real-time processing
|
318
|
+
MaxItems=50, # Limit for real-time processing
|
324
319
|
)
|
325
|
-
|
326
|
-
for event_record in response.get(
|
327
|
-
event_name = event_record.get(
|
328
|
-
|
320
|
+
|
321
|
+
for event_record in response.get("Events", []):
|
322
|
+
event_name = event_record.get("EventName", "")
|
323
|
+
|
329
324
|
# Check for high-risk events
|
330
325
|
if self._is_high_risk_event(event_name):
|
331
|
-
security_event = self._create_security_event_from_cloudtrail(
|
332
|
-
event_record, account_id
|
333
|
-
)
|
326
|
+
security_event = self._create_security_event_from_cloudtrail(event_record, account_id)
|
334
327
|
events.append(security_event)
|
335
|
-
|
328
|
+
|
336
329
|
except ClientError as e:
|
337
330
|
print_warning(f"CloudTrail monitoring failed for {account_id}: {str(e)}")
|
338
|
-
|
331
|
+
|
339
332
|
return events
|
340
333
|
|
341
334
|
def _is_high_risk_event(self, event_name: str) -> bool:
|
342
335
|
"""Determine if CloudTrail event represents high security risk."""
|
343
|
-
|
336
|
+
|
344
337
|
high_risk_events = [
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
|
338
|
+
"CreateUser",
|
339
|
+
"DeleteUser",
|
340
|
+
"AttachUserPolicy",
|
341
|
+
"DetachUserPolicy",
|
342
|
+
"CreateRole",
|
343
|
+
"DeleteRole",
|
344
|
+
"AttachRolePolicy",
|
345
|
+
"DetachRolePolicy",
|
346
|
+
"PutBucketAcl",
|
347
|
+
"PutBucketPolicy",
|
348
|
+
"DeleteBucketPolicy",
|
349
|
+
"AuthorizeSecurityGroupIngress",
|
350
|
+
"AuthorizeSecurityGroupEgress",
|
351
|
+
"RevokeSecurityGroupIngress",
|
352
|
+
"RevokeSecurityGroupEgress",
|
353
|
+
"CreateSecurityGroup",
|
354
|
+
"DeleteSecurityGroup",
|
355
|
+
"ConsoleLogin",
|
356
|
+
"AssumeRole",
|
357
|
+
"AssumeRoleWithSAML",
|
352
358
|
]
|
353
|
-
|
359
|
+
|
354
360
|
return event_name in high_risk_events
|
355
361
|
|
356
|
-
def _create_security_event_from_cloudtrail(
|
357
|
-
self,
|
358
|
-
event_record: Dict[str, Any],
|
359
|
-
account_id: str
|
360
|
-
) -> SecurityEvent:
|
362
|
+
def _create_security_event_from_cloudtrail(self, event_record: Dict[str, Any], account_id: str) -> SecurityEvent:
|
361
363
|
"""Create SecurityEvent from CloudTrail event record."""
|
362
|
-
|
363
|
-
event_name = event_record.get(
|
364
|
-
event_time = event_record.get(
|
365
|
-
|
364
|
+
|
365
|
+
event_name = event_record.get("EventName", "Unknown")
|
366
|
+
event_time = event_record.get("EventTime", datetime.utcnow())
|
367
|
+
|
366
368
|
# Determine event type and threat level
|
367
369
|
event_type = self._classify_event_type(event_name)
|
368
370
|
threat_level = self._assess_threat_level(event_name, event_record)
|
369
|
-
|
371
|
+
|
370
372
|
# Extract resource information
|
371
|
-
resources = event_record.get(
|
372
|
-
resource_arn = resources[0].get(
|
373
|
-
|
373
|
+
resources = event_record.get("Resources", [])
|
374
|
+
resource_arn = resources[0].get("ResourceName", "") if resources else f"arn:aws::{account_id}:unknown"
|
375
|
+
|
374
376
|
# Extract user information
|
375
|
-
user_identity = event_record.get(
|
376
|
-
source_ip = event_record.get(
|
377
|
-
|
377
|
+
user_identity = event_record.get("Username", "Unknown")
|
378
|
+
source_ip = event_record.get("SourceIPAddress", None)
|
379
|
+
|
378
380
|
return SecurityEvent(
|
379
381
|
event_id=f"ct-{int(time.time())}-{account_id}",
|
380
382
|
timestamp=event_time if isinstance(event_time, datetime) else datetime.utcnow(),
|
381
383
|
event_type=event_type,
|
382
384
|
threat_level=threat_level,
|
383
385
|
account_id=account_id,
|
384
|
-
region=event_record.get(
|
386
|
+
region=event_record.get("AwsRegion", "unknown"),
|
385
387
|
resource_arn=resource_arn,
|
386
|
-
event_details={
|
387
|
-
'event_name': event_name,
|
388
|
-
'cloudtrail_record': event_record
|
389
|
-
},
|
388
|
+
event_details={"event_name": event_name, "cloudtrail_record": event_record},
|
390
389
|
source_ip=source_ip,
|
391
390
|
user_identity=user_identity,
|
392
391
|
auto_response_available=self._has_auto_response(event_name),
|
393
392
|
auto_response_command=self._get_auto_response_command(event_name, event_record),
|
394
393
|
compliance_impact=self._assess_compliance_impact(event_name),
|
395
|
-
business_impact=self._assess_business_impact(threat_level)
|
394
|
+
business_impact=self._assess_business_impact(threat_level),
|
396
395
|
)
|
397
396
|
|
398
397
|
def _classify_event_type(self, event_name: str) -> SecurityEventType:
|
399
398
|
"""Classify CloudTrail event into security event type."""
|
400
|
-
|
399
|
+
|
401
400
|
event_name_lower = event_name.lower()
|
402
|
-
|
403
|
-
if
|
401
|
+
|
402
|
+
if "login" in event_name_lower or "assume" in event_name_lower:
|
404
403
|
return SecurityEventType.UNAUTHORIZED_ACCESS
|
405
|
-
elif
|
404
|
+
elif "policy" in event_name_lower or "role" in event_name_lower:
|
406
405
|
return SecurityEventType.IAM_POLICY_CHANGE
|
407
|
-
elif
|
406
|
+
elif "securitygroup" in event_name_lower:
|
408
407
|
return SecurityEventType.SECURITY_GROUP_CHANGE
|
409
|
-
elif
|
408
|
+
elif "attach" in event_name_lower or "detach" in event_name_lower:
|
410
409
|
return SecurityEventType.PRIVILEGE_ESCALATION
|
411
410
|
else:
|
412
411
|
return SecurityEventType.CONFIGURATION_DRIFT
|
413
412
|
|
414
413
|
def _assess_threat_level(self, event_name: str, event_record: Dict[str, Any]) -> ThreatLevel:
|
415
414
|
"""Assess threat level based on event characteristics."""
|
416
|
-
|
415
|
+
|
417
416
|
# Critical events requiring immediate response
|
418
417
|
critical_events = [
|
419
|
-
|
420
|
-
|
418
|
+
"DeleteUser",
|
419
|
+
"DeleteRole",
|
420
|
+
"DetachUserPolicy",
|
421
|
+
"DetachRolePolicy",
|
422
|
+
"PutBucketAcl",
|
423
|
+
"DeleteBucketPolicy",
|
421
424
|
]
|
422
|
-
|
425
|
+
|
423
426
|
# High-risk events requiring response within 1 hour
|
424
427
|
high_risk_events = [
|
425
|
-
|
426
|
-
|
428
|
+
"CreateUser",
|
429
|
+
"CreateRole",
|
430
|
+
"AttachUserPolicy",
|
431
|
+
"AttachRolePolicy",
|
432
|
+
"AuthorizeSecurityGroupIngress",
|
433
|
+
"CreateSecurityGroup",
|
427
434
|
]
|
428
|
-
|
435
|
+
|
429
436
|
if event_name in critical_events:
|
430
437
|
return ThreatLevel.CRITICAL
|
431
438
|
elif event_name in high_risk_events:
|
432
439
|
return ThreatLevel.HIGH
|
433
|
-
elif
|
440
|
+
elif "error" in event_record.get("ErrorCode", "").lower():
|
434
441
|
return ThreatLevel.MEDIUM # Failed attempts are medium risk
|
435
442
|
else:
|
436
443
|
return ThreatLevel.LOW
|
437
444
|
|
438
445
|
def _has_auto_response(self, event_name: str) -> bool:
|
439
446
|
"""Check if event has automated response available."""
|
440
|
-
|
447
|
+
|
441
448
|
auto_response_events = [
|
442
|
-
|
443
|
-
|
444
|
-
|
449
|
+
"AuthorizeSecurityGroupIngress", # Can auto-restrict
|
450
|
+
"PutBucketAcl", # Can auto-remediate public access
|
451
|
+
"AttachUserPolicy", # Can auto-review and potentially detach
|
445
452
|
]
|
446
|
-
|
453
|
+
|
447
454
|
return event_name in auto_response_events
|
448
455
|
|
449
|
-
def _get_auto_response_command(
|
450
|
-
self,
|
451
|
-
event_name: str,
|
452
|
-
event_record: Dict[str, Any]
|
453
|
-
) -> Optional[str]:
|
456
|
+
def _get_auto_response_command(self, event_name: str, event_record: Dict[str, Any]) -> Optional[str]:
|
454
457
|
"""Get automated response command for event."""
|
455
|
-
|
456
|
-
if event_name ==
|
458
|
+
|
459
|
+
if event_name == "AuthorizeSecurityGroupIngress":
|
457
460
|
# Extract security group ID from event
|
458
|
-
resources = event_record.get(
|
461
|
+
resources = event_record.get("Resources", [])
|
459
462
|
if resources:
|
460
|
-
sg_id = resources[0].get(
|
463
|
+
sg_id = resources[0].get("ResourceName", "").split("/")[-1]
|
461
464
|
return f"runbooks security remediate --type security_group --resource-id {sg_id} --action restrict"
|
462
|
-
|
463
|
-
elif event_name ==
|
465
|
+
|
466
|
+
elif event_name == "PutBucketAcl":
|
464
467
|
# Extract bucket name from event
|
465
|
-
resources = event_record.get(
|
468
|
+
resources = event_record.get("Resources", [])
|
466
469
|
if resources:
|
467
|
-
bucket_name = resources[0].get(
|
470
|
+
bucket_name = resources[0].get("ResourceName", "").split("/")[-1]
|
468
471
|
return f"runbooks security remediate --type s3_public_access --resource-id {bucket_name} --action block"
|
469
|
-
|
472
|
+
|
470
473
|
return None
|
471
474
|
|
472
475
|
def _assess_compliance_impact(self, event_name: str) -> List[str]:
|
473
476
|
"""Assess which compliance frameworks are impacted by event."""
|
474
|
-
|
477
|
+
|
475
478
|
compliance_impact = []
|
476
|
-
|
479
|
+
|
477
480
|
# IAM events impact multiple frameworks
|
478
|
-
if
|
479
|
-
compliance_impact.extend([
|
480
|
-
|
481
|
+
if "user" in event_name.lower() or "role" in event_name.lower() or "policy" in event_name.lower():
|
482
|
+
compliance_impact.extend(["SOC2", "AWS Well-Architected", "CIS Benchmarks"])
|
483
|
+
|
481
484
|
# S3 events impact data protection frameworks
|
482
|
-
if
|
483
|
-
compliance_impact.extend([
|
484
|
-
|
485
|
+
if "bucket" in event_name.lower():
|
486
|
+
compliance_impact.extend(["SOC2", "PCI-DSS", "HIPAA"])
|
487
|
+
|
485
488
|
# Network events impact security frameworks
|
486
|
-
if
|
487
|
-
compliance_impact.extend([
|
488
|
-
|
489
|
+
if "securitygroup" in event_name.lower():
|
490
|
+
compliance_impact.extend(["AWS Well-Architected", "CIS Benchmarks"])
|
491
|
+
|
489
492
|
return compliance_impact
|
490
493
|
|
491
494
|
def _assess_business_impact(self, threat_level: ThreatLevel) -> str:
|
492
495
|
"""Assess business impact of security event."""
|
493
|
-
|
496
|
+
|
494
497
|
impact_mapping = {
|
495
498
|
ThreatLevel.CRITICAL: "high",
|
496
|
-
ThreatLevel.HIGH: "medium",
|
499
|
+
ThreatLevel.HIGH: "medium",
|
497
500
|
ThreatLevel.MEDIUM: "low",
|
498
501
|
ThreatLevel.LOW: "minimal",
|
499
|
-
ThreatLevel.INFO: "none"
|
502
|
+
ThreatLevel.INFO: "none",
|
500
503
|
}
|
501
|
-
|
504
|
+
|
502
505
|
return impact_mapping.get(threat_level, "unknown")
|
503
506
|
|
504
507
|
async def _monitor_config_compliance(self, account_id: str) -> List[SecurityEvent]:
|
505
508
|
"""Monitor AWS Config for compliance changes."""
|
506
|
-
|
509
|
+
|
507
510
|
events = []
|
508
|
-
|
511
|
+
|
509
512
|
try:
|
510
513
|
session = await self._get_account_session(account_id)
|
511
|
-
config = session.client(
|
512
|
-
|
514
|
+
config = session.client("config")
|
515
|
+
|
513
516
|
# Get compliance changes in the last minute
|
514
517
|
end_time = datetime.utcnow()
|
515
518
|
start_time = end_time - timedelta(minutes=1)
|
516
|
-
|
519
|
+
|
517
520
|
# Check for compliance evaluation results
|
518
521
|
response = config.get_compliance_details_by_config_rule(
|
519
|
-
ConfigRuleName=
|
520
|
-
ComplianceTypes=[
|
521
|
-
Limit=20
|
522
|
+
ConfigRuleName="securityhub-*", # Security Hub rules
|
523
|
+
ComplianceTypes=["NON_COMPLIANT"],
|
524
|
+
Limit=20,
|
522
525
|
)
|
523
|
-
|
524
|
-
for evaluation_result in response.get(
|
525
|
-
if evaluation_result.get(
|
526
|
-
|
526
|
+
|
527
|
+
for evaluation_result in response.get("EvaluationResults", []):
|
528
|
+
if evaluation_result.get("ConfigRuleInvokedTime", datetime.min) >= start_time:
|
527
529
|
security_event = SecurityEvent(
|
528
530
|
event_id=f"config-{int(time.time())}-{account_id}",
|
529
|
-
timestamp=evaluation_result.get(
|
531
|
+
timestamp=evaluation_result.get("ResultRecordedTime", datetime.utcnow()),
|
530
532
|
event_type=SecurityEventType.COMPLIANCE_VIOLATION,
|
531
533
|
threat_level=ThreatLevel.MEDIUM,
|
532
534
|
account_id=account_id,
|
533
|
-
region=session.region_name or
|
534
|
-
resource_arn=evaluation_result.get(
|
535
|
+
region=session.region_name or "us-east-1",
|
536
|
+
resource_arn=evaluation_result.get("EvaluationResultIdentifier", {})
|
537
|
+
.get("EvaluationResultQualifier", {})
|
538
|
+
.get("ResourceId", ""),
|
535
539
|
event_details={
|
536
|
-
|
537
|
-
|
540
|
+
"config_rule": evaluation_result.get("EvaluationResultIdentifier", {})
|
541
|
+
.get("EvaluationResultQualifier", {})
|
542
|
+
.get("ConfigRuleName", ""),
|
543
|
+
"compliance_type": evaluation_result.get("ComplianceType", "UNKNOWN"),
|
538
544
|
},
|
539
|
-
compliance_impact=[
|
540
|
-
business_impact="medium"
|
545
|
+
compliance_impact=["AWS Config", "Security Hub"],
|
546
|
+
business_impact="medium",
|
541
547
|
)
|
542
|
-
|
548
|
+
|
543
549
|
events.append(security_event)
|
544
|
-
|
550
|
+
|
545
551
|
except ClientError as e:
|
546
552
|
print_warning(f"Config monitoring failed for {account_id}: {str(e)}")
|
547
|
-
|
553
|
+
|
548
554
|
return events
|
549
555
|
|
550
556
|
async def _monitor_security_hub(self, account_id: str) -> List[SecurityEvent]:
|
551
557
|
"""Monitor Security Hub for new findings."""
|
552
|
-
|
558
|
+
|
553
559
|
events = []
|
554
|
-
|
560
|
+
|
555
561
|
try:
|
556
562
|
session = await self._get_account_session(account_id)
|
557
|
-
security_hub = session.client(
|
558
|
-
|
563
|
+
security_hub = session.client("securityhub")
|
564
|
+
|
559
565
|
# Get findings from the last minute
|
560
566
|
end_time = datetime.utcnow()
|
561
567
|
start_time = end_time - timedelta(minutes=1)
|
562
|
-
|
568
|
+
|
563
569
|
response = security_hub.get_findings(
|
564
570
|
Filters={
|
565
|
-
|
566
|
-
|
567
|
-
|
568
|
-
|
569
|
-
}
|
571
|
+
"UpdatedAt": [{"Start": start_time.isoformat() + "Z", "End": end_time.isoformat() + "Z"}],
|
572
|
+
"SeverityLabel": [
|
573
|
+
{"Value": "HIGH", "Comparison": "EQUALS"},
|
574
|
+
{"Value": "CRITICAL", "Comparison": "EQUALS"},
|
570
575
|
],
|
571
|
-
'SeverityLabel': [
|
572
|
-
{'Value': 'HIGH', 'Comparison': 'EQUALS'},
|
573
|
-
{'Value': 'CRITICAL', 'Comparison': 'EQUALS'}
|
574
|
-
]
|
575
576
|
},
|
576
|
-
MaxResults=20
|
577
|
+
MaxResults=20,
|
577
578
|
)
|
578
|
-
|
579
|
-
for finding in response.get(
|
580
|
-
|
579
|
+
|
580
|
+
for finding in response.get("Findings", []):
|
581
581
|
# Map Security Hub severity to threat level
|
582
|
-
severity = finding.get(
|
583
|
-
threat_level = ThreatLevel.CRITICAL if severity ==
|
584
|
-
|
582
|
+
severity = finding.get("Severity", {}).get("Label", "MEDIUM")
|
583
|
+
threat_level = ThreatLevel.CRITICAL if severity == "CRITICAL" else ThreatLevel.HIGH
|
584
|
+
|
585
585
|
security_event = SecurityEvent(
|
586
586
|
event_id=f"sh-{finding.get('Id', 'unknown')}",
|
587
|
-
timestamp=datetime.fromisoformat(finding.get(
|
587
|
+
timestamp=datetime.fromisoformat(finding.get("UpdatedAt", "").replace("Z", "+00:00")),
|
588
588
|
event_type=SecurityEventType.COMPLIANCE_VIOLATION,
|
589
589
|
threat_level=threat_level,
|
590
590
|
account_id=account_id,
|
591
|
-
region=finding.get(
|
592
|
-
resource_arn=finding.get(
|
591
|
+
region=finding.get("Region", "unknown"),
|
592
|
+
resource_arn=finding.get("Resources", [{}])[0].get("Id", ""),
|
593
593
|
event_details={
|
594
|
-
|
595
|
-
|
596
|
-
|
597
|
-
|
594
|
+
"title": finding.get("Title", ""),
|
595
|
+
"description": finding.get("Description", ""),
|
596
|
+
"finding_id": finding.get("Id", ""),
|
597
|
+
"generator_id": finding.get("GeneratorId", ""),
|
598
598
|
},
|
599
|
-
compliance_impact=[
|
600
|
-
business_impact=self._assess_business_impact(threat_level)
|
599
|
+
compliance_impact=["Security Hub", "AWS Well-Architected"],
|
600
|
+
business_impact=self._assess_business_impact(threat_level),
|
601
601
|
)
|
602
|
-
|
602
|
+
|
603
603
|
events.append(security_event)
|
604
|
-
|
604
|
+
|
605
605
|
except ClientError as e:
|
606
606
|
print_warning(f"Security Hub monitoring failed for {account_id}: {str(e)}")
|
607
|
-
|
607
|
+
|
608
608
|
return events
|
609
609
|
|
610
610
|
async def _monitor_resource_changes(self, account_id: str) -> List[SecurityEvent]:
|
611
611
|
"""Monitor real-time resource configuration changes."""
|
612
|
-
|
612
|
+
|
613
613
|
events = []
|
614
|
-
|
614
|
+
|
615
615
|
try:
|
616
616
|
session = await self._get_account_session(account_id)
|
617
|
-
|
617
|
+
|
618
618
|
# Monitor S3 bucket policy changes
|
619
619
|
s3_events = await self._monitor_s3_changes(session, account_id)
|
620
620
|
events.extend(s3_events)
|
621
|
-
|
621
|
+
|
622
622
|
# Monitor EC2 security group changes
|
623
623
|
ec2_events = await self._monitor_ec2_changes(session, account_id)
|
624
624
|
events.extend(ec2_events)
|
625
|
-
|
625
|
+
|
626
626
|
except Exception as e:
|
627
627
|
print_warning(f"Resource monitoring failed for {account_id}: {str(e)}")
|
628
|
-
|
628
|
+
|
629
629
|
return events
|
630
630
|
|
631
631
|
async def _monitor_s3_changes(self, session: boto3.Session, account_id: str) -> List[SecurityEvent]:
|
632
632
|
"""Monitor S3 bucket configuration changes."""
|
633
|
-
|
633
|
+
|
634
634
|
events = []
|
635
|
-
|
635
|
+
|
636
636
|
try:
|
637
|
-
s3 = session.client(
|
638
|
-
|
637
|
+
s3 = session.client("s3")
|
638
|
+
|
639
639
|
# Check for buckets with public access (simplified check)
|
640
|
-
buckets = s3.list_buckets().get(
|
641
|
-
|
640
|
+
buckets = s3.list_buckets().get("Buckets", [])
|
641
|
+
|
642
642
|
for bucket in buckets[:10]: # Limit for real-time processing
|
643
|
-
bucket_name = bucket[
|
644
|
-
|
643
|
+
bucket_name = bucket["Name"]
|
644
|
+
|
645
645
|
try:
|
646
646
|
# Check if bucket allows public access
|
647
647
|
public_access_block = s3.get_public_access_block(Bucket=bucket_name)
|
648
|
-
|
649
|
-
config = public_access_block[
|
648
|
+
|
649
|
+
config = public_access_block["PublicAccessBlockConfiguration"]
|
650
650
|
if not all(config.values()): # If any setting is False
|
651
|
-
|
652
651
|
security_event = SecurityEvent(
|
653
652
|
event_id=f"s3-public-{int(time.time())}-{account_id}",
|
654
653
|
timestamp=datetime.utcnow(),
|
655
654
|
event_type=SecurityEventType.CONFIGURATION_DRIFT,
|
656
655
|
threat_level=ThreatLevel.HIGH,
|
657
656
|
account_id=account_id,
|
658
|
-
region=
|
657
|
+
region="us-east-1", # S3 is global
|
659
658
|
resource_arn=f"arn:aws:s3:::{bucket_name}",
|
660
|
-
event_details={
|
661
|
-
'bucket_name': bucket_name,
|
662
|
-
'public_access_config': config
|
663
|
-
},
|
659
|
+
event_details={"bucket_name": bucket_name, "public_access_config": config},
|
664
660
|
auto_response_available=True,
|
665
661
|
auto_response_command=f"runbooks security remediate --type s3_public_access --resource-id {bucket_name}",
|
666
|
-
compliance_impact=[
|
667
|
-
business_impact="high"
|
662
|
+
compliance_impact=["SOC2", "PCI-DSS", "HIPAA"],
|
663
|
+
business_impact="high",
|
668
664
|
)
|
669
|
-
|
665
|
+
|
670
666
|
events.append(security_event)
|
671
|
-
|
667
|
+
|
672
668
|
except ClientError:
|
673
669
|
# Bucket doesn't have public access block configured - potential issue
|
674
670
|
security_event = SecurityEvent(
|
@@ -677,147 +673,143 @@ class RealTimeSecurityMonitor:
|
|
677
673
|
event_type=SecurityEventType.CONFIGURATION_DRIFT,
|
678
674
|
threat_level=ThreatLevel.MEDIUM,
|
679
675
|
account_id=account_id,
|
680
|
-
region=
|
676
|
+
region="us-east-1",
|
681
677
|
resource_arn=f"arn:aws:s3:::{bucket_name}",
|
682
|
-
event_details={
|
683
|
-
'bucket_name': bucket_name,
|
684
|
-
'issue': 'No public access block configured'
|
685
|
-
},
|
678
|
+
event_details={"bucket_name": bucket_name, "issue": "No public access block configured"},
|
686
679
|
auto_response_available=True,
|
687
680
|
auto_response_command=f"runbooks security remediate --type s3_enable_pab --resource-id {bucket_name}",
|
688
|
-
compliance_impact=[
|
689
|
-
business_impact="medium"
|
681
|
+
compliance_impact=["AWS Well-Architected"],
|
682
|
+
business_impact="medium",
|
690
683
|
)
|
691
|
-
|
684
|
+
|
692
685
|
events.append(security_event)
|
693
|
-
|
686
|
+
|
694
687
|
except ClientError as e:
|
695
688
|
print_warning(f"S3 monitoring failed: {str(e)}")
|
696
|
-
|
689
|
+
|
697
690
|
return events
|
698
691
|
|
699
692
|
async def _monitor_ec2_changes(self, session: boto3.Session, account_id: str) -> List[SecurityEvent]:
|
700
693
|
"""Monitor EC2 security group changes."""
|
701
|
-
|
694
|
+
|
702
695
|
events = []
|
703
|
-
|
696
|
+
|
704
697
|
try:
|
705
|
-
ec2 = session.client(
|
706
|
-
|
698
|
+
ec2 = session.client("ec2")
|
699
|
+
|
707
700
|
# Get security groups and check for open access
|
708
|
-
security_groups = ec2.describe_security_groups().get(
|
709
|
-
|
701
|
+
security_groups = ec2.describe_security_groups().get("SecurityGroups", [])
|
702
|
+
|
710
703
|
for sg in security_groups[:20]: # Limit for real-time processing
|
711
|
-
sg_id = sg[
|
712
|
-
|
704
|
+
sg_id = sg["GroupId"]
|
705
|
+
|
713
706
|
# Check for overly permissive rules
|
714
|
-
for rule in sg.get(
|
715
|
-
for ip_range in rule.get(
|
716
|
-
if ip_range.get(
|
717
|
-
|
718
|
-
port = rule.get('FromPort', 'unknown')
|
707
|
+
for rule in sg.get("IpPermissions", []):
|
708
|
+
for ip_range in rule.get("IpRanges", []):
|
709
|
+
if ip_range.get("CidrIp") == "0.0.0.0/0":
|
710
|
+
port = rule.get("FromPort", "unknown")
|
719
711
|
threat_level = ThreatLevel.CRITICAL if port in [22, 3389] else ThreatLevel.HIGH
|
720
|
-
|
712
|
+
|
721
713
|
security_event = SecurityEvent(
|
722
714
|
event_id=f"sg-open-{int(time.time())}-{sg_id}",
|
723
715
|
timestamp=datetime.utcnow(),
|
724
716
|
event_type=SecurityEventType.SECURITY_GROUP_CHANGE,
|
725
717
|
threat_level=threat_level,
|
726
718
|
account_id=account_id,
|
727
|
-
region=session.region_name or
|
719
|
+
region=session.region_name or "us-east-1",
|
728
720
|
resource_arn=f"arn:aws:ec2:*:{account_id}:security-group/{sg_id}",
|
729
721
|
event_details={
|
730
|
-
|
731
|
-
|
732
|
-
|
722
|
+
"security_group_id": sg_id,
|
723
|
+
"port": port,
|
724
|
+
"protocol": rule.get("IpProtocol", "unknown"),
|
733
725
|
},
|
734
726
|
auto_response_available=True,
|
735
727
|
auto_response_command=f"runbooks security remediate --type security_group --resource-id {sg_id} --action restrict",
|
736
|
-
compliance_impact=[
|
737
|
-
business_impact=self._assess_business_impact(threat_level)
|
728
|
+
compliance_impact=["AWS Well-Architected", "CIS Benchmarks"],
|
729
|
+
business_impact=self._assess_business_impact(threat_level),
|
738
730
|
)
|
739
|
-
|
731
|
+
|
740
732
|
events.append(security_event)
|
741
733
|
break # One event per security group
|
742
|
-
|
734
|
+
|
743
735
|
except ClientError as e:
|
744
736
|
print_warning(f"EC2 monitoring failed: {str(e)}")
|
745
|
-
|
737
|
+
|
746
738
|
return events
|
747
739
|
|
748
740
|
async def _get_account_session(self, account_id: str) -> boto3.Session:
|
749
741
|
"""Get AWS session for specific account (with cross-account role assumption)."""
|
750
|
-
|
742
|
+
|
751
743
|
# For now, return current session
|
752
744
|
# In production, this would assume cross-account roles
|
753
745
|
return self.session
|
754
746
|
|
755
747
|
async def _process_security_events(self):
|
756
748
|
"""Process security events from the event queue."""
|
757
|
-
|
749
|
+
|
758
750
|
print_info("Starting security event processor")
|
759
|
-
|
751
|
+
|
760
752
|
while self.monitoring_active:
|
761
753
|
try:
|
762
754
|
# Get events from queue with timeout
|
763
755
|
try:
|
764
756
|
event = await asyncio.wait_for(self.event_queue.get(), timeout=5.0)
|
765
|
-
|
757
|
+
|
766
758
|
# Process the event
|
767
759
|
await self._handle_security_event(event)
|
768
|
-
|
760
|
+
|
769
761
|
# Mark task as done
|
770
762
|
self.event_queue.task_done()
|
771
|
-
|
763
|
+
|
772
764
|
except asyncio.TimeoutError:
|
773
765
|
continue # No events in queue, continue monitoring
|
774
|
-
|
766
|
+
|
775
767
|
except Exception as e:
|
776
768
|
print_error(f"Error processing security events: {str(e)}")
|
777
769
|
await asyncio.sleep(1)
|
778
770
|
|
779
771
|
async def _handle_security_event(self, event: SecurityEvent):
|
780
772
|
"""Handle individual security event."""
|
781
|
-
|
773
|
+
|
782
774
|
# Log the event
|
783
775
|
self._log_security_event(event)
|
784
|
-
|
776
|
+
|
785
777
|
# Display real-time alert for high/critical events
|
786
778
|
if event.threat_level in [ThreatLevel.CRITICAL, ThreatLevel.HIGH]:
|
787
779
|
self._display_security_alert(event)
|
788
|
-
|
780
|
+
|
789
781
|
# Queue for automated response if available
|
790
782
|
if event.auto_response_available:
|
791
783
|
await self.response_queue.put(event)
|
792
|
-
|
784
|
+
|
793
785
|
# Store event for dashboard and reporting
|
794
786
|
await self._store_security_event(event)
|
795
787
|
|
796
788
|
def _log_security_event(self, event: SecurityEvent):
|
797
789
|
"""Log security event to file and console."""
|
798
|
-
|
790
|
+
|
799
791
|
log_entry = {
|
800
|
-
|
801
|
-
|
802
|
-
|
803
|
-
|
804
|
-
|
805
|
-
|
806
|
-
|
807
|
-
|
808
|
-
|
792
|
+
"timestamp": event.timestamp.isoformat(),
|
793
|
+
"event_id": event.event_id,
|
794
|
+
"event_type": event.event_type.value,
|
795
|
+
"threat_level": event.threat_level.value,
|
796
|
+
"account_id": event.account_id,
|
797
|
+
"resource_arn": event.resource_arn,
|
798
|
+
"user_identity": event.user_identity,
|
799
|
+
"source_ip": event.source_ip,
|
800
|
+
"business_impact": event.business_impact,
|
809
801
|
}
|
810
|
-
|
802
|
+
|
811
803
|
# Write to log file
|
812
804
|
log_file = self.output_dir / "security_events.jsonl"
|
813
|
-
with open(log_file,
|
814
|
-
f.write(json.dumps(log_entry) +
|
805
|
+
with open(log_file, "a") as f:
|
806
|
+
f.write(json.dumps(log_entry) + "\n")
|
815
807
|
|
816
808
|
def _display_security_alert(self, event: SecurityEvent):
|
817
809
|
"""Display real-time security alert."""
|
818
|
-
|
810
|
+
|
819
811
|
threat_emoji = "🚨" if event.threat_level == ThreatLevel.CRITICAL else "⚠️"
|
820
|
-
|
812
|
+
|
821
813
|
alert_content = (
|
822
814
|
f"[bold red]{threat_emoji} SECURITY ALERT[/bold red]\n\n"
|
823
815
|
f"[bold]Event Type:[/bold] {event.event_type.value}\n"
|
@@ -828,41 +820,43 @@ class RealTimeSecurityMonitor:
|
|
828
820
|
f"[bold]Source IP:[/bold] {event.source_ip or 'Unknown'}\n"
|
829
821
|
f"[bold]Auto Response:[/bold] {'Available' if event.auto_response_available else 'Manual Required'}"
|
830
822
|
)
|
831
|
-
|
832
|
-
console.print(
|
833
|
-
|
834
|
-
|
835
|
-
|
836
|
-
|
823
|
+
|
824
|
+
console.print(
|
825
|
+
create_panel(
|
826
|
+
alert_content,
|
827
|
+
title=f"{threat_emoji} Security Event Detected",
|
828
|
+
border_style="red" if event.threat_level == ThreatLevel.CRITICAL else "yellow",
|
829
|
+
)
|
830
|
+
)
|
837
831
|
|
838
832
|
async def _store_security_event(self, event: SecurityEvent):
|
839
833
|
"""Store security event for dashboard and analysis."""
|
840
|
-
|
834
|
+
|
841
835
|
# Store in memory for dashboard (in production, would use database)
|
842
|
-
if not hasattr(self,
|
836
|
+
if not hasattr(self, "_recent_events"):
|
843
837
|
self._recent_events = []
|
844
|
-
|
838
|
+
|
845
839
|
self._recent_events.append(event)
|
846
|
-
|
840
|
+
|
847
841
|
# Keep only recent events (last 1000)
|
848
842
|
if len(self._recent_events) > 1000:
|
849
843
|
self._recent_events = self._recent_events[-1000:]
|
850
844
|
|
851
845
|
async def _execute_automated_responses(self):
|
852
846
|
"""Execute automated responses from the response queue."""
|
853
|
-
|
847
|
+
|
854
848
|
print_info("Starting automated response engine")
|
855
|
-
|
849
|
+
|
856
850
|
while self.monitoring_active:
|
857
851
|
try:
|
858
852
|
# Get response requests from queue
|
859
853
|
try:
|
860
854
|
event = await asyncio.wait_for(self.response_queue.get(), timeout=5.0)
|
861
|
-
|
855
|
+
|
862
856
|
# Execute automated response
|
863
857
|
response_result = await self.response_engine.execute_response(event)
|
864
|
-
|
865
|
-
if response_result[
|
858
|
+
|
859
|
+
if response_result["success"]:
|
866
860
|
print_success(f"Automated response executed for event: {event.event_id}")
|
867
861
|
event.response_status = "automated_success"
|
868
862
|
event.response_timestamp = datetime.utcnow()
|
@@ -870,66 +864,63 @@ class RealTimeSecurityMonitor:
|
|
870
864
|
print_warning(f"Automated response failed for event: {event.event_id}")
|
871
865
|
event.response_status = "automated_failed"
|
872
866
|
event.manual_response_required = True
|
873
|
-
|
867
|
+
|
874
868
|
self.response_queue.task_done()
|
875
|
-
|
869
|
+
|
876
870
|
except asyncio.TimeoutError:
|
877
871
|
continue
|
878
|
-
|
872
|
+
|
879
873
|
except Exception as e:
|
880
874
|
print_error(f"Error in automated response: {str(e)}")
|
881
875
|
await asyncio.sleep(1)
|
882
876
|
|
883
877
|
async def _update_security_dashboard(self):
|
884
878
|
"""Update security dashboard in real-time."""
|
885
|
-
|
879
|
+
|
886
880
|
print_info("Starting dashboard updates")
|
887
|
-
|
881
|
+
|
888
882
|
while self.monitoring_active:
|
889
883
|
try:
|
890
884
|
# Update dashboard every 60 seconds
|
891
885
|
await asyncio.sleep(60)
|
892
|
-
|
886
|
+
|
893
887
|
dashboard = await self._generate_current_dashboard()
|
894
888
|
await self._display_dashboard_update(dashboard)
|
895
|
-
|
889
|
+
|
896
890
|
except Exception as e:
|
897
891
|
print_error(f"Error updating dashboard: {str(e)}")
|
898
892
|
await asyncio.sleep(60)
|
899
893
|
|
900
894
|
async def _generate_current_dashboard(self) -> SecurityDashboard:
|
901
895
|
"""Generate current security dashboard."""
|
902
|
-
|
903
|
-
if not hasattr(self,
|
896
|
+
|
897
|
+
if not hasattr(self, "_recent_events"):
|
904
898
|
self._recent_events = []
|
905
|
-
|
899
|
+
|
906
900
|
# Calculate metrics for last 24 hours
|
907
901
|
now = datetime.utcnow()
|
908
|
-
events_24h = [
|
909
|
-
|
910
|
-
if (now - event.timestamp).total_seconds() < 86400
|
911
|
-
]
|
912
|
-
|
902
|
+
events_24h = [event for event in self._recent_events if (now - event.timestamp).total_seconds() < 86400]
|
903
|
+
|
913
904
|
critical_events_24h = len([e for e in events_24h if e.threat_level == ThreatLevel.CRITICAL])
|
914
905
|
high_events_24h = len([e for e in events_24h if e.threat_level == ThreatLevel.HIGH])
|
915
906
|
automated_responses_24h = len([e for e in events_24h if e.response_status == "automated_success"])
|
916
|
-
|
907
|
+
|
917
908
|
# Calculate top threats
|
918
909
|
threat_counts = {}
|
919
910
|
for event in events_24h:
|
920
911
|
threat_type = event.event_type.value
|
921
912
|
threat_counts[threat_type] = threat_counts.get(threat_type, 0) + 1
|
922
|
-
|
913
|
+
|
923
914
|
top_threats = [
|
924
|
-
{
|
915
|
+
{"threat_type": threat, "count": count}
|
925
916
|
for threat, count in sorted(threat_counts.items(), key=lambda x: x[1], reverse=True)[:5]
|
926
917
|
]
|
927
|
-
|
918
|
+
|
928
919
|
# Calculate compliance score (simplified)
|
929
920
|
total_events = len(events_24h)
|
930
921
|
compliance_events = len([e for e in events_24h if e.event_type == SecurityEventType.COMPLIANCE_VIOLATION])
|
931
922
|
compliance_score = max(0, 100 - (compliance_events / max(1, total_events) * 100))
|
932
|
-
|
923
|
+
|
933
924
|
return SecurityDashboard(
|
934
925
|
dashboard_id=f"dash-{int(time.time())}",
|
935
926
|
timestamp=now,
|
@@ -938,29 +929,31 @@ class RealTimeSecurityMonitor:
|
|
938
929
|
critical_events_24h=critical_events_24h,
|
939
930
|
high_events_24h=high_events_24h,
|
940
931
|
automated_responses_24h=automated_responses_24h,
|
941
|
-
manual_responses_pending=len(
|
932
|
+
manual_responses_pending=len(
|
933
|
+
[e for e in events_24h if e.manual_response_required and e.response_status == "pending"]
|
934
|
+
),
|
942
935
|
compliance_score=compliance_score,
|
943
936
|
security_posture_trend="stable", # Would be calculated from historical data
|
944
937
|
top_threats=top_threats,
|
945
938
|
business_impact_summary={
|
946
|
-
|
947
|
-
|
948
|
-
|
939
|
+
"high_impact_events": len([e for e in events_24h if e.business_impact == "high"]),
|
940
|
+
"medium_impact_events": len([e for e in events_24h if e.business_impact == "medium"]),
|
941
|
+
"estimated_cost_impact": 0.0, # Would be calculated from business impact
|
949
942
|
},
|
950
943
|
response_time_metrics={
|
951
|
-
|
952
|
-
|
953
|
-
|
944
|
+
"avg_detection_time": 30.0, # seconds
|
945
|
+
"avg_response_time": 120.0, # seconds
|
946
|
+
"automation_rate": (automated_responses_24h / max(1, len(events_24h))) * 100,
|
954
947
|
},
|
955
948
|
cost_impact={
|
956
|
-
|
957
|
-
|
958
|
-
}
|
949
|
+
"potential_savings": 0.0, # From prevented incidents
|
950
|
+
"monitoring_cost": 0.0, # Cost of monitoring infrastructure
|
951
|
+
},
|
959
952
|
)
|
960
953
|
|
961
954
|
async def _display_dashboard_update(self, dashboard: SecurityDashboard):
|
962
955
|
"""Display dashboard update to console."""
|
963
|
-
|
956
|
+
|
964
957
|
dashboard_content = (
|
965
958
|
f"[bold cyan]Security Monitoring Dashboard[/bold cyan]\n\n"
|
966
959
|
f"[green]Accounts Monitored:[/green] {dashboard.accounts_monitored}\n"
|
@@ -970,35 +963,31 @@ class RealTimeSecurityMonitor:
|
|
970
963
|
f"[magenta]Compliance Score:[/magenta] {dashboard.compliance_score:.1f}%\n"
|
971
964
|
f"[cyan]Response Time (avg):[/cyan] {dashboard.response_time_metrics['avg_response_time']:.0f}s"
|
972
965
|
)
|
973
|
-
|
966
|
+
|
974
967
|
# Only display every 5 minutes to avoid spam
|
975
|
-
if not hasattr(self,
|
968
|
+
if not hasattr(self, "_last_dashboard_display"):
|
976
969
|
self._last_dashboard_display = datetime.min
|
977
|
-
|
970
|
+
|
978
971
|
if (datetime.utcnow() - self._last_dashboard_display).total_seconds() > 300:
|
979
|
-
console.print(create_panel(
|
980
|
-
dashboard_content,
|
981
|
-
title="📊 Security Dashboard Update",
|
982
|
-
border_style="blue"
|
983
|
-
))
|
972
|
+
console.print(create_panel(dashboard_content, title="📊 Security Dashboard Update", border_style="blue"))
|
984
973
|
self._last_dashboard_display = datetime.utcnow()
|
985
974
|
|
986
975
|
async def _generate_final_dashboard(self) -> SecurityDashboard:
|
987
976
|
"""Generate final dashboard at end of monitoring session."""
|
988
|
-
|
977
|
+
|
989
978
|
dashboard = await self._generate_current_dashboard()
|
990
|
-
|
979
|
+
|
991
980
|
# Display comprehensive final dashboard
|
992
981
|
self._display_final_dashboard(dashboard)
|
993
|
-
|
982
|
+
|
994
983
|
# Export dashboard data
|
995
984
|
await self._export_dashboard(dashboard)
|
996
|
-
|
985
|
+
|
997
986
|
return dashboard
|
998
987
|
|
999
988
|
def _display_final_dashboard(self, dashboard: SecurityDashboard):
|
1000
989
|
"""Display comprehensive final dashboard."""
|
1001
|
-
|
990
|
+
|
1002
991
|
# Summary panel
|
1003
992
|
summary_content = (
|
1004
993
|
f"[bold green]Monitoring Session Complete[/bold green]\n\n"
|
@@ -1009,13 +998,9 @@ class RealTimeSecurityMonitor:
|
|
1009
998
|
f"[bold]Compliance Score:[/bold] {dashboard.compliance_score:.1f}%\n"
|
1010
999
|
f"[bold]Automation Rate:[/bold] {dashboard.response_time_metrics['automation_rate']:.1f}%"
|
1011
1000
|
)
|
1012
|
-
|
1013
|
-
console.print(create_panel(
|
1014
|
-
|
1015
|
-
title="🔒 Final Security Monitoring Summary",
|
1016
|
-
border_style="green"
|
1017
|
-
))
|
1018
|
-
|
1001
|
+
|
1002
|
+
console.print(create_panel(summary_content, title="🔒 Final Security Monitoring Summary", border_style="green"))
|
1003
|
+
|
1019
1004
|
# Top threats table
|
1020
1005
|
if dashboard.top_threats:
|
1021
1006
|
threats_table = create_table(
|
@@ -1023,174 +1008,167 @@ class RealTimeSecurityMonitor:
|
|
1023
1008
|
columns=[
|
1024
1009
|
{"name": "Threat Type", "style": "red"},
|
1025
1010
|
{"name": "Count", "style": "yellow"},
|
1026
|
-
{"name": "Severity", "style": "magenta"}
|
1027
|
-
]
|
1011
|
+
{"name": "Severity", "style": "magenta"},
|
1012
|
+
],
|
1028
1013
|
)
|
1029
|
-
|
1014
|
+
|
1030
1015
|
for threat in dashboard.top_threats:
|
1031
1016
|
threats_table.add_row(
|
1032
|
-
threat[
|
1033
|
-
str(threat[
|
1034
|
-
"High" if threat[
|
1017
|
+
threat["threat_type"].replace("_", " ").title(),
|
1018
|
+
str(threat["count"]),
|
1019
|
+
"High" if threat["count"] > 5 else "Medium",
|
1035
1020
|
)
|
1036
|
-
|
1021
|
+
|
1037
1022
|
console.print(threats_table)
|
1038
1023
|
|
1039
1024
|
async def _export_dashboard(self, dashboard: SecurityDashboard):
|
1040
1025
|
"""Export dashboard data to file."""
|
1041
|
-
|
1026
|
+
|
1042
1027
|
dashboard_file = self.output_dir / f"security_dashboard_{dashboard.dashboard_id}.json"
|
1043
|
-
|
1028
|
+
|
1044
1029
|
dashboard_data = {
|
1045
|
-
|
1046
|
-
|
1047
|
-
|
1048
|
-
|
1049
|
-
|
1050
|
-
|
1051
|
-
|
1052
|
-
|
1053
|
-
|
1054
|
-
|
1055
|
-
|
1056
|
-
|
1057
|
-
|
1058
|
-
|
1030
|
+
"dashboard_id": dashboard.dashboard_id,
|
1031
|
+
"timestamp": dashboard.timestamp.isoformat(),
|
1032
|
+
"accounts_monitored": dashboard.accounts_monitored,
|
1033
|
+
"total_events_24h": dashboard.total_events_24h,
|
1034
|
+
"critical_events_24h": dashboard.critical_events_24h,
|
1035
|
+
"high_events_24h": dashboard.high_events_24h,
|
1036
|
+
"automated_responses_24h": dashboard.automated_responses_24h,
|
1037
|
+
"manual_responses_pending": dashboard.manual_responses_pending,
|
1038
|
+
"compliance_score": dashboard.compliance_score,
|
1039
|
+
"security_posture_trend": dashboard.security_posture_trend,
|
1040
|
+
"top_threats": dashboard.top_threats,
|
1041
|
+
"business_impact_summary": dashboard.business_impact_summary,
|
1042
|
+
"response_time_metrics": dashboard.response_time_metrics,
|
1043
|
+
"cost_impact": dashboard.cost_impact,
|
1059
1044
|
}
|
1060
|
-
|
1061
|
-
with open(dashboard_file,
|
1045
|
+
|
1046
|
+
with open(dashboard_file, "w") as f:
|
1062
1047
|
json.dump(dashboard_data, f, indent=2)
|
1063
|
-
|
1048
|
+
|
1064
1049
|
print_success(f"Dashboard exported to: {dashboard_file}")
|
1065
1050
|
|
1066
1051
|
async def _discover_organization_accounts(self) -> List[str]:
|
1067
1052
|
"""Discover AWS Organization accounts for monitoring."""
|
1068
|
-
|
1053
|
+
|
1069
1054
|
accounts = []
|
1070
|
-
|
1055
|
+
|
1071
1056
|
try:
|
1072
|
-
organizations = self.session.client(
|
1073
|
-
|
1074
|
-
paginator = organizations.get_paginator(
|
1075
|
-
|
1057
|
+
organizations = self.session.client("organizations")
|
1058
|
+
|
1059
|
+
paginator = organizations.get_paginator("list_accounts")
|
1060
|
+
|
1076
1061
|
for page in paginator.paginate():
|
1077
|
-
for account in page.get(
|
1078
|
-
if account[
|
1079
|
-
accounts.append(account[
|
1080
|
-
|
1062
|
+
for account in page.get("Accounts", []):
|
1063
|
+
if account["Status"] == "ACTIVE":
|
1064
|
+
accounts.append(account["Id"])
|
1065
|
+
|
1081
1066
|
print_success(f"Discovered {len(accounts)} active organization accounts for monitoring")
|
1082
|
-
|
1067
|
+
|
1083
1068
|
except ClientError as e:
|
1084
1069
|
print_warning(f"Could not discover organization accounts: {str(e)}")
|
1085
1070
|
# Fallback to current account
|
1086
|
-
sts = self.session.client(
|
1087
|
-
current_account = sts.get_caller_identity()[
|
1071
|
+
sts = self.session.client("sts")
|
1072
|
+
current_account = sts.get_caller_identity()["Account"]
|
1088
1073
|
accounts = [current_account]
|
1089
1074
|
print_info(f"Using current account for monitoring: {current_account}")
|
1090
|
-
|
1075
|
+
|
1091
1076
|
return accounts
|
1092
1077
|
|
1093
1078
|
|
1094
1079
|
class SecurityEventProcessor:
|
1095
1080
|
"""Process and classify security events."""
|
1096
|
-
|
1081
|
+
|
1097
1082
|
def __init__(self, session: boto3.Session, output_dir: Path):
|
1098
1083
|
self.session = session
|
1099
1084
|
self.output_dir = output_dir
|
1100
|
-
|
1085
|
+
|
1101
1086
|
async def process_event(self, event: SecurityEvent) -> Dict[str, Any]:
|
1102
1087
|
"""Process individual security event."""
|
1103
|
-
|
1088
|
+
|
1104
1089
|
return {
|
1105
|
-
|
1106
|
-
|
1107
|
-
|
1108
|
-
|
1090
|
+
"event_id": event.event_id,
|
1091
|
+
"processed": True,
|
1092
|
+
"classification": event.event_type.value,
|
1093
|
+
"threat_level": event.threat_level.value,
|
1109
1094
|
}
|
1110
1095
|
|
1111
1096
|
|
1112
1097
|
class ThreatDetectionEngine:
|
1113
1098
|
"""Advanced threat detection using ML patterns."""
|
1114
|
-
|
1099
|
+
|
1115
1100
|
def __init__(self, session: boto3.Session):
|
1116
1101
|
self.session = session
|
1117
|
-
|
1102
|
+
|
1118
1103
|
async def detect_anomalies(self, events: List[SecurityEvent]) -> List[SecurityEvent]:
|
1119
1104
|
"""Detect anomalous patterns in security events."""
|
1120
|
-
|
1105
|
+
|
1121
1106
|
# Placeholder for ML-based anomaly detection
|
1122
1107
|
return []
|
1123
1108
|
|
1124
1109
|
|
1125
1110
|
class AutomatedResponseEngine:
|
1126
1111
|
"""Execute automated security responses."""
|
1127
|
-
|
1112
|
+
|
1128
1113
|
def __init__(self, session: boto3.Session, output_dir: Path):
|
1129
1114
|
self.session = session
|
1130
1115
|
self.output_dir = output_dir
|
1131
|
-
|
1116
|
+
|
1132
1117
|
async def execute_response(self, event: SecurityEvent) -> Dict[str, Any]:
|
1133
1118
|
"""Execute automated response to security event."""
|
1134
|
-
|
1119
|
+
|
1135
1120
|
if not event.auto_response_command:
|
1136
|
-
return {
|
1137
|
-
|
1121
|
+
return {"success": False, "reason": "No automated response available"}
|
1122
|
+
|
1138
1123
|
# In dry-run mode, just log the command that would be executed
|
1139
1124
|
print_info(f"Would execute: {event.auto_response_command}")
|
1140
|
-
|
1141
|
-
return {
|
1142
|
-
'success': True,
|
1143
|
-
'command': event.auto_response_command,
|
1144
|
-
'execution_mode': 'dry_run'
|
1145
|
-
}
|
1125
|
+
|
1126
|
+
return {"success": True, "command": event.auto_response_command, "execution_mode": "dry_run"}
|
1146
1127
|
|
1147
1128
|
|
1148
1129
|
class MCPSecurityConnector:
    """Bridge to MCP servers providing real-time security data."""

    def __init__(self):
        # Known MCP endpoints keyed by AWS service name.
        self.mcp_endpoints = dict(
            security_hub="mcp://aws/security-hub",
            config="mcp://aws/config",
            cloudtrail="mcp://aws/cloudtrail",
        )

    async def get_real_time_data(self, endpoint: str) -> Dict[str, Any]:
        """Fetch live data from the given MCP *endpoint*.

        Stub: reports the endpoint as available with an empty payload.
        """
        # Placeholder until real MCP integration lands.
        return {"status": "available", "data": {}}
|
1163
1144
|
|
1164
1145
|
|
1165
1146
|
# CLI integration for real-time monitoring
if __name__ == "__main__":
    import argparse

    arg_parser = argparse.ArgumentParser(description="Real-Time Security Monitor")
    arg_parser.add_argument("--profile", default="default", help="AWS profile to use")
    arg_parser.add_argument("--accounts", nargs="+", help="Target account IDs (optional)")
    arg_parser.add_argument("--duration", type=int, help="Monitoring duration in minutes (default: continuous)")
    arg_parser.add_argument("--max-accounts", type=int, default=61, help="Max concurrent accounts")
    arg_parser.add_argument("--output-dir", default="./artifacts/security-monitoring", help="Output directory")

    cli_args = arg_parser.parse_args()

    async def _run() -> None:
        """Drive one monitoring session and print a summary to the console."""
        monitor = RealTimeSecurityMonitor(
            profile=cli_args.profile,
            output_dir=cli_args.output_dir,
            max_concurrent_accounts=cli_args.max_accounts,
        )

        dashboard = await monitor.start_real_time_monitoring(
            target_accounts=cli_args.accounts,
            monitoring_duration=cli_args.duration,
        )

        print_success(f"Monitoring completed. Dashboard ID: {dashboard.dashboard_id}")
        print_info(f"Total events (24h): {dashboard.total_events_24h}")
        print_info(f"Critical events: {dashboard.critical_events_24h}")
        print_info(f"Compliance score: {dashboard.compliance_score:.1f}%")

    # Kick off the async entry point.
    asyncio.run(_run())
|