runbooks-0.9.7-py3-none-any.whl → runbooks-0.9.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/common/mcp_integration.py +174 -0
- runbooks/common/performance_monitor.py +4 -4
- runbooks/common/rich_utils.py +3 -0
- runbooks/enterprise/__init__.py +18 -10
- runbooks/enterprise/security.py +708 -0
- runbooks/finops/enhanced_dashboard_runner.py +2 -1
- runbooks/finops/finops_dashboard.py +322 -11
- runbooks/finops/markdown_exporter.py +226 -0
- runbooks/finops/optimizer.py +2 -0
- runbooks/finops/single_dashboard.py +16 -16
- runbooks/finops/vpc_cleanup_exporter.py +328 -0
- runbooks/finops/vpc_cleanup_optimizer.py +1318 -0
- runbooks/main.py +384 -15
- runbooks/operate/vpc_operations.py +8 -2
- runbooks/vpc/__init__.py +12 -0
- runbooks/vpc/cleanup_wrapper.py +757 -0
- runbooks/vpc/cost_engine.py +527 -3
- runbooks/vpc/networking_wrapper.py +29 -29
- runbooks/vpc/runbooks_adapter.py +479 -0
- runbooks/vpc/unified_scenarios.py +3199 -0
- runbooks/vpc/vpc_cleanup_integration.py +2629 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/METADATA +1 -1
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/RECORD +28 -21
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/WHEEL +0 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/entry_points.txt +0 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/top_level.txt +0 -0
runbooks/enterprise/security.py (new file)
@@ -0,0 +1,708 @@
```python
"""
Enterprise Security Module - Enhanced Security Logging & VPC Security Assessment
===============================================================================

Enterprise-grade security module providing enhanced security logging, VPC security
assessment, compliance framework integration, and risk classification for the
CloudOps Runbooks platform. This module integrates with the three-bucket VPC
cleanup strategy and provides comprehensive security audit trails.

Key Features:
- Enhanced Security Logging with Rich CLI integration
- VPC Security Posture Assessment (ACLs, Security Groups, Flow Logs)
- Multi-Framework Compliance (SOC2, PCI-DSS, HIPAA, NIST, ISO27001)
- Security Risk Classification (LOW/MEDIUM/HIGH)
- SHA256 Audit Trail Generation
- Integration with VPC Cleanup Safety Controls

Author: DevOps Security Engineer (Enterprise Agile Team)
Coordination: enterprise-product-owner → devops-security-engineer → python-runbooks-engineer → qa-testing-specialist
Framework: Enterprise Security-as-Code with FAANG SDLC compliance
"""

import hashlib
import json
import logging
import os
import time
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, Tuple

import boto3
from botocore.exceptions import ClientError, NoCredentialsError

# Rich CLI integration for enterprise UX standards
from runbooks.common.rich_utils import (
    console,
    create_panel,
    create_progress_bar,
    create_table,
    print_error,
    print_info,
    print_success,
    print_warning,
    STATUS_INDICATORS,
    CLOUDOPS_THEME
)

# Profile management integration
try:
    from runbooks.common.profile_utils import create_session
except ImportError:
    # Fallback for profile management
    def create_session(profile_name: str):
        return boto3.Session(profile_name=profile_name)


class SecurityRiskLevel(Enum):
    """Security risk classification levels for enterprise decision making."""

    LOW = "LOW"
    MEDIUM = "MEDIUM"
    HIGH = "HIGH"
    CRITICAL = "CRITICAL"


class ComplianceFramework(Enum):
    """Supported compliance frameworks for enterprise validation."""

    SOC2 = "SOC2"
    PCI_DSS = "PCI-DSS"
    HIPAA = "HIPAA"
    NIST = "NIST"
    ISO27001 = "ISO27001"
    CIS = "CIS_Benchmarks"
    AWS_WAF = "AWS_Well_Architected"


class VPCSecurityAnalysis:
    """VPC Security Analysis results for cleanup integration."""

    def __init__(self, vpc_id: str, region: str):
        self.vpc_id = vpc_id
        self.region = region
        self.timestamp = datetime.now(timezone.utc)
        self.security_groups: List[Dict[str, Any]] = []
        self.nacls: List[Dict[str, Any]] = []
        self.flow_logs: List[Dict[str, Any]] = []
        self.route_tables: List[Dict[str, Any]] = []
        self.findings: List[Dict[str, Any]] = []
        self.risk_level = SecurityRiskLevel.LOW
        self.compliance_status: Dict[str, bool] = {}

    def add_finding(self, severity: str, title: str, description: str, resource: str):
        """Add a security finding to the analysis."""
        finding = {
            "severity": severity,
            "title": title,
            "description": description,
            "resource": resource,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "vpc_id": self.vpc_id
        }
        self.findings.append(finding)

        # Update overall risk level based on findings
        if severity == "HIGH" or severity == "CRITICAL":
            if self.risk_level in [SecurityRiskLevel.LOW, SecurityRiskLevel.MEDIUM]:
                self.risk_level = SecurityRiskLevel.HIGH if severity == "HIGH" else SecurityRiskLevel.CRITICAL
        elif severity == "MEDIUM" and self.risk_level == SecurityRiskLevel.LOW:
            self.risk_level = SecurityRiskLevel.MEDIUM


class EnterpriseSecurityLogger:
    """Enhanced security logger with enterprise audit trails and Rich CLI integration."""

    def __init__(self, module_name: str, log_dir: Optional[Path] = None):
        """
        Initialize enterprise security logger.

        Args:
            module_name: Name of the module requesting logging
            log_dir: Optional directory for security logs
        """
        self.module_name = module_name
        self.log_dir = log_dir or Path.home() / ".runbooks" / "security-logs"
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # Create security-specific log file
        self.log_file = self.log_dir / f"{module_name}-security-{datetime.now().strftime('%Y%m%d')}.jsonl"

        # Initialize standard logger as fallback
        self.logger = logging.getLogger(f"runbooks.security.{module_name}")
        self.logger.setLevel(logging.INFO)

        if not self.logger.handlers:
            # Console handler with Rich CLI integration
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(logging.Formatter(
                f"{STATUS_INDICATORS['info']} %(asctime)s | SECURITY | %(message)s"
            ))
            self.logger.addHandler(console_handler)

            # File handler for audit trails
            file_handler = logging.FileHandler(self.log_file)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s | %(levelname)s | SECURITY | %(name)s | %(message)s'
            ))
            self.logger.addHandler(file_handler)

    def log_security_event(self, event_type: str, message: str, metadata: Optional[Dict[str, Any]] = None):
        """
        Log a security event with comprehensive audit trail.

        Args:
            event_type: Type of security event (VPC_ANALYSIS, COMPLIANCE_CHECK, etc.)
            message: Human-readable message
            metadata: Additional structured metadata
        """
        security_event = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "module": self.module_name,
            "event_type": event_type,
            "message": message,
            "metadata": metadata or {},
            "correlation_id": self._generate_correlation_id()
        }

        # Generate SHA256 hash for tamper detection
        event_hash = self._generate_event_hash(security_event)
        security_event["event_hash"] = event_hash

        # Write to audit file
        with open(self.log_file, 'a') as f:
            f.write(json.dumps(security_event) + '\n')

        # Rich CLI output
        console.print(f"{STATUS_INDICATORS['info']} [security]SECURITY[/security] | {event_type} | {message}")

        # Standard logger
        self.logger.info(f"{event_type} | {message} | Hash: {event_hash[:8]}...")

    def log_vpc_security_analysis(self, analysis: VPCSecurityAnalysis):
        """Log VPC security analysis results."""
        self.log_security_event(
            "VPC_SECURITY_ANALYSIS",
            f"VPC {analysis.vpc_id} security assessment completed - Risk: {analysis.risk_level.value}",
            {
                "vpc_id": analysis.vpc_id,
                "region": analysis.region,
                "findings_count": len(analysis.findings),
                "risk_level": analysis.risk_level.value,
                "compliance_status": analysis.compliance_status,
                "security_groups_count": len(analysis.security_groups),
                "nacls_count": len(analysis.nacls),
                "flow_logs_enabled": len(analysis.flow_logs) > 0
            }
        )

    def _generate_correlation_id(self) -> str:
        """Generate unique correlation ID for tracking operations."""
        import uuid
        return str(uuid.uuid4())[:8]

    def _generate_event_hash(self, event_data: Dict[str, Any]) -> str:
        """Generate SHA256 hash for security event integrity."""
        # Remove hash field if present to avoid circular reference
        event_copy = event_data.copy()
        event_copy.pop("event_hash", None)

        # Create deterministic string representation
        event_string = json.dumps(event_copy, sort_keys=True)
        return hashlib.sha256(event_string.encode()).hexdigest()


def get_enhanced_logger(module_name: str) -> EnterpriseSecurityLogger:
    """
    Get enhanced security logger for enterprise audit trails.

    This is the main function called by VPC cleanup and other modules
    that need enhanced security logging capabilities.

    Args:
        module_name: Name of the requesting module

    Returns:
        EnterpriseSecurityLogger instance with audit trail capabilities
    """
    return EnterpriseSecurityLogger(module_name)
```
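For orientation, a minimal usage sketch of the audit-logging API defined above. It assumes runbooks 0.9.9 is installed; the module name, event type, and metadata values are illustrative rather than taken from the package. The diff of runbooks/enterprise/security.py continues after the example.

```python
# Illustrative sketch only; assumes runbooks>=0.9.9 is installed and importable.
from runbooks.enterprise.security import get_enhanced_logger

# The module name is arbitrary here; the VPC cleanup path uses "vpc_cleanup" internally.
logger = get_enhanced_logger("vpc_cleanup")

# Each call appends one JSON line to
# ~/.runbooks/security-logs/vpc_cleanup-security-<YYYYMMDD>.jsonl,
# including a correlation_id and a SHA256 event_hash for tamper detection.
logger.log_security_event(
    "COMPLIANCE_CHECK",                       # free-form event type
    "Pre-cleanup compliance check started",   # human-readable message
    metadata={"vpc_id": "vpc-0abc1234"},      # hypothetical VPC ID
)
```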
```python
def assess_vpc_security_posture(vpc_id: str, profile: str, region: str = "us-east-1") -> VPCSecurityAnalysis:
    """
    Comprehensive VPC security posture assessment.

    Analyzes VPC security configuration including Security Groups, NACLs,
    Flow Logs, and route tables to identify security risks and compliance
    issues before VPC cleanup operations.

    Args:
        vpc_id: VPC ID to analyze
        profile: AWS profile for authentication
        region: AWS region (default: us-east-1)

    Returns:
        VPCSecurityAnalysis object with comprehensive security findings
    """
    console.print(f"{STATUS_INDICATORS['running']} [security]Assessing VPC security posture for {vpc_id}[/security]")

    analysis = VPCSecurityAnalysis(vpc_id, region)

    try:
        # Create AWS session with specified profile
        session = create_session(profile)
        ec2 = session.client('ec2', region_name=region)

        with create_progress_bar() as progress:
            task = progress.add_task("[security]Security Assessment[/security]", total=4)

            # 1. Analyze Security Groups
            progress.update(task, description="[security]Analyzing Security Groups[/security]")
            security_groups = _analyze_security_groups(ec2, vpc_id, analysis)
            analysis.security_groups = security_groups
            progress.advance(task)

            # 2. Analyze Network ACLs
            progress.update(task, description="[security]Analyzing Network ACLs[/security]")
            nacls = _analyze_network_acls(ec2, vpc_id, analysis)
            analysis.nacls = nacls
            progress.advance(task)

            # 3. Check VPC Flow Logs
            progress.update(task, description="[security]Checking VPC Flow Logs[/security]")
            flow_logs = _analyze_flow_logs(ec2, vpc_id, analysis)
            analysis.flow_logs = flow_logs
            progress.advance(task)

            # 4. Analyze Route Tables
            progress.update(task, description="[security]Analyzing Route Tables[/security]")
            route_tables = _analyze_route_tables(ec2, vpc_id, analysis)
            analysis.route_tables = route_tables
            progress.advance(task)

        # Log the security analysis
        logger = get_enhanced_logger("vpc_cleanup")
        logger.log_vpc_security_analysis(analysis)

        # Display results with Rich CLI
        _display_security_analysis_results(analysis)

        console.print(f"{STATUS_INDICATORS['success']} [security]VPC security assessment completed - Risk: {analysis.risk_level.value}[/security]")

    except ClientError as e:
        error_msg = f"AWS API error during VPC security assessment: {e}"
        console.print(f"{STATUS_INDICATORS['error']} [error]{error_msg}[/error]")
        analysis.add_finding("HIGH", "API Access Error", error_msg, vpc_id)

    except Exception as e:
        error_msg = f"Unexpected error during VPC security assessment: {e}"
        console.print(f"{STATUS_INDICATORS['error']} [error]{error_msg}[/error]")
        analysis.add_finding("MEDIUM", "Assessment Error", error_msg, vpc_id)

    return analysis
```
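A sketch of how a caller might drive the assessment entry point above. The VPC ID, profile name, and the abort-on-risk gate are placeholders; real use requires AWS credentials with the relevant ec2:Describe* permissions. The diff continues below.

```python
# Illustrative sketch; vpc_id and profile are placeholders, and the call needs
# credentials allowed to describe security groups, NACLs, flow logs, and route tables.
from runbooks.enterprise.security import SecurityRiskLevel, assess_vpc_security_posture

analysis = assess_vpc_security_posture(
    vpc_id="vpc-0abc1234",        # hypothetical VPC
    profile="my-audit-profile",   # hypothetical AWS profile
    region="us-east-1",
)

print(analysis.risk_level.value, len(analysis.findings))

# One possible safety gate before a cleanup run (not part of the package):
if analysis.risk_level in (SecurityRiskLevel.HIGH, SecurityRiskLevel.CRITICAL):
    raise SystemExit("VPC cleanup blocked: high-risk security findings present")
```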
```python
def validate_compliance_requirements(
    resource_data: Dict[str, Any],
    frameworks: List[str]
) -> Dict[str, bool]:
    """
    Validate resource configuration against compliance frameworks.

    Args:
        resource_data: Resource configuration data to validate
        frameworks: List of compliance frameworks to check against

    Returns:
        Dict mapping framework names to compliance status (True/False)
    """
    compliance_results = {}

    for framework in frameworks:
        try:
            framework_enum = ComplianceFramework(framework.upper().replace('-', '_'))
            compliance_results[framework] = _check_framework_compliance(resource_data, framework_enum)
        except ValueError:
            console.print(f"{STATUS_INDICATORS['warning']} [warning]Unknown compliance framework: {framework}[/warning]")
            compliance_results[framework] = False

    return compliance_results


def evaluate_security_baseline(analysis_results: Dict[str, Any]) -> Dict[str, Any]:
    """
    Evaluate security baseline from analysis results.

    Args:
        analysis_results: Combined analysis results from VPC assessment

    Returns:
        Security baseline evaluation with recommendations
    """
    baseline_evaluation = {
        "baseline_score": 0,
        "max_score": 100,
        "recommendations": [],
        "critical_findings": [],
        "compliance_gaps": []
    }

    # Security Groups baseline (25 points)
    sg_score = _evaluate_security_groups_baseline(analysis_results.get('security_groups', []))
    baseline_evaluation["baseline_score"] += sg_score

    # Network ACLs baseline (25 points)
    nacl_score = _evaluate_nacls_baseline(analysis_results.get('nacls', []))
    baseline_evaluation["baseline_score"] += nacl_score

    # Flow Logs baseline (25 points)
    flow_logs_score = _evaluate_flow_logs_baseline(analysis_results.get('flow_logs', []))
    baseline_evaluation["baseline_score"] += flow_logs_score

    # Route Tables baseline (25 points)
    route_tables_score = _evaluate_route_tables_baseline(analysis_results.get('route_tables', []))
    baseline_evaluation["baseline_score"] += route_tables_score

    # Generate recommendations based on score
    if baseline_evaluation["baseline_score"] < 70:
        baseline_evaluation["recommendations"].append("Immediate security review required")
        baseline_evaluation["critical_findings"].append("Security baseline below acceptable threshold")
    elif baseline_evaluation["baseline_score"] < 85:
        baseline_evaluation["recommendations"].append("Security improvements recommended")
    else:
        baseline_evaluation["recommendations"].append("Security posture meets enterprise standards")

    return baseline_evaluation


def classify_security_risk(resource_analysis: Dict[str, Any]) -> str:
    """
    Classify security risk level for enterprise decision making.

    Args:
        resource_analysis: Resource security analysis data

    Returns:
        Risk classification: LOW, MEDIUM, HIGH, or CRITICAL
    """
    risk_factors = []

    # Check for critical security misconfigurations
    findings = resource_analysis.get('findings', [])
    critical_count = len([f for f in findings if f.get('severity') == 'CRITICAL'])
    high_count = len([f for f in findings if f.get('severity') == 'HIGH'])

    if critical_count > 0:
        return SecurityRiskLevel.CRITICAL.value
    elif high_count >= 3:
        return SecurityRiskLevel.HIGH.value
    elif high_count > 0 or len(findings) >= 5:
        return SecurityRiskLevel.MEDIUM.value
    else:
        return SecurityRiskLevel.LOW.value
```
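The compliance and risk helpers above operate on plain dictionaries, so they can be exercised without AWS access. A small sketch with hand-built input follows; the keys mirror what the functions read and every value is invented. The diff continues below.

```python
# Illustrative sketch; the dict keys mirror what these helpers read, values are made up.
from runbooks.enterprise.security import classify_security_risk, validate_compliance_requirements

resource_analysis = {
    "findings": [
        {"severity": "HIGH", "title": "Overly permissive Security Group"},
        {"severity": "MEDIUM", "title": "VPC Flow Logs not enabled"},
    ],
    "security_groups": [{"group_id": "sg-0abc1234"}],  # hypothetical SG
    "flow_logs_enabled": False,
    "baseline_score": 60,  # e.g. taken from evaluate_security_baseline()
}

print(classify_security_risk(resource_analysis))
# -> "MEDIUM": one HIGH finding, no CRITICAL, fewer than three HIGH findings

print(validate_compliance_requirements(resource_analysis, ["SOC2", "NIST"]))
# -> {"SOC2": False, "NIST": False}: no flow logs, baseline score below 70
```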
```python
# Private helper functions for detailed security analysis

def _analyze_security_groups(ec2_client, vpc_id: str, analysis: VPCSecurityAnalysis) -> List[Dict[str, Any]]:
    """Analyze Security Groups for security risks."""
    try:
        response = ec2_client.describe_security_groups(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
        )

        security_groups = []
        for sg in response['SecurityGroups']:
            sg_analysis = {
                'group_id': sg['GroupId'],
                'group_name': sg['GroupName'],
                'description': sg['Description'],
                'inbound_rules': sg.get('IpPermissions', []),
                'outbound_rules': sg.get('IpPermissionsEgress', [])
            }

            # Check for overly permissive rules
            _check_security_group_rules(sg, analysis)
            security_groups.append(sg_analysis)

        return security_groups

    except ClientError as e:
        analysis.add_finding("MEDIUM", "Security Groups Analysis Failed", str(e), vpc_id)
        return []


def _analyze_network_acls(ec2_client, vpc_id: str, analysis: VPCSecurityAnalysis) -> List[Dict[str, Any]]:
    """Analyze Network ACLs for security configuration."""
    try:
        response = ec2_client.describe_network_acls(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
        )

        nacls = []
        for nacl in response['NetworkAcls']:
            nacl_analysis = {
                'nacl_id': nacl['NetworkAclId'],
                'is_default': nacl['IsDefault'],
                'entries': nacl.get('Entries', []),
                'associations': nacl.get('Associations', [])
            }

            # Check for default NACL usage (potential security risk)
            if nacl['IsDefault']:
                analysis.add_finding(
                    "LOW",
                    "Default NACL in use",
                    "Consider creating custom NACLs for better security control",
                    nacl['NetworkAclId']
                )

            nacls.append(nacl_analysis)

        return nacls

    except ClientError as e:
        analysis.add_finding("MEDIUM", "Network ACLs Analysis Failed", str(e), vpc_id)
        return []


def _analyze_flow_logs(ec2_client, vpc_id: str, analysis: VPCSecurityAnalysis) -> List[Dict[str, Any]]:
    """Check VPC Flow Logs configuration."""
    try:
        response = ec2_client.describe_flow_logs(
            Filters=[
                {'Name': 'resource-id', 'Values': [vpc_id]},
                {'Name': 'resource-type', 'Values': ['VPC']}
            ]
        )

        flow_logs = response.get('FlowLogs', [])

        if not flow_logs:
            analysis.add_finding(
                "MEDIUM",
                "VPC Flow Logs not enabled",
                "Enable VPC Flow Logs for network monitoring and security analysis",
                vpc_id
            )
        else:
            for flow_log in flow_logs:
                if flow_log['FlowLogStatus'] != 'ACTIVE':
                    analysis.add_finding(
                        "MEDIUM",
                        f"Flow Log {flow_log['FlowLogId']} not active",
                        f"Flow Log status: {flow_log['FlowLogStatus']}",
                        flow_log['FlowLogId']
                    )

        return [{'flow_log_id': fl.get('FlowLogId'), 'status': fl.get('FlowLogStatus')} for fl in flow_logs]

    except ClientError as e:
        analysis.add_finding("MEDIUM", "Flow Logs Analysis Failed", str(e), vpc_id)
        return []


def _analyze_route_tables(ec2_client, vpc_id: str, analysis: VPCSecurityAnalysis) -> List[Dict[str, Any]]:
    """Analyze Route Tables for security implications."""
    try:
        response = ec2_client.describe_route_tables(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
        )

        route_tables = []
        for rt in response['RouteTables']:
            rt_analysis = {
                'route_table_id': rt['RouteTableId'],
                'routes': rt.get('Routes', []),
                'associations': rt.get('Associations', [])
            }

            # Check for overly broad routes
            for route in rt.get('Routes', []):
                if route.get('DestinationCidrBlock') == '0.0.0.0/0':
                    gateway_id = route.get('GatewayId', '')
                    if gateway_id.startswith('igw-'):
                        analysis.add_finding(
                            "HIGH",
                            "Public route detected",
                            f"Route table {rt['RouteTableId']} has public internet access via {gateway_id}",
                            rt['RouteTableId']
                        )

            route_tables.append(rt_analysis)

        return route_tables

    except ClientError as e:
        analysis.add_finding("MEDIUM", "Route Tables Analysis Failed", str(e), vpc_id)
        return []


def _check_security_group_rules(security_group: Dict[str, Any], analysis: VPCSecurityAnalysis):
    """Check Security Group rules for common security issues."""
    sg_id = security_group['GroupId']

    # Check inbound rules
    for rule in security_group.get('IpPermissions', []):
        for ip_range in rule.get('IpRanges', []):
            if ip_range.get('CidrIp') == '0.0.0.0/0':
                ports = f"{rule.get('FromPort', 'All')}-{rule.get('ToPort', 'All')}"
                analysis.add_finding(
                    "HIGH",
                    "Overly permissive Security Group",
                    f"Security Group {sg_id} allows inbound access from anywhere (0.0.0.0/0) on ports {ports}",
                    sg_id
                )


def _check_framework_compliance(resource_data: Dict[str, Any], framework: ComplianceFramework) -> bool:
    """Check resource compliance against specific framework."""
    if framework == ComplianceFramework.SOC2:
        # SOC2 requires logging and access controls
        return (
            resource_data.get('flow_logs_enabled', False) and
            len(resource_data.get('security_groups', [])) > 0
        )
    elif framework == ComplianceFramework.PCI_DSS:
        # PCI-DSS requires strict access controls
        findings = resource_data.get('findings', [])
        high_severity_findings = [f for f in findings if f.get('severity') in ['HIGH', 'CRITICAL']]
        return len(high_severity_findings) == 0
    elif framework == ComplianceFramework.HIPAA:
        # HIPAA requires encryption and access logging
        return (
            resource_data.get('flow_logs_enabled', False) and
            resource_data.get('baseline_score', 0) >= 85
        )
    else:
        # Default compliance check
        return resource_data.get('baseline_score', 0) >= 70


def _evaluate_security_groups_baseline(security_groups: List[Dict[str, Any]]) -> int:
    """Evaluate Security Groups against security baseline (max 25 points)."""
    if not security_groups:
        return 0

    score = 25
    for sg in security_groups:
        # Check for overly permissive rules
        inbound_rules = sg.get('inbound_rules', [])
        for rule in inbound_rules:
            for ip_range in rule.get('IpRanges', []):
                if ip_range.get('CidrIp') == '0.0.0.0/0':
                    score -= 5  # Deduct points for open access

    return max(0, score)


def _evaluate_nacls_baseline(nacls: List[Dict[str, Any]]) -> int:
    """Evaluate Network ACLs against security baseline (max 25 points)."""
    if not nacls:
        return 10  # Partial score for having no custom NACLs

    score = 25
    default_nacl_count = len([n for n in nacls if n.get('is_default', False)])
    if default_nacl_count > 0:
        score -= 5  # Deduct for using default NACLs

    return max(10, score)


def _evaluate_flow_logs_baseline(flow_logs: List[Dict[str, Any]]) -> int:
    """Evaluate Flow Logs against security baseline (max 25 points)."""
    if not flow_logs:
        return 0  # No flow logs = no points

    active_flow_logs = len([fl for fl in flow_logs if fl.get('status') == 'ACTIVE'])
    return 25 if active_flow_logs > 0 else 10


def _evaluate_route_tables_baseline(route_tables: List[Dict[str, Any]]) -> int:
    """Evaluate Route Tables against security baseline (max 25 points)."""
    if not route_tables:
        return 0

    score = 25
    for rt in route_tables:
        for route in rt.get('routes', []):
            if (route.get('DestinationCidrBlock') == '0.0.0.0/0' and
                route.get('GatewayId', '').startswith('igw-')):
                score -= 3  # Deduct for public routes

    return max(15, score)
```
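To make the four 25-point baseline components concrete, a worked example under invented inputs: one wide-open security group (25 − 5), a default NACL (25 − 5), an active flow log (25), and one internet-gateway route (25 − 3) should total 20 + 20 + 25 + 22 = 87, which falls in the ≥ 85 "meets enterprise standards" band. The diff continues below.

```python
# Worked example with invented inputs showing how the component scores add up.
from runbooks.enterprise.security import evaluate_security_baseline

sample = {
    "security_groups": [{"inbound_rules": [{"IpRanges": [{"CidrIp": "0.0.0.0/0"}]}]}],  # 25 - 5 = 20
    "nacls": [{"is_default": True}],                                                    # 25 - 5 = 20
    "flow_logs": [{"status": "ACTIVE"}],                                                # 25
    "route_tables": [{"routes": [{"DestinationCidrBlock": "0.0.0.0/0",
                                  "GatewayId": "igw-0abc1234"}]}],                      # 25 - 3 = 22
}

result = evaluate_security_baseline(sample)
print(result["baseline_score"], result["recommendations"])
# -> 87 ['Security posture meets enterprise standards']
```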
```python
def _display_security_analysis_results(analysis: VPCSecurityAnalysis):
    """Display security analysis results with Rich CLI formatting."""

    # Create summary table
    table = create_table(
        title=f"VPC Security Analysis - {analysis.vpc_id}",
        columns=[
            {"name": "Component", "style": "cyan"},
            {"name": "Count", "style": "white"},
            {"name": "Status", "style": "green"}
        ]
    )

    table.add_row("Security Groups", str(len(analysis.security_groups)), "✅ Analyzed")
    table.add_row("Network ACLs", str(len(analysis.nacls)), "✅ Analyzed")
    table.add_row("Flow Logs", str(len(analysis.flow_logs)), "✅ Checked" if analysis.flow_logs else "❌ Missing")
    table.add_row("Route Tables", str(len(analysis.route_tables)), "✅ Analyzed")

    console.print(table)

    # Display findings if any
    if analysis.findings:
        findings_table = create_table(
            title="Security Findings",
            columns=[
                {"name": "Severity", "style": "red bold"},
                {"name": "Finding", "style": "yellow"},
                {"name": "Resource", "style": "cyan"}
            ]
        )

        for finding in analysis.findings:
            findings_table.add_row(
                finding['severity'],
                finding['title'],
                finding['resource']
            )

        console.print(findings_table)

    # Risk level summary
    risk_style = {
        SecurityRiskLevel.LOW: "green",
        SecurityRiskLevel.MEDIUM: "yellow",
        SecurityRiskLevel.HIGH: "red",
        SecurityRiskLevel.CRITICAL: "red bold"
    }.get(analysis.risk_level, "white")

    risk_panel = create_panel(
        f"Overall Security Risk: [{risk_style}]{analysis.risk_level.value}[/{risk_style}]\n"
        f"Findings: {len(analysis.findings)}\n"
        f"Analysis Time: {analysis.timestamp.strftime('%Y-%m-%d %H:%M:%S UTC')}",
        title="[security]Security Risk Assessment[/security]",
        border_style=risk_style
    )

    console.print(risk_panel)


# Export the main functions needed by VPC cleanup and other modules
__all__ = [
    "get_enhanced_logger",
    "assess_vpc_security_posture",
    "validate_compliance_requirements",
    "evaluate_security_baseline",
    "classify_security_risk",
    "SecurityRiskLevel",
    "ComplianceFramework",
    "VPCSecurityAnalysis",
    "EnterpriseSecurityLogger"
]
```
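Finally, a sketch of how an auditor could re-verify the SHA256 event_hash that EnterpriseSecurityLogger writes into each JSONL record. The log path and date suffix are hypothetical, following the naming convention used by the module above.

```python
# Illustrative sketch: recompute event_hash the same way _generate_event_hash does
# (JSON with sorted keys, hash field removed) and compare it to the recorded value.
import hashlib
import json
from pathlib import Path

log_file = Path.home() / ".runbooks" / "security-logs" / "vpc_cleanup-security-20250101.jsonl"  # hypothetical

for line in log_file.read_text().splitlines():
    event = json.loads(line)
    recorded = event.pop("event_hash", None)
    recomputed = hashlib.sha256(json.dumps(event, sort_keys=True).encode()).hexdigest()
    status = "ok" if recorded == recomputed else "TAMPERED"
    print(f"{event['correlation_id']} {event['event_type']}: {status}")
```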