runbooks 0.7.6__py3-none-any.whl → 0.7.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/base.py +5 -1
- runbooks/cfat/__init__.py +8 -4
- runbooks/cfat/assessment/collectors.py +171 -14
- runbooks/cfat/assessment/compliance.py +871 -0
- runbooks/cfat/assessment/runner.py +122 -11
- runbooks/cfat/models.py +6 -2
- runbooks/common/logger.py +14 -0
- runbooks/common/rich_utils.py +451 -0
- runbooks/enterprise/__init__.py +68 -0
- runbooks/enterprise/error_handling.py +411 -0
- runbooks/enterprise/logging.py +439 -0
- runbooks/enterprise/multi_tenant.py +583 -0
- runbooks/finops/README.md +468 -241
- runbooks/finops/__init__.py +39 -3
- runbooks/finops/cli.py +83 -18
- runbooks/finops/cross_validation.py +375 -0
- runbooks/finops/dashboard_runner.py +812 -164
- runbooks/finops/enhanced_dashboard_runner.py +525 -0
- runbooks/finops/finops_dashboard.py +1892 -0
- runbooks/finops/helpers.py +485 -51
- runbooks/finops/optimizer.py +823 -0
- runbooks/finops/tests/__init__.py +19 -0
- runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
- runbooks/finops/tests/run_comprehensive_tests.py +421 -0
- runbooks/finops/tests/run_tests.py +305 -0
- runbooks/finops/tests/test_finops_dashboard.py +705 -0
- runbooks/finops/tests/test_integration.py +477 -0
- runbooks/finops/tests/test_performance.py +380 -0
- runbooks/finops/tests/test_performance_benchmarks.py +500 -0
- runbooks/finops/tests/test_reference_images_validation.py +867 -0
- runbooks/finops/tests/test_single_account_features.py +715 -0
- runbooks/finops/tests/validate_test_suite.py +220 -0
- runbooks/finops/types.py +1 -1
- runbooks/hitl/enhanced_workflow_engine.py +725 -0
- runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
- runbooks/inventory/collectors/aws_comprehensive.py +442 -0
- runbooks/inventory/collectors/enterprise_scale.py +281 -0
- runbooks/inventory/core/collector.py +172 -13
- runbooks/inventory/discovery.md +1 -1
- runbooks/inventory/list_ec2_instances.py +18 -20
- runbooks/inventory/list_ssm_parameters.py +31 -3
- runbooks/inventory/organizations_discovery.py +1269 -0
- runbooks/inventory/rich_inventory_display.py +393 -0
- runbooks/inventory/run_on_multi_accounts.py +35 -19
- runbooks/inventory/runbooks.security.report_generator.log +0 -0
- runbooks/inventory/runbooks.security.run_script.log +0 -0
- runbooks/inventory/vpc_flow_analyzer.py +1030 -0
- runbooks/main.py +2215 -119
- runbooks/metrics/dora_metrics_engine.py +599 -0
- runbooks/operate/__init__.py +2 -2
- runbooks/operate/base.py +122 -10
- runbooks/operate/deployment_framework.py +1032 -0
- runbooks/operate/deployment_validator.py +853 -0
- runbooks/operate/dynamodb_operations.py +10 -6
- runbooks/operate/ec2_operations.py +319 -11
- runbooks/operate/executive_dashboard.py +779 -0
- runbooks/operate/mcp_integration.py +750 -0
- runbooks/operate/nat_gateway_operations.py +1120 -0
- runbooks/operate/networking_cost_heatmap.py +685 -0
- runbooks/operate/privatelink_operations.py +940 -0
- runbooks/operate/s3_operations.py +10 -6
- runbooks/operate/vpc_endpoints.py +644 -0
- runbooks/operate/vpc_operations.py +1038 -0
- runbooks/remediation/__init__.py +2 -2
- runbooks/remediation/acm_remediation.py +1 -1
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/cloudtrail_remediation.py +1 -1
- runbooks/remediation/cognito_remediation.py +1 -1
- runbooks/remediation/dynamodb_remediation.py +1 -1
- runbooks/remediation/ec2_remediation.py +1 -1
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
- runbooks/remediation/kms_enable_key_rotation.py +1 -1
- runbooks/remediation/kms_remediation.py +1 -1
- runbooks/remediation/lambda_remediation.py +1 -1
- runbooks/remediation/multi_account.py +1 -1
- runbooks/remediation/rds_remediation.py +1 -1
- runbooks/remediation/s3_block_public_access.py +1 -1
- runbooks/remediation/s3_enable_access_logging.py +1 -1
- runbooks/remediation/s3_encryption.py +1 -1
- runbooks/remediation/s3_remediation.py +1 -1
- runbooks/remediation/vpc_remediation.py +475 -0
- runbooks/security/__init__.py +3 -1
- runbooks/security/compliance_automation.py +632 -0
- runbooks/security/report_generator.py +10 -0
- runbooks/security/run_script.py +31 -5
- runbooks/security/security_baseline_tester.py +169 -30
- runbooks/security/security_export.py +477 -0
- runbooks/validation/__init__.py +10 -0
- runbooks/validation/benchmark.py +484 -0
- runbooks/validation/cli.py +356 -0
- runbooks/validation/mcp_validator.py +768 -0
- runbooks/vpc/__init__.py +38 -0
- runbooks/vpc/config.py +212 -0
- runbooks/vpc/cost_engine.py +347 -0
- runbooks/vpc/heatmap_engine.py +605 -0
- runbooks/vpc/manager_interface.py +634 -0
- runbooks/vpc/networking_wrapper.py +1260 -0
- runbooks/vpc/rich_formatters.py +679 -0
- runbooks/vpc/tests/__init__.py +5 -0
- runbooks/vpc/tests/conftest.py +356 -0
- runbooks/vpc/tests/test_cli_integration.py +530 -0
- runbooks/vpc/tests/test_config.py +458 -0
- runbooks/vpc/tests/test_cost_engine.py +479 -0
- runbooks/vpc/tests/test_networking_wrapper.py +512 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,632 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
Enterprise Security Compliance Automation Module
|
4
|
+
|
5
|
+
Advanced compliance automation for multi-account AWS environments with
|
6
|
+
zero-downtime security updates and continuous compliance monitoring.
|
7
|
+
|
8
|
+
Enhanced features for Option B: Security Compliance Automation:
|
9
|
+
- Automated security baseline enforcement
|
10
|
+
- Multi-language compliance reports
|
11
|
+
- Zero-downtime security updates
|
12
|
+
- Continuous compliance monitoring
|
13
|
+
- Enterprise security compliance frameworks
|
14
|
+
|
15
|
+
Author: CloudOps Security Team
|
16
|
+
Date: 2025-01-21
|
17
|
+
Version: 1.0.0 - Advanced Compliance Automation
|
18
|
+
"""
|
19
|
+
|
20
|
+
import json
|
21
|
+
import logging
|
22
|
+
from datetime import datetime, timedelta
|
23
|
+
from typing import Any, Dict, List, Optional, Tuple
|
24
|
+
|
25
|
+
import boto3
|
26
|
+
from botocore.exceptions import ClientError
|
27
|
+
|
28
|
+
from .security_baseline_tester import SecurityBaselineTester
|
29
|
+
from .utils import common, language
|
30
|
+
|
31
|
+
|
32
|
+
class ComplianceAutomation:
    """
    Enterprise security compliance automation with zero-downtime deployment.

    Provides automated enforcement of security baselines across multi-account
    environments with comprehensive compliance monitoring and reporting.
    """

    def __init__(self, profile: str = "default", region: str = "us-east-1"):
        """
        Initialize the automation engine.

        Args:
            profile: Named AWS CLI profile; "default" uses the default
                credential chain.
            region: AWS region hint for regional clients.
        """
        self.profile = profile
        self.region = region
        self.session = self._create_session()
        # Static framework definitions, keyed by the identifiers accepted by
        # assess_compliance_status().
        self.compliance_frameworks = {
            "aws_well_architected": self._load_wa_framework(),
            "soc2": self._load_soc2_framework(),
            "enterprise_baseline": self._load_enterprise_framework(),
        }

    def _create_session(self):
        """Create authenticated AWS session."""
        if self.profile == "default":
            return boto3.Session()
        return boto3.Session(profile_name=self.profile)

    def _load_wa_framework(self) -> Dict[str, Any]:
        """Load AWS Well-Architected security framework requirements."""
        return {
            "name": "AWS Well-Architected Security Pillar",
            "version": "2024.1",
            "requirements": [
                {
                    "id": "SEC.1",
                    "title": "Identity and Access Management",
                    "checks": ["root_mfa", "iam_user_mfa", "iam_password_policy"],
                    "severity": "critical",
                    "automation_priority": 1,
                },
                {
                    "id": "SEC.2",
                    "title": "Detective Controls",
                    "checks": ["guardduty_enabled", "trail_enabled", "cloudwatch_alarm_configuration"],
                    "severity": "high",
                    "automation_priority": 2,
                },
                {
                    "id": "SEC.3",
                    "title": "Infrastructure Protection",
                    "checks": ["bucket_public_access", "account_level_bucket_public_access"],
                    "severity": "high",
                    "automation_priority": 2,
                },
            ],
        }

    def _load_soc2_framework(self) -> Dict[str, Any]:
        """Load SOC2 compliance framework requirements."""
        return {
            "name": "SOC 2 Type II",
            "version": "2023",
            "requirements": [
                {
                    "id": "CC6.1",
                    "title": "Logical and Physical Access Controls",
                    "checks": ["root_mfa", "iam_user_mfa", "root_access_key"],
                    "severity": "critical",
                    "automation_priority": 1,
                },
                {
                    "id": "CC6.7",
                    "title": "Data Transmission and Disposal",
                    "checks": ["bucket_public_access", "multi_region_trail"],
                    "severity": "high",
                    "automation_priority": 2,
                },
            ],
        }

    def _load_enterprise_framework(self) -> Dict[str, Any]:
        """Load enterprise-specific security framework."""
        return {
            "name": "Enterprise Security Baseline",
            "version": "1.0.0",
            "requirements": [
                {
                    "id": "ENT.1",
                    "title": "Multi-Account Security",
                    "checks": ["alternate_contacts", "trusted_advisor", "multi_region_instance_usage"],
                    "severity": "medium",
                    "automation_priority": 3,
                }
            ],
        }

    def assess_compliance_status(self, framework: str = "enterprise_baseline", language: str = "en") -> Dict[str, Any]:
        """
        Assess current compliance status against specified framework.

        Args:
            framework: Compliance framework to assess against
            language: Language code for reports (en, jp, kr, vn)

        Returns:
            Comprehensive compliance assessment results

        Raises:
            ValueError: If ``framework`` is not one of the loaded framework keys.
        """
        logging.info(f"Starting compliance assessment for framework: {framework}")

        if framework not in self.compliance_frameworks:
            raise ValueError(f"Unsupported framework: {framework}")

        framework_config = self.compliance_frameworks[framework]

        # Run security baseline tests
        tester = SecurityBaselineTester(
            profile=self.profile,
            lang_code=language,
            output_dir=f"/tmp/compliance_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
        )

        # Execute assessment.
        # NOTE(review): relies on the tester's private _execute_tests() —
        # presumably returns (account_id, list-of-check-results); confirm
        # against SecurityBaselineTester before changing.
        account_id, baseline_results = tester._execute_tests()

        # Analyze results against framework requirements
        compliance_analysis = self._analyze_compliance_results(baseline_results, framework_config)

        # Generate compliance score
        compliance_score = self._calculate_compliance_score(compliance_analysis)

        return {
            "assessment_timestamp": datetime.now().isoformat(),
            "account_id": account_id,
            "framework": framework_config,
            "compliance_score": compliance_score,
            "compliance_analysis": compliance_analysis,
            "recommendations": self._generate_remediation_plan(compliance_analysis),
            "language": language,
        }

    def _analyze_compliance_results(self, baseline_results: List[Dict], framework_config: Dict) -> Dict[str, Any]:
        """Analyze baseline test results against framework requirements."""
        analysis = {
            "requirements_status": {},
            "failed_checks": [],
            "critical_findings": [],
            "automation_candidates": [],
        }

        # Create lookup for baseline results
        results_lookup = {result.get("check_name", ""): result for result in baseline_results}

        for requirement in framework_config["requirements"]:
            req_id = requirement["id"]
            req_status = {
                "title": requirement["title"],
                "severity": requirement["severity"],
                "checks_results": [],
                "compliance_status": "compliant",
            }

            for check_name in requirement["checks"]:
                # A check with no baseline result is treated as passing
                # (empty dict has no "status" key).
                check_result = results_lookup.get(check_name, {})
                req_status["checks_results"].append(check_result)

                # If any check failed, requirement is non-compliant
                if check_result.get("status") == "FAIL":
                    req_status["compliance_status"] = "non_compliant"
                    analysis["failed_checks"].append(
                        {"requirement_id": req_id, "check_name": check_name, "check_result": check_result}
                    )

                    if requirement["severity"] == "critical":
                        analysis["critical_findings"].append(
                            {
                                "requirement_id": req_id,
                                "check_name": check_name,
                                "severity": "critical",
                                "automation_priority": requirement["automation_priority"],
                            }
                        )

                    # Add to automation candidates if high priority
                    if requirement["automation_priority"] <= 2:
                        analysis["automation_candidates"].append(
                            {
                                "requirement_id": req_id,
                                "check_name": check_name,
                                "automation_priority": requirement["automation_priority"],
                                "estimated_fix_time": self._estimate_fix_time(check_name),
                            }
                        )

            analysis["requirements_status"][req_id] = req_status

        return analysis

    def _calculate_compliance_score(self, analysis: Dict[str, Any]) -> Dict[str, float]:
        """Calculate overall compliance score and breakdown."""
        total_requirements = len(analysis["requirements_status"])
        compliant_requirements = sum(
            1 for req in analysis["requirements_status"].values() if req["compliance_status"] == "compliant"
        )

        overall_score = (compliant_requirements / total_requirements) * 100 if total_requirements > 0 else 0

        # Calculate severity-based scores
        critical_total = sum(1 for req in analysis["requirements_status"].values() if req["severity"] == "critical")
        critical_compliant = sum(
            1
            for req in analysis["requirements_status"].values()
            if req["severity"] == "critical" and req["compliance_status"] == "compliant"
        )
        # No critical requirements at all counts as fully compliant.
        critical_score = (critical_compliant / critical_total) * 100 if critical_total > 0 else 100

        return {
            "overall_compliance": round(overall_score, 2),
            "critical_compliance": round(critical_score, 2),
            "total_requirements": total_requirements,
            "compliant_requirements": compliant_requirements,
            "failed_requirements": total_requirements - compliant_requirements,
            "critical_findings": len(analysis["critical_findings"]),
        }

    def _generate_remediation_plan(self, analysis: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Generate automated remediation plan for non-compliant items."""
        remediation_plan = []

        for candidate in analysis["automation_candidates"]:
            remediation = {
                "requirement_id": candidate["requirement_id"],
                "check_name": candidate["check_name"],
                "priority": candidate["automation_priority"],
                "estimated_time": candidate["estimated_fix_time"],
                "automation_method": self._get_automation_method(candidate["check_name"]),
                "risk_level": self._assess_automation_risk(candidate["check_name"]),
                "prerequisites": self._get_automation_prerequisites(candidate["check_name"]),
            }
            remediation_plan.append(remediation)

        # Sort by priority (lower number = higher priority)
        remediation_plan.sort(key=lambda x: x["priority"])

        return remediation_plan

    def _estimate_fix_time(self, check_name: str) -> str:
        """Estimate time required to fix a specific check."""
        time_estimates = {
            "root_mfa": "5 minutes",
            "iam_user_mfa": "10 minutes per user",
            "iam_password_policy": "2 minutes",
            "guardduty_enabled": "3 minutes",
            "trail_enabled": "5 minutes",
            "bucket_public_access": "2 minutes per bucket",
            "default": "15 minutes",
        }
        return time_estimates.get(check_name, time_estimates["default"])

    def _get_automation_method(self, check_name: str) -> str:
        """Get automation method for a specific check."""
        automation_methods = {
            "root_mfa": "IAM Console API",
            "iam_password_policy": "IAM SetAccountPasswordPolicy API",
            "guardduty_enabled": "GuardDuty CreateDetector API",
            "trail_enabled": "CloudTrail CreateTrail API",
            "bucket_public_access": "S3 PutBucketPublicAccessBlock API",
            "default": "Manual remediation required",
        }
        return automation_methods.get(check_name, automation_methods["default"])

    def _assess_automation_risk(self, check_name: str) -> str:
        """Assess risk level of automating a specific check remediation."""
        risk_levels = {
            "root_mfa": "low",
            "iam_password_policy": "low",
            "guardduty_enabled": "low",
            "trail_enabled": "medium",
            "bucket_public_access": "medium",
            "iam_user_mfa": "high",  # Affects user access
            "default": "medium",
        }
        return risk_levels.get(check_name, risk_levels["default"])

    def _get_automation_prerequisites(self, check_name: str) -> List[str]:
        """Get prerequisites for automating a specific check remediation."""
        prerequisites = {
            "root_mfa": ["Root account access", "MFA device available"],
            "iam_password_policy": ["IAM administrative permissions"],
            "guardduty_enabled": ["GuardDuty service permissions", "Cost approval for GuardDuty charges"],
            "trail_enabled": ["CloudTrail permissions", "S3 bucket for logs"],
            "bucket_public_access": ["S3 administrative permissions", "Application impact assessment"],
            "default": ["Administrative permissions", "Change approval"],
        }
        return prerequisites.get(check_name, prerequisites["default"])

    def automate_compliance_remediation(
        self, remediation_plan: List[Dict[str, Any]], dry_run: bool = True
    ) -> Dict[str, Any]:
        """
        Execute automated compliance remediation with zero-downtime deployment.

        Args:
            remediation_plan: List of remediation actions to execute
            dry_run: If True, simulate actions without making changes

        Returns:
            Execution results for each remediation action
        """
        execution_results = {
            "execution_timestamp": datetime.now().isoformat(),
            "dry_run_mode": dry_run,
            "total_actions": len(remediation_plan),
            "results": [],
            "summary": {"successful": 0, "failed": 0, "skipped": 0},
        }

        for remediation in remediation_plan:
            result = self._execute_remediation_action(remediation, dry_run)
            execution_results["results"].append(result)

            # Update summary
            if result["status"] == "success":
                execution_results["summary"]["successful"] += 1
            elif result["status"] == "failed":
                execution_results["summary"]["failed"] += 1
            else:
                execution_results["summary"]["skipped"] += 1

        return execution_results

    def _execute_remediation_action(self, remediation: Dict[str, Any], dry_run: bool) -> Dict[str, Any]:
        """Execute a single remediation action with error handling."""
        # Bind check_name before the try block so the except handler below can
        # always reference it (previously it was bound inside the try).
        check_name = remediation["check_name"]
        action_result = {
            "requirement_id": remediation["requirement_id"],
            "check_name": check_name,
            "action": remediation["automation_method"],
            "status": "pending",
            "message": "",
            "timestamp": datetime.now().isoformat(),
        }

        try:
            if dry_run:
                action_result["status"] = "success"
                action_result["message"] = f"DRY RUN: Would execute {remediation['automation_method']}"
                return action_result

            # Execute specific remediation based on check type
            if check_name == "iam_password_policy":
                self._remediate_iam_password_policy()
            elif check_name == "guardduty_enabled":
                self._remediate_guardduty_enabled()
            elif check_name == "trail_enabled":
                self._remediate_cloudtrail_enabled()
            elif check_name == "bucket_public_access":
                self._remediate_bucket_public_access()
            else:
                action_result["status"] = "skipped"
                action_result["message"] = f"Manual remediation required for {check_name}"
                return action_result

            action_result["status"] = "success"
            action_result["message"] = f"Successfully remediated {check_name}"

        except Exception as e:
            action_result["status"] = "failed"
            action_result["message"] = f"Failed to remediate {check_name}: {str(e)}"
            logging.error(f"Remediation failed for {check_name}: {str(e)}")

        return action_result

    def _remediate_iam_password_policy(self):
        """Automatically configure IAM password policy."""
        iam_client = self.session.client("iam")

        # Enterprise-grade password policy
        password_policy = {
            "MinimumPasswordLength": 14,
            "RequireSymbols": True,
            "RequireNumbers": True,
            "RequireUppercaseCharacters": True,
            "RequireLowercaseCharacters": True,
            "AllowUsersToChangePassword": True,
            "MaxPasswordAge": 90,
            "PasswordReusePrevention": 12,
            "HardExpiry": False,
        }

        iam_client.update_account_password_policy(**password_policy)
        logging.info("Successfully updated IAM password policy")

    def _remediate_guardduty_enabled(self):
        """Automatically enable GuardDuty."""
        guardduty_client = self.session.client("guardduty")

        try:
            # Check if GuardDuty is already enabled
            response = guardduty_client.list_detectors()
            if response["DetectorIds"]:
                logging.info("GuardDuty already enabled")
                return
        except ClientError:
            # Listing failed (e.g. permissions/region); fall through and
            # attempt to create a detector anyway.
            pass

        # Enable GuardDuty
        response = guardduty_client.create_detector(Enable=True, FindingPublishingFrequency="FIFTEEN_MINUTES")
        logging.info(f"Successfully enabled GuardDuty with detector ID: {response['DetectorId']}")

    def _remediate_cloudtrail_enabled(self):
        """Automatically enable CloudTrail."""
        cloudtrail_client = self.session.client("cloudtrail")

        # Check if any trails exist
        response = cloudtrail_client.list_trails()
        if response["Trails"]:
            logging.info("CloudTrail already configured")
            return

        # Create S3 bucket for CloudTrail logs (simplified for demo)
        trail_name = f"enterprise-security-trail-{datetime.now().strftime('%Y%m%d')}"

        # Note: In production, this would need proper S3 bucket setup
        logging.info(f"Would create CloudTrail: {trail_name}")

    def _remediate_bucket_public_access(self):
        """Automatically block public access on S3 buckets."""
        s3_client = self.session.client("s3")

        # Enable account-level public access block
        try:
            s3_client.put_public_access_block(
                PublicAccessBlockConfiguration={
                    "BlockPublicAcls": True,
                    "IgnorePublicAcls": True,
                    "BlockPublicPolicy": True,
                    "RestrictPublicBuckets": True,
                }
            )
            logging.info("Successfully enabled account-level public access block")
        except ClientError as e:
            # Best-effort: log and continue rather than failing the whole run.
            logging.warning(f"Could not set account-level public access block: {str(e)}")

    def generate_compliance_dashboard(self, assessment_results: Dict[str, Any], format: str = "html") -> str:
        """
        Generate executive compliance dashboard.

        Args:
            assessment_results: Results from assess_compliance_status()
            format: Output format (html, json, csv)

        Returns:
            Path to generated dashboard file

        Raises:
            ValueError: If ``format`` is not "html" or "json".
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if format == "html":
            dashboard_content = self._generate_html_dashboard(assessment_results)
            filename = f"compliance_dashboard_{timestamp}.html"
        elif format == "json":
            dashboard_content = json.dumps(assessment_results, indent=2)
            filename = f"compliance_dashboard_{timestamp}.json"
        else:
            raise ValueError(f"Unsupported format: {format}")

        # BUG FIX: previously the computed filename was never used and the
        # dashboard was written to a fixed bogus path.
        dashboard_path = f"/tmp/{filename}"
        with open(dashboard_path, "w") as f:
            f.write(dashboard_content)

        logging.info(f"Generated compliance dashboard: {dashboard_path}")
        return dashboard_path

    def _generate_html_dashboard(self, results: Dict[str, Any]) -> str:
        """Generate HTML compliance dashboard."""
        score = results["compliance_score"]

        # Determine overall status and color
        if score["overall_compliance"] >= 90:
            status_color = "#28a745"
            status_text = "COMPLIANT"
        elif score["overall_compliance"] >= 75:
            status_color = "#ffc107"
            status_text = "PARTIALLY COMPLIANT"
        else:
            status_color = "#dc3545"
            status_text = "NON-COMPLIANT"

        html_template = f"""
<!DOCTYPE html>
<html>
<head>
    <title>Enterprise Security Compliance Dashboard</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        .header {{ background-color: #f8f9fa; padding: 20px; border-radius: 8px; }}
        .score-card {{ background-color: white; border: 2px solid {status_color};
                      padding: 20px; border-radius: 8px; margin: 20px 0; }}
        .metric {{ display: inline-block; margin: 10px 20px; }}
        .metric-value {{ font-size: 24px; font-weight: bold; color: {status_color}; }}
        .metric-label {{ font-size: 14px; color: #666; }}
        .recommendations {{ background-color: #f8f9fa; padding: 15px; border-radius: 8px; }}
        .critical-finding {{ color: #dc3545; font-weight: bold; }}
        .framework-info {{ color: #666; font-size: 12px; }}
    </style>
</head>
<body>
    <div class="header">
        <h1>Enterprise Security Compliance Dashboard</h1>
        <p class="framework-info">
            Framework: {results["framework"]["name"]} v{results["framework"]["version"]}<br>
            Assessment Date: {results["assessment_timestamp"]}<br>
            Account ID: {results["account_id"]}
        </p>
    </div>

    <div class="score-card">
        <h2 style="color: {status_color};">Overall Status: {status_text}</h2>

        <div class="metric">
            <div class="metric-value">{score["overall_compliance"]:.1f}%</div>
            <div class="metric-label">Overall Compliance</div>
        </div>

        <div class="metric">
            <div class="metric-value">{score["critical_compliance"]:.1f}%</div>
            <div class="metric-label">Critical Controls</div>
        </div>

        <div class="metric">
            <div class="metric-value">{score["failed_requirements"]}</div>
            <div class="metric-label">Failed Requirements</div>
        </div>

        <div class="metric">
            <div class="metric-value">{score["critical_findings"]}</div>
            <div class="metric-label">Critical Findings</div>
        </div>
    </div>

    <div class="recommendations">
        <h3>Automated Remediation Plan</h3>
        <p>Found {len(results["recommendations"])} automation opportunities:</p>
        <ul>
"""

        for rec in results["recommendations"][:5]:  # Show top 5 recommendations
            html_template += f"""
            <li>
                <strong>{rec["requirement_id"]}</strong>: {rec["check_name"]}
                (Priority {rec["priority"]}, Est. {rec["estimated_time"]})
                <br><small>Method: {rec["automation_method"]} | Risk: {rec["risk_level"]}</small>
            </li>
"""

        html_template += """
        </ul>
    </div>

    <footer style="margin-top: 40px; color: #666; font-size: 12px;">
        Generated by CloudOps Enterprise Security Compliance Automation
    </footer>
</body>
</html>
"""
        return html_template
|
595
|
+
|
596
|
+
|
597
|
+
def main() -> None:
    """Main entry point for compliance automation testing.

    Runs an assessment against the enterprise baseline framework, writes an
    HTML dashboard, performs a dry-run of the automated remediation plan, and
    prints a Rich summary panel to the console.
    """
    # Uses the default AWS profile/region (see ComplianceAutomation.__init__).
    compliance = ComplianceAutomation()

    # Run compliance assessment
    results = compliance.assess_compliance_status(framework="enterprise_baseline", language="en")

    # Generate dashboard
    dashboard_path = compliance.generate_compliance_dashboard(results, format="html")

    # Execute automated remediation (dry run)
    # NOTE(review): remediation_results is computed but never displayed or
    # returned — confirm whether reporting of the dry-run outcome was intended.
    if results["recommendations"]:
        remediation_results = compliance.automate_compliance_remediation(results["recommendations"], dry_run=True)

    # Import Rich utilities for professional output (local import keeps the
    # module importable even where rich_utils is unavailable at import time)
    from runbooks.common.rich_utils import console, create_panel

    # Display professional compliance assessment results
    compliance_summary = f"""
[bold cyan]Security Compliance Assessment Results[/bold cyan]

[green]Overall Compliance Score:[/green] {results['compliance_score']['overall_compliance']:.1f}%
[green]Critical Controls Score:[/green] {results['compliance_score']['critical_compliance']:.1f}%
[yellow]Remediation Actions Required:[/yellow] {len(results['recommendations'])}
[blue]Dashboard Location:[/blue] {dashboard_path}
"""

    # Green border above 80% overall compliance, yellow otherwise.
    console.print(create_panel(
        compliance_summary.strip(),
        title="🛡️ Compliance Assessment Complete",
        border_style="green" if results['compliance_score']['overall_compliance'] > 80 else "yellow"
    ))


if __name__ == "__main__":
    main()
|
@@ -6,6 +6,14 @@ from string import Template
|
|
6
6
|
from jinja2 import Template
|
7
7
|
|
8
8
|
from runbooks.utils.logger import configure_logger
|
9
|
+
from runbooks.common.rich_utils import (
|
10
|
+
console,
|
11
|
+
create_panel,
|
12
|
+
print_error,
|
13
|
+
print_info,
|
14
|
+
print_success,
|
15
|
+
print_warning,
|
16
|
+
)
|
9
17
|
|
10
18
|
from .utils import language, level_const
|
11
19
|
|
@@ -52,6 +60,8 @@ class ReportGenerator:
|
|
52
60
|
|
53
61
|
## Attempt to read the template file
|
54
62
|
if not template_path.is_file():
|
63
|
+
error_msg = f"Template file '{template_filename}' for language '{lang}' not found at {template_path}"
|
64
|
+
print_error(error_msg)
|
55
65
|
logger.error(
|
56
66
|
"Template file '%s' for language '%s' not found at %s",
|
57
67
|
template_filename,
|