aws-cis-controls-assessment 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_cis_assessment/__init__.py +11 -0
- aws_cis_assessment/cli/__init__.py +3 -0
- aws_cis_assessment/cli/examples.py +274 -0
- aws_cis_assessment/cli/main.py +1259 -0
- aws_cis_assessment/cli/utils.py +356 -0
- aws_cis_assessment/config/__init__.py +1 -0
- aws_cis_assessment/config/config_loader.py +328 -0
- aws_cis_assessment/config/rules/cis_controls_ig1.yaml +590 -0
- aws_cis_assessment/config/rules/cis_controls_ig2.yaml +412 -0
- aws_cis_assessment/config/rules/cis_controls_ig3.yaml +100 -0
- aws_cis_assessment/controls/__init__.py +1 -0
- aws_cis_assessment/controls/base_control.py +400 -0
- aws_cis_assessment/controls/ig1/__init__.py +239 -0
- aws_cis_assessment/controls/ig1/control_1_1.py +586 -0
- aws_cis_assessment/controls/ig1/control_2_2.py +231 -0
- aws_cis_assessment/controls/ig1/control_3_3.py +718 -0
- aws_cis_assessment/controls/ig1/control_3_4.py +235 -0
- aws_cis_assessment/controls/ig1/control_4_1.py +461 -0
- aws_cis_assessment/controls/ig1/control_access_keys.py +310 -0
- aws_cis_assessment/controls/ig1/control_advanced_security.py +512 -0
- aws_cis_assessment/controls/ig1/control_backup_recovery.py +510 -0
- aws_cis_assessment/controls/ig1/control_cloudtrail_logging.py +197 -0
- aws_cis_assessment/controls/ig1/control_critical_security.py +422 -0
- aws_cis_assessment/controls/ig1/control_data_protection.py +898 -0
- aws_cis_assessment/controls/ig1/control_iam_advanced.py +573 -0
- aws_cis_assessment/controls/ig1/control_iam_governance.py +493 -0
- aws_cis_assessment/controls/ig1/control_iam_policies.py +383 -0
- aws_cis_assessment/controls/ig1/control_instance_optimization.py +100 -0
- aws_cis_assessment/controls/ig1/control_network_enhancements.py +203 -0
- aws_cis_assessment/controls/ig1/control_network_security.py +672 -0
- aws_cis_assessment/controls/ig1/control_s3_enhancements.py +173 -0
- aws_cis_assessment/controls/ig1/control_s3_security.py +422 -0
- aws_cis_assessment/controls/ig1/control_vpc_security.py +235 -0
- aws_cis_assessment/controls/ig2/__init__.py +172 -0
- aws_cis_assessment/controls/ig2/control_3_10.py +698 -0
- aws_cis_assessment/controls/ig2/control_3_11.py +1330 -0
- aws_cis_assessment/controls/ig2/control_5_2.py +393 -0
- aws_cis_assessment/controls/ig2/control_advanced_encryption.py +355 -0
- aws_cis_assessment/controls/ig2/control_codebuild_security.py +263 -0
- aws_cis_assessment/controls/ig2/control_encryption_rest.py +382 -0
- aws_cis_assessment/controls/ig2/control_encryption_transit.py +382 -0
- aws_cis_assessment/controls/ig2/control_network_ha.py +467 -0
- aws_cis_assessment/controls/ig2/control_remaining_encryption.py +426 -0
- aws_cis_assessment/controls/ig2/control_remaining_rules.py +363 -0
- aws_cis_assessment/controls/ig2/control_service_logging.py +402 -0
- aws_cis_assessment/controls/ig3/__init__.py +49 -0
- aws_cis_assessment/controls/ig3/control_12_8.py +395 -0
- aws_cis_assessment/controls/ig3/control_13_1.py +467 -0
- aws_cis_assessment/controls/ig3/control_3_14.py +523 -0
- aws_cis_assessment/controls/ig3/control_7_1.py +359 -0
- aws_cis_assessment/core/__init__.py +1 -0
- aws_cis_assessment/core/accuracy_validator.py +425 -0
- aws_cis_assessment/core/assessment_engine.py +1266 -0
- aws_cis_assessment/core/audit_trail.py +491 -0
- aws_cis_assessment/core/aws_client_factory.py +313 -0
- aws_cis_assessment/core/error_handler.py +607 -0
- aws_cis_assessment/core/models.py +166 -0
- aws_cis_assessment/core/scoring_engine.py +459 -0
- aws_cis_assessment/reporters/__init__.py +8 -0
- aws_cis_assessment/reporters/base_reporter.py +454 -0
- aws_cis_assessment/reporters/csv_reporter.py +835 -0
- aws_cis_assessment/reporters/html_reporter.py +2162 -0
- aws_cis_assessment/reporters/json_reporter.py +561 -0
- aws_cis_controls_assessment-1.0.3.dist-info/METADATA +248 -0
- aws_cis_controls_assessment-1.0.3.dist-info/RECORD +77 -0
- aws_cis_controls_assessment-1.0.3.dist-info/WHEEL +5 -0
- aws_cis_controls_assessment-1.0.3.dist-info/entry_points.txt +2 -0
- aws_cis_controls_assessment-1.0.3.dist-info/licenses/LICENSE +21 -0
- aws_cis_controls_assessment-1.0.3.dist-info/top_level.txt +2 -0
- docs/README.md +94 -0
- docs/assessment-logic.md +766 -0
- docs/cli-reference.md +698 -0
- docs/config-rule-mappings.md +393 -0
- docs/developer-guide.md +858 -0
- docs/installation.md +299 -0
- docs/troubleshooting.md +634 -0
- docs/user-guide.md +487 -0
|
@@ -0,0 +1,835 @@
|
|
|
1
|
+
"""CSV Reporter for CIS Controls compliance assessment reports."""
|
|
2
|
+
|
|
3
|
+
import csv
|
|
4
|
+
import logging
|
|
5
|
+
from typing import Dict, Any, List, Optional
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from io import StringIO
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
|
|
10
|
+
from aws_cis_assessment.reporters.base_reporter import ReportGenerator
|
|
11
|
+
from aws_cis_assessment.core.models import (
|
|
12
|
+
AssessmentResult, ComplianceSummary, RemediationGuidance,
|
|
13
|
+
IGScore, ControlScore, ComplianceResult
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class CSVReporter(ReportGenerator):
|
|
20
|
+
"""CSV format reporter for compliance assessment results.
|
|
21
|
+
|
|
22
|
+
Generates spreadsheet-compatible output with flat structure suitable
|
|
23
|
+
for data analysis and filtering. Supports multiple CSV files for
|
|
24
|
+
summary, detailed findings, and remediation guidance with proper
|
|
25
|
+
escaping and formatting for Excel compatibility.
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
def __init__(self, template_dir: Optional[str] = None,
             generate_multiple_files: bool = True,
             excel_compatible: bool = True):
    """Set up the CSV reporter.

    Args:
        template_dir: Optional path to custom report templates.
        generate_multiple_files: Emit one CSV per data type when True,
            a single combined CSV otherwise.
        excel_compatible: Use the Excel CSV dialect when True.
    """
    super().__init__(template_dir)
    self.generate_multiple_files = generate_multiple_files
    self.excel_compatible = excel_compatible
    # Both names are built-in csv module dialects; 'unix' quotes all fields.
    self.csv_dialect = 'excel' if excel_compatible else 'unix'
    logger.info(
        f"Initialized CSVReporter with multiple_files={generate_multiple_files}, "
        f"excel_compatible={excel_compatible}"
    )
|
|
43
|
+
|
|
44
|
+
def generate_report(self, assessment_result: AssessmentResult,
                    compliance_summary: ComplianceSummary,
                    output_path: Optional[str] = None) -> str:
    """Generate a CSV format compliance assessment report.

    Args:
        assessment_result: Complete assessment result data.
        compliance_summary: Executive summary of compliance status.
        output_path: Optional path to save the CSV report(s).

    Returns:
        CSV formatted report content as string (the summary CSV when
        multiple files are generated); empty string on any failure.
    """
    logger.info(f"Generating CSV report for account {assessment_result.account_id}")

    # Guard clauses: bail out early rather than emit a partial report.
    if not self.validate_assessment_data(assessment_result, compliance_summary):
        logger.error("Assessment data validation failed")
        return ""

    report_data = self._prepare_report_data(assessment_result, compliance_summary)
    if not self._validate_report_data(report_data):
        logger.error("Report data validation failed")
        return ""

    enhanced = self._enhance_csv_structure(report_data)

    try:
        if not self.generate_multiple_files:
            # One combined CSV holding every section.
            content = self._generate_single_csv_file(enhanced)
            if output_path:
                if self._save_report_to_file(content, output_path):
                    logger.info(f"CSV report saved to {output_path}")
                else:
                    logger.error(f"Failed to save CSV report to {output_path}")
            return content

        # Separate CSV per data type; the summary file doubles as the
        # string returned to the caller.
        per_type_content = self._generate_multiple_csv_files(enhanced)
        if output_path:
            self._save_multiple_csv_files(per_type_content, output_path)
        return per_type_content.get('summary', '')
    except Exception as e:
        logger.error(f"Failed to generate CSV report: {e}")
        return ""
|
|
102
|
+
|
|
103
|
+
def get_supported_formats(self) -> List[str]:
    """Report the output formats this reporter can produce.

    Returns:
        A single-element list: ['csv'].
    """
    supported = ['csv']
    return supported
|
|
110
|
+
|
|
111
|
+
def _enhance_csv_structure(self, report_data: Dict[str, Any]) -> Dict[str, Any]:
    """Enhance report data structure for CSV-specific requirements.

    Args:
        report_data: Base report data from parent class.

    Returns:
        Enhanced data structure optimized for CSV output, including a
        "flattened_data" key with flat record lists.
    """
    csv_data = {
        "report_format": "csv",
        "report_version": "1.0",
        "excel_compatible": self.excel_compatible,
        **report_data
    }

    # BUGFIX: the ** spread above is a shallow copy, so csv_data["metadata"]
    # is the same dict object as report_data["metadata"]. Writing into it
    # directly mutated the caller's report_data as a side effect. Copy the
    # mapping before annotating it with CSV-specific fields.
    metadata = dict(csv_data.get("metadata", {}))
    metadata["report_format"] = "csv"
    metadata["flat_structure"] = True
    metadata["multiple_files"] = self.generate_multiple_files
    csv_data["metadata"] = metadata

    # Flatten nested structures for CSV compatibility.
    csv_data["flattened_data"] = self._flatten_data_structures(csv_data)

    return csv_data
|
|
137
|
+
|
|
138
|
+
def _flatten_data_structures(self, csv_data: Dict[str, Any]) -> Dict[str, Any]:
    """Collapse the nested report structure into flat record lists.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Dictionary of flat record lists keyed by record type
        (summary, implementation groups, controls, findings, remediation).
    """
    exec_summary = csv_data["executive_summary"]
    ig_groups = csv_data["implementation_groups"]

    # (display label, source key, unit, category) for each summary metric.
    summary_layout = [
        ("Overall Compliance", "overall_compliance_percentage", "percentage", "executive_summary"),
        ("IG1 Compliance", "ig1_compliance_percentage", "percentage", "implementation_group"),
        ("IG2 Compliance", "ig2_compliance_percentage", "percentage", "implementation_group"),
        ("IG3 Compliance", "ig3_compliance_percentage", "percentage", "implementation_group"),
        ("Total Resources", "total_resources", "count", "resource_summary"),
        ("Compliant Resources", "compliant_resources", "count", "resource_summary"),
        ("Non-Compliant Resources", "non_compliant_resources", "count", "resource_summary"),
    ]
    summary_records = [
        {"metric": label, "value": exec_summary.get(key, 0), "unit": unit, "category": category}
        for label, key, unit, category in summary_layout
    ]

    ig_records = [
        {
            "implementation_group": ig_name,
            "total_controls": ig_data["total_controls"],
            "compliant_controls": ig_data["compliant_controls"],
            "compliance_percentage": ig_data["compliance_percentage"],
            "controls_at_risk": ig_data["total_controls"] - ig_data["compliant_controls"],
        }
        for ig_name, ig_data in ig_groups.items()
    ]

    control_records = [
        {
            "implementation_group": ig_name,
            "control_id": control_id,
            "control_title": control.get("title", ""),
            "total_resources": control["total_resources"],
            "compliant_resources": control["compliant_resources"],
            "compliance_percentage": control["compliance_percentage"],
            "non_compliant_count": len(control.get("non_compliant_findings", [])),
            "config_rules_evaluated": "; ".join(control["config_rules_evaluated"]),
            "findings_count": control["findings_count"],
        }
        for ig_name, ig_data in ig_groups.items()
        for control_id, control in ig_data["controls"].items()
    ]

    findings_records = [
        {
            "implementation_group": ig_name,
            "control_id": control_id,
            "resource_id": finding["resource_id"],
            "resource_type": finding["resource_type"],
            "compliance_status": finding["compliance_status"],
            "evaluation_reason": finding["evaluation_reason"],
            "config_rule_name": finding["config_rule_name"],
            "region": finding["region"],
            "timestamp": finding["timestamp"],
            "remediation_guidance": finding.get("remediation_guidance", ""),
        }
        for ig_name, ig_findings in csv_data["detailed_findings"].items()
        for control_id, control_findings in ig_findings.items()
        for finding in control_findings
    ]

    remediation_records = [
        {
            "config_rule_name": item["config_rule_name"],
            "control_id": item["control_id"],
            "priority": item["priority"],
            "estimated_effort": item["estimated_effort"],
            "remediation_steps": " | ".join(item["remediation_steps"]),
            "aws_documentation_link": item["aws_documentation_link"],
        }
        for item in csv_data["remediation_priorities"]
    ]

    return {
        "summary_records": summary_records,
        "ig_records": ig_records,
        "control_records": control_records,
        "findings_records": findings_records,
        "remediation_records": remediation_records,
    }
|
|
246
|
+
|
|
247
|
+
def _generate_multiple_csv_files(self, csv_data: Dict[str, Any]) -> Dict[str, str]:
    """Generate one CSV document per data type.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Mapping of file type name to its CSV content.
    """
    # File type -> builder method; insertion order fixes output order.
    builders = {
        "summary": self._generate_summary_csv,
        "findings": self._generate_findings_csv,
        "remediation": self._generate_remediation_csv,
        "controls": self._generate_controls_csv,
        "implementation_groups": self._generate_ig_csv,
    }
    csv_files = {name: build(csv_data) for name, build in builders.items()}

    logger.info(f"Generated {len(csv_files)} CSV files")
    return csv_files
|
|
275
|
+
|
|
276
|
+
def _generate_summary_csv(self, csv_data: Dict[str, Any]) -> str:
    """Render the executive-summary metrics (plus report metadata) as CSV.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Summary CSV content as string.
    """
    buffer = StringIO()
    writer = csv.writer(buffer, dialect=self.csv_dialect)

    writer.writerow(["Metric", "Value", "Unit", "Category"])
    writer.writerows(
        [rec["metric"], rec["value"], rec["unit"], rec["category"]]
        for rec in csv_data["flattened_data"]["summary_records"]
    )

    # Trailing metadata block, separated from the metrics by a blank row.
    meta = csv_data["metadata"]
    writer.writerow([])
    writer.writerow(["Metadata", "", "", ""])
    writer.writerow(["Account ID", meta.get("account_id", ""), "text", "metadata"])
    writer.writerow(["Assessment Date", meta.get("assessment_timestamp", ""), "datetime", "metadata"])
    writer.writerow(["Report Generated", meta.get("report_generated_at", ""), "datetime", "metadata"])
    writer.writerow(["Regions Assessed", "; ".join(meta.get("regions_assessed", [])), "text", "metadata"])
    writer.writerow(["Assessment Duration", meta.get("assessment_duration", ""), "text", "metadata"])

    content = buffer.getvalue()
    buffer.close()
    return content
|
|
317
|
+
|
|
318
|
+
def _generate_findings_csv(self, csv_data: Dict[str, Any]) -> str:
    """Render the per-resource detailed findings table as CSV.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Findings CSV content as string.
    """
    # Column headers and the record fields they map to, in display order.
    headers = ["Implementation Group", "Control ID", "Resource ID", "Resource Type",
               "Compliance Status", "Evaluation Reason", "Config Rule Name",
               "Region", "Timestamp", "Remediation Guidance"]
    fields = ["implementation_group", "control_id", "resource_id", "resource_type",
              "compliance_status", "evaluation_reason", "config_rule_name",
              "region", "timestamp", "remediation_guidance"]

    buffer = StringIO()
    writer = csv.writer(buffer, dialect=self.csv_dialect)
    writer.writerow(headers)
    writer.writerows(
        [record[field] for field in fields]
        for record in csv_data["flattened_data"]["findings_records"]
    )

    content = buffer.getvalue()
    buffer.close()
    return content
|
|
362
|
+
|
|
363
|
+
def _generate_remediation_csv(self, csv_data: Dict[str, Any]) -> str:
    """Render the remediation guidance table as CSV.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Remediation CSV content as string.
    """
    # Column headers and the record fields they map to, in display order.
    headers = ["Config Rule Name", "Control ID", "Priority",
               "Estimated Effort", "Remediation Steps", "AWS Documentation Link"]
    fields = ["config_rule_name", "control_id", "priority",
              "estimated_effort", "remediation_steps", "aws_documentation_link"]

    buffer = StringIO()
    writer = csv.writer(buffer, dialect=self.csv_dialect)
    writer.writerow(headers)
    writer.writerows(
        [record[field] for field in fields]
        for record in csv_data["flattened_data"]["remediation_records"]
    )

    content = buffer.getvalue()
    buffer.close()
    return content
|
|
399
|
+
|
|
400
|
+
def _generate_controls_csv(self, csv_data: Dict[str, Any]) -> str:
    """Render the per-control compliance table as CSV.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Controls CSV content as string.
    """
    # Column headers and the record fields they map to, in display order.
    headers = ["Implementation Group", "Control ID", "Control Title",
               "Total Resources", "Compliant Resources", "Compliance Percentage",
               "Non-Compliant Count", "Config Rules Evaluated", "Total Findings"]
    fields = ["implementation_group", "control_id", "control_title",
              "total_resources", "compliant_resources", "compliance_percentage",
              "non_compliant_count", "config_rules_evaluated", "findings_count"]

    buffer = StringIO()
    writer = csv.writer(buffer, dialect=self.csv_dialect)
    writer.writerow(headers)
    writer.writerows(
        [record[field] for field in fields]
        for record in csv_data["flattened_data"]["control_records"]
    )

    content = buffer.getvalue()
    buffer.close()
    return content
|
|
442
|
+
|
|
443
|
+
def _generate_ig_csv(self, csv_data: Dict[str, Any]) -> str:
    """Render the Implementation Groups summary table as CSV.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Implementation Groups CSV content as string.
    """
    # Column headers and the record fields they map to, in display order.
    headers = ["Implementation Group", "Total Controls", "Compliant Controls",
               "Compliance Percentage", "Controls at Risk"]
    fields = ["implementation_group", "total_controls", "compliant_controls",
              "compliance_percentage", "controls_at_risk"]

    buffer = StringIO()
    writer = csv.writer(buffer, dialect=self.csv_dialect)
    writer.writerow(headers)
    writer.writerows(
        [record[field] for field in fields]
        for record in csv_data["flattened_data"]["ig_records"]
    )

    content = buffer.getvalue()
    buffer.close()
    return content
|
|
477
|
+
|
|
478
|
+
def _generate_single_csv_file(self, csv_data: Dict[str, Any]) -> str:
    """Generate one comprehensive CSV containing every report section.

    Sections (executive summary, metadata, implementation groups, controls,
    detailed findings, remediation guidance) are stacked vertically,
    separated by two blank rows. Detailed findings are capped at 1000
    records to keep the single file manageable.

    The original implementation repeated the same "title / header / rows"
    stanza six times; the local helpers below remove that duplication
    while producing byte-identical output.

    Args:
        csv_data: Enhanced CSV report data.

    Returns:
        Comprehensive CSV content as string.
    """
    output = StringIO()
    writer = csv.writer(output, dialect=self.csv_dialect)
    flattened = csv_data["flattened_data"]

    def write_section(title: str, header: List[str],
                      records: List[Dict[str, Any]], fields: List[str]) -> None:
        # One titled table: banner row, column header, one row per record.
        writer.writerow([title])
        writer.writerow(header)
        for record in records:
            writer.writerow([record[field] for field in fields])

    def write_separator() -> None:
        # Two blank rows between sections.
        writer.writerow([])
        writer.writerow([])

    write_section(
        "EXECUTIVE SUMMARY",
        ["Metric", "Value", "Unit", "Category"],
        flattened["summary_records"],
        ["metric", "value", "unit", "category"],
    )
    write_separator()

    # Metadata is field/value pairs rather than record-shaped, so it is
    # written directly instead of through write_section.
    writer.writerow(["METADATA"])
    writer.writerow(["Field", "Value"])
    metadata = csv_data["metadata"]
    writer.writerow(["Account ID", metadata.get("account_id", "")])
    writer.writerow(["Assessment Date", metadata.get("assessment_timestamp", "")])
    writer.writerow(["Report Generated", metadata.get("report_generated_at", "")])
    writer.writerow(["Regions Assessed", "; ".join(metadata.get("regions_assessed", []))])
    writer.writerow(["Assessment Duration", metadata.get("assessment_duration", "")])
    writer.writerow(["Total Resources Evaluated", metadata.get("total_resources_evaluated", 0)])
    write_separator()

    write_section(
        "IMPLEMENTATION GROUPS",
        ["Implementation Group", "Total Controls", "Compliant Controls",
         "Compliance Percentage", "Controls at Risk"],
        flattened["ig_records"],
        ["implementation_group", "total_controls", "compliant_controls",
         "compliance_percentage", "controls_at_risk"],
    )
    write_separator()

    write_section(
        "CONTROLS",
        ["Implementation Group", "Control ID", "Control Title", "Total Resources",
         "Compliant Resources", "Compliance Percentage", "Non-Compliant Count",
         "Config Rules Evaluated", "Total Findings"],
        flattened["control_records"],
        ["implementation_group", "control_id", "control_title", "total_resources",
         "compliant_resources", "compliance_percentage", "non_compliant_count",
         "config_rules_evaluated", "findings_count"],
    )
    write_separator()

    write_section(
        "DETAILED FINDINGS (Limited to 1000 records)",
        ["Implementation Group", "Control ID", "Resource ID", "Resource Type",
         "Compliance Status", "Evaluation Reason", "Config Rule Name",
         "Region", "Timestamp", "Remediation Guidance"],
        flattened["findings_records"][:1000],  # cap for single-file output
        ["implementation_group", "control_id", "resource_id", "resource_type",
         "compliance_status", "evaluation_reason", "config_rule_name",
         "region", "timestamp", "remediation_guidance"],
    )
    write_separator()

    write_section(
        "REMEDIATION GUIDANCE",
        ["Config Rule Name", "Control ID", "Priority", "Estimated Effort",
         "Remediation Steps", "AWS Documentation Link"],
        flattened["remediation_records"],
        ["config_rule_name", "control_id", "priority", "estimated_effort",
         "remediation_steps", "aws_documentation_link"],
    )

    content = output.getvalue()
    output.close()
    return content
|
|
642
|
+
|
|
643
|
+
def _save_multiple_csv_files(self, csv_files: Dict[str, str], base_output_path: str) -> bool:
    """Write each generated CSV alongside the base path.

    Each file is named "<base stem>_<file type>.csv" and placed in the
    base path's directory, which is created if missing.

    Args:
        csv_files: CSV content keyed by file type.
        base_output_path: Path whose directory and stem the files share.

    Returns:
        True when every file was written, False on any failure.
    """
    try:
        target = Path(base_output_path)
        out_dir = target.parent
        out_dir.mkdir(parents=True, exist_ok=True)

        written = 0
        for file_type, body in csv_files.items():
            destination = out_dir / f"{target.stem}_{file_type}.csv"
            # newline='' leaves line-ending control to the csv module output.
            with open(destination, 'w', encoding='utf-8', newline='') as handle:
                handle.write(body)
            written += 1
            logger.info(f"Saved {file_type} CSV to: {destination}")

        logger.info(f"Successfully saved {written} CSV files")
        return True
    except Exception as e:
        logger.error(f"Failed to save multiple CSV files: {e}")
        return False
|
|
677
|
+
|
|
678
|
+
def set_csv_options(self, generate_multiple_files: bool = True,
                    excel_compatible: bool = True,
                    custom_dialect: Optional[str] = None) -> None:
    """Configure CSV generation options after construction.

    Args:
        generate_multiple_files: Whether to generate separate CSV files.
        excel_compatible: Whether to use Excel-compatible formatting.
        custom_dialect: Custom CSV dialect name; overrides the dialect
            derived from excel_compatible when provided.
    """
    self.generate_multiple_files = generate_multiple_files
    self.excel_compatible = excel_compatible
    # An explicit dialect wins; otherwise derive it from the Excel flag.
    derived = 'excel' if excel_compatible else 'unix'
    self.csv_dialect = custom_dialect if custom_dialect else derived

    logger.debug(f"Updated CSV options: multiple_files={generate_multiple_files}, "
                 f"excel_compatible={excel_compatible}, dialect={self.csv_dialect}")
|
|
698
|
+
|
|
699
|
+
def validate_csv_output(self, csv_content: str) -> bool:
    """Validate that the generated CSV is well-formed.

    Parses the content with the reporter's configured dialect
    (``self.csv_dialect``) and checks that it contains at least one
    non-empty row. Inconsistent column widths only produce a warning,
    never a failure, because section headers/separators legitimately
    differ in width.

    Args:
        csv_content: CSV content string to validate

    Returns:
        True if CSV is valid, False otherwise
    """
    try:
        # Try to parse the CSV content
        reader = csv.reader(StringIO(csv_content), dialect=self.csv_dialect)
        rows = list(reader)

        if not rows:
            logger.error("CSV validation failed: no rows found")
            return False

        # Check that all rows have consistent column counts (allowing for empty rows)
        non_empty_rows = [row for row in rows if any(cell.strip() for cell in row)]
        if not non_empty_rows:
            logger.error("CSV validation failed: no non-empty rows found")
            return False

        # Get expected column count from first non-empty row
        expected_cols = len(non_empty_rows[0])

        # Count rows whose width differs from the first data row.
        # (Replaces a manual counter loop with an unused enumerate index.)
        inconsistent_rows = sum(
            1 for row in non_empty_rows if len(row) != expected_cols
        )

        # Allow up to 20% of rows to have different column counts (for headers/separators)
        if inconsistent_rows > len(non_empty_rows) * 0.2:
            logger.warning(f"CSV validation warning: {inconsistent_rows} rows have inconsistent column counts")

        logger.debug("CSV validation passed")
        return True

    except csv.Error as e:
        logger.error(f"CSV validation failed: {e}")
        return False
    except Exception as e:
        logger.error(f"CSV validation failed with unexpected error: {e}")
        return False
|
|
745
|
+
|
|
746
|
+
def extract_summary_data(self, csv_content: str) -> Optional[Dict[str, Any]]:
    """Extract summary data from generated CSV report.

    Scans every row for known metric labels in the first column
    ("Overall Compliance", "Account ID", "Total Resources",
    "Assessment Date") and collects their values. Numeric values that
    fail to parse are silently skipped, matching best-effort extraction.

    Args:
        csv_content: CSV report content

    Returns:
        Dictionary containing summary data or None if extraction fails
        (or if no known metrics were found)
    """
    try:
        reader = csv.reader(StringIO(csv_content), dialect=self.csv_dialect)
        rows = list(reader)

        summary_data = {}

        # Look for summary data in the CSV.
        # (enumerate() removed: the index was never used.)
        for row in rows:
            if len(row) >= 2:
                metric = row[0].strip()
                value = row[1].strip()

                if metric == "Overall Compliance":
                    try:
                        summary_data["overall_compliance"] = float(value)
                    except ValueError:
                        # Non-numeric compliance value: skip, keep scanning
                        pass
                elif metric == "Account ID":
                    summary_data["account_id"] = value
                elif metric == "Total Resources":
                    try:
                        summary_data["total_resources"] = int(value)
                    except ValueError:
                        pass
                elif metric == "Assessment Date":
                    summary_data["assessment_date"] = value

        return summary_data if summary_data else None

    except Exception as e:
        logger.error(f"Failed to extract summary data from CSV: {e}")
        return None
|
|
787
|
+
|
|
788
|
+
def get_file_extensions(self) -> List[str]:
    """Get list of file extensions used by this reporter.

    Returns:
        List of file extensions: one suffix per report section when
        multiple-file output is enabled, otherwise a single ".csv".
    """
    if not self.generate_multiple_files:
        return [".csv"]

    # One suffixed file per report section.
    sections = ("summary", "findings", "remediation",
                "controls", "implementation_groups")
    return [f"_{section}.csv" for section in sections]
|
|
804
|
+
|
|
805
|
+
def estimate_file_sizes(self, assessment_result: "AssessmentResult") -> Dict[str, int]:
    """Estimate file sizes for the generated CSV files.

    Args:
        assessment_result: Assessment result to analyze

    Returns:
        Dictionary mapping file types to estimated sizes in bytes; a
        single "single_file" entry when multiple-file output is disabled.
    """
    # Rough estimates based on typical data sizes: tally controls and
    # findings across every implementation group in one pass.
    control_count = 0
    finding_count = 0
    for ig in assessment_result.ig_scores.values():
        control_count += len(ig.control_scores)
        for control in ig.control_scores.values():
            finding_count += len(control.findings)

    estimates = {
        "summary": 2000,  # Small summary file
        "implementation_groups": 500,  # Very small IG summary
        "controls": control_count * 150,  # ~150 bytes per control
        "findings": finding_count * 200,  # ~200 bytes per finding
        "remediation": len(assessment_result.ig_scores) * 300  # ~300 bytes per remediation item
    }

    if self.generate_multiple_files:
        return estimates
    # Single-file mode: collapse the per-section estimates into one total.
    return {"single_file": sum(estimates.values())}
|