aws-cis-controls-assessment 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_cis_assessment/__init__.py +11 -0
- aws_cis_assessment/cli/__init__.py +3 -0
- aws_cis_assessment/cli/examples.py +274 -0
- aws_cis_assessment/cli/main.py +1259 -0
- aws_cis_assessment/cli/utils.py +356 -0
- aws_cis_assessment/config/__init__.py +1 -0
- aws_cis_assessment/config/config_loader.py +328 -0
- aws_cis_assessment/config/rules/cis_controls_ig1.yaml +590 -0
- aws_cis_assessment/config/rules/cis_controls_ig2.yaml +412 -0
- aws_cis_assessment/config/rules/cis_controls_ig3.yaml +100 -0
- aws_cis_assessment/controls/__init__.py +1 -0
- aws_cis_assessment/controls/base_control.py +400 -0
- aws_cis_assessment/controls/ig1/__init__.py +239 -0
- aws_cis_assessment/controls/ig1/control_1_1.py +586 -0
- aws_cis_assessment/controls/ig1/control_2_2.py +231 -0
- aws_cis_assessment/controls/ig1/control_3_3.py +718 -0
- aws_cis_assessment/controls/ig1/control_3_4.py +235 -0
- aws_cis_assessment/controls/ig1/control_4_1.py +461 -0
- aws_cis_assessment/controls/ig1/control_access_keys.py +310 -0
- aws_cis_assessment/controls/ig1/control_advanced_security.py +512 -0
- aws_cis_assessment/controls/ig1/control_backup_recovery.py +510 -0
- aws_cis_assessment/controls/ig1/control_cloudtrail_logging.py +197 -0
- aws_cis_assessment/controls/ig1/control_critical_security.py +422 -0
- aws_cis_assessment/controls/ig1/control_data_protection.py +898 -0
- aws_cis_assessment/controls/ig1/control_iam_advanced.py +573 -0
- aws_cis_assessment/controls/ig1/control_iam_governance.py +493 -0
- aws_cis_assessment/controls/ig1/control_iam_policies.py +383 -0
- aws_cis_assessment/controls/ig1/control_instance_optimization.py +100 -0
- aws_cis_assessment/controls/ig1/control_network_enhancements.py +203 -0
- aws_cis_assessment/controls/ig1/control_network_security.py +672 -0
- aws_cis_assessment/controls/ig1/control_s3_enhancements.py +173 -0
- aws_cis_assessment/controls/ig1/control_s3_security.py +422 -0
- aws_cis_assessment/controls/ig1/control_vpc_security.py +235 -0
- aws_cis_assessment/controls/ig2/__init__.py +172 -0
- aws_cis_assessment/controls/ig2/control_3_10.py +698 -0
- aws_cis_assessment/controls/ig2/control_3_11.py +1330 -0
- aws_cis_assessment/controls/ig2/control_5_2.py +393 -0
- aws_cis_assessment/controls/ig2/control_advanced_encryption.py +355 -0
- aws_cis_assessment/controls/ig2/control_codebuild_security.py +263 -0
- aws_cis_assessment/controls/ig2/control_encryption_rest.py +382 -0
- aws_cis_assessment/controls/ig2/control_encryption_transit.py +382 -0
- aws_cis_assessment/controls/ig2/control_network_ha.py +467 -0
- aws_cis_assessment/controls/ig2/control_remaining_encryption.py +426 -0
- aws_cis_assessment/controls/ig2/control_remaining_rules.py +363 -0
- aws_cis_assessment/controls/ig2/control_service_logging.py +402 -0
- aws_cis_assessment/controls/ig3/__init__.py +49 -0
- aws_cis_assessment/controls/ig3/control_12_8.py +395 -0
- aws_cis_assessment/controls/ig3/control_13_1.py +467 -0
- aws_cis_assessment/controls/ig3/control_3_14.py +523 -0
- aws_cis_assessment/controls/ig3/control_7_1.py +359 -0
- aws_cis_assessment/core/__init__.py +1 -0
- aws_cis_assessment/core/accuracy_validator.py +425 -0
- aws_cis_assessment/core/assessment_engine.py +1266 -0
- aws_cis_assessment/core/audit_trail.py +491 -0
- aws_cis_assessment/core/aws_client_factory.py +313 -0
- aws_cis_assessment/core/error_handler.py +607 -0
- aws_cis_assessment/core/models.py +166 -0
- aws_cis_assessment/core/scoring_engine.py +459 -0
- aws_cis_assessment/reporters/__init__.py +8 -0
- aws_cis_assessment/reporters/base_reporter.py +454 -0
- aws_cis_assessment/reporters/csv_reporter.py +835 -0
- aws_cis_assessment/reporters/html_reporter.py +2162 -0
- aws_cis_assessment/reporters/json_reporter.py +561 -0
- aws_cis_controls_assessment-1.0.3.dist-info/METADATA +248 -0
- aws_cis_controls_assessment-1.0.3.dist-info/RECORD +77 -0
- aws_cis_controls_assessment-1.0.3.dist-info/WHEEL +5 -0
- aws_cis_controls_assessment-1.0.3.dist-info/entry_points.txt +2 -0
- aws_cis_controls_assessment-1.0.3.dist-info/licenses/LICENSE +21 -0
- aws_cis_controls_assessment-1.0.3.dist-info/top_level.txt +2 -0
- docs/README.md +94 -0
- docs/assessment-logic.md +766 -0
- docs/cli-reference.md +698 -0
- docs/config-rule-mappings.md +393 -0
- docs/developer-guide.md +858 -0
- docs/installation.md +299 -0
- docs/troubleshooting.md +634 -0
- docs/user-guide.md +487 -0
|
@@ -0,0 +1,561 @@
|
|
|
1
|
+
"""JSON Reporter for CIS Controls compliance assessment reports."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
from typing import Dict, Any, List, Optional
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
from aws_cis_assessment.reporters.base_reporter import ReportGenerator
|
|
9
|
+
from aws_cis_assessment.core.models import (
|
|
10
|
+
AssessmentResult, ComplianceSummary, RemediationGuidance,
|
|
11
|
+
IGScore, ControlScore, ComplianceResult
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class JSONReporter(ReportGenerator):
    """JSON format reporter for compliance assessment results.

    Generates structured JSON output with detailed compliance results,
    resource-level findings, assessment metadata, and remediation guidance.
    Designed for machine-readable integration with other tools.
    """

    def __init__(self, template_dir: Optional[str] = None, indent: int = 2):
        """Initialize JSON reporter.

        Args:
            template_dir: Optional path to custom report templates.
            indent: JSON indentation level for pretty printing (default: 2).
        """
        super().__init__(template_dir)
        self.indent = indent
        # BUGFIX: these were previously created only by set_json_formatting(),
        # while generate_report() hard-coded sort_keys=True / ensure_ascii=False
        # in json.dumps() — making set_json_formatting() a silent no-op.
        # Initialize them here with the same defaults and honor them below.
        self.sort_keys = True
        self.ensure_ascii = False
        logger.info(f"Initialized JSONReporter with indent={indent}")

    def generate_report(self, assessment_result: AssessmentResult,
                        compliance_summary: ComplianceSummary,
                        output_path: Optional[str] = None) -> str:
        """Generate JSON format compliance assessment report.

        Args:
            assessment_result: Complete assessment result data.
            compliance_summary: Executive summary of compliance status.
            output_path: Optional path to save the JSON report.

        Returns:
            JSON formatted report content as string; empty string on
            validation or serialization failure (errors are logged).
        """
        logger.info(f"Generating JSON report for account {assessment_result.account_id}")

        # Validate input data before doing any work.
        if not self.validate_assessment_data(assessment_result, compliance_summary):
            logger.error("Assessment data validation failed")
            return ""

        # Prepare structured report data via the base class.
        report_data = self._prepare_report_data(assessment_result, compliance_summary)

        # Validate prepared data.
        if not self._validate_report_data(report_data):
            logger.error("Report data validation failed")
            return ""

        # Enhance JSON-specific data structure with computed metrics.
        json_report_data = self._enhance_json_structure(report_data)

        try:
            # Serialize using the configurable formatting options
            # (see __init__ / set_json_formatting).
            json_content = json.dumps(
                json_report_data,
                indent=self.indent,
                default=self._json_serializer,
                ensure_ascii=self.ensure_ascii,
                sort_keys=self.sort_keys
            )

            logger.info(f"Generated JSON report with {len(json_content)} characters")

            # Save to file if a path was provided; failure to save is logged
            # but the content is still returned to the caller.
            if output_path:
                if self._save_report_to_file(json_content, output_path):
                    logger.info(f"JSON report saved to {output_path}")
                else:
                    logger.error(f"Failed to save JSON report to {output_path}")

            return json_content

        except Exception as e:
            logger.error(f"Failed to generate JSON report: {e}")
            return ""

    def get_supported_formats(self) -> List[str]:
        """Get list of supported output formats.

        Returns:
            List containing the single 'json' format.
        """
        return ['json']

    def _enhance_json_structure(self, report_data: Dict[str, Any]) -> Dict[str, Any]:
        """Enhance report data structure for JSON-specific requirements.

        Args:
            report_data: Base report data from parent class.

        Returns:
            Enhanced data structure optimized for JSON output, with grades,
            risk levels, per-control statistics, and quality metrics added.
        """
        # Wrap the base data with format/version identifiers.
        json_data = {
            "report_format": "json",
            "report_version": "1.0",
            "schema_version": "2024.1",
            **report_data
        }

        # Add JSON-specific metadata.
        json_data["metadata"]["report_format"] = "json"
        json_data["metadata"]["machine_readable"] = True
        json_data["metadata"]["api_version"] = "v1"

        # Enhance executive summary with additional computed metrics.
        exec_summary = json_data["executive_summary"]

        # Letter grade and risk level derived from overall percentage.
        overall_pct = exec_summary["overall_compliance_percentage"]
        exec_summary["compliance_grade"] = self._calculate_compliance_grade(overall_pct)
        exec_summary["risk_level"] = self._calculate_risk_level(overall_pct)

        # Resource ratios; guard against division by zero when nothing
        # was evaluated.
        total_resources = exec_summary["total_resources"]
        compliant_resources = exec_summary["compliant_resources"]
        non_compliant_resources = exec_summary["non_compliant_resources"]

        if total_resources > 0:
            exec_summary["compliance_ratio"] = compliant_resources / total_resources
            exec_summary["non_compliance_ratio"] = non_compliant_resources / total_resources
        else:
            exec_summary["compliance_ratio"] = 0.0
            exec_summary["non_compliance_ratio"] = 0.0

        # Enhance Implementation Group data with additional metrics.
        for ig_name, ig_data in json_data["implementation_groups"].items():
            ig_data["compliance_status"] = self._get_compliance_status(ig_data["compliance_percentage"])
            ig_data["controls_compliance_ratio"] = (
                ig_data["compliant_controls"] / ig_data["total_controls"]
                if ig_data["total_controls"] > 0 else 0.0
            )

            # Enhance each control with status, risk score, and findings stats.
            for control_id, control_data in ig_data["controls"].items():
                control_data["compliance_status"] = self._get_compliance_status(
                    control_data["compliance_percentage"]
                )
                control_data["risk_score"] = self._calculate_control_risk_score(control_data)

                total_findings = len(control_data.get("non_compliant_findings", []))
                control_data["findings_summary"] = {
                    "total_non_compliant": total_findings,
                    "has_findings": total_findings > 0,
                    "severity_distribution": self._analyze_finding_severity(
                        control_data.get("non_compliant_findings", [])
                    )
                }

        # Enhance remediation priorities with additional context.
        for remediation in json_data["remediation_priorities"]:
            remediation["priority_score"] = self._calculate_priority_score(remediation)
            remediation["effort_category"] = self._categorize_effort(remediation["estimated_effort"])

        # Add aggregate statistics and data-quality metrics.
        json_data["assessment_statistics"] = self._generate_assessment_statistics(json_data)
        json_data["data_quality"] = self._assess_data_quality(json_data)

        return json_data

    def _json_serializer(self, obj) -> str:
        """Custom JSON serializer for non-standard types.

        Passed as ``default=`` to ``json.dumps``; only called for objects the
        encoder cannot serialize natively.

        Args:
            obj: Object to serialize.

        Returns:
            String representation of the object.
        """
        # Datetime-like objects (anything with isoformat()).
        if hasattr(obj, 'isoformat'):
            return obj.isoformat()

        # Timedelta-like objects, rendered as "<seconds>s" so
        # _calculate_resources_per_minute can parse them back.
        if hasattr(obj, 'total_seconds'):
            return f"{obj.total_seconds():.2f}s"

        # Enum-like objects (anything exposing .value).
        if hasattr(obj, 'value'):
            return obj.value

        # Fallback: plain string representation.
        return str(obj)

    def _calculate_compliance_grade(self, compliance_percentage: float) -> str:
        """Calculate compliance grade based on percentage.

        Args:
            compliance_percentage: Compliance percentage (0-100).

        Returns:
            Compliance grade (A, B, C, D, F).
        """
        if compliance_percentage >= 95.0:
            return "A"
        elif compliance_percentage >= 85.0:
            return "B"
        elif compliance_percentage >= 75.0:
            return "C"
        elif compliance_percentage >= 60.0:
            return "D"
        else:
            return "F"

    def _calculate_risk_level(self, compliance_percentage: float) -> str:
        """Calculate risk level based on compliance percentage.

        Args:
            compliance_percentage: Compliance percentage (0-100).

        Returns:
            Risk level (LOW, MEDIUM, HIGH, CRITICAL).
        """
        if compliance_percentage >= 90.0:
            return "LOW"
        elif compliance_percentage >= 75.0:
            return "MEDIUM"
        elif compliance_percentage >= 50.0:
            return "HIGH"
        else:
            return "CRITICAL"

    def _get_compliance_status(self, compliance_percentage: float) -> str:
        """Get compliance status based on percentage.

        Args:
            compliance_percentage: Compliance percentage (0-100).

        Returns:
            Compliance status (EXCELLENT, GOOD, FAIR, POOR, CRITICAL).
        """
        if compliance_percentage >= 95.0:
            return "EXCELLENT"
        elif compliance_percentage >= 80.0:
            return "GOOD"
        elif compliance_percentage >= 60.0:
            return "FAIR"
        elif compliance_percentage >= 40.0:
            return "POOR"
        else:
            return "CRITICAL"

    def _calculate_control_risk_score(self, control_data: Dict[str, Any]) -> float:
        """Calculate risk score for a control based on compliance and findings.

        Args:
            control_data: Control data dictionary.

        Returns:
            Risk score (0.0 to 10.0, higher is riskier).
        """
        compliance_pct = control_data.get("compliance_percentage", 100.0)
        total_resources = control_data.get("total_resources", 0)
        non_compliant_findings = len(control_data.get("non_compliant_findings", []))

        # Base risk from non-compliance, mapped onto a 0-10 scale.
        base_risk = (100.0 - compliance_pct) / 10.0

        # Amplify (up to 2x) based on the fraction of affected resources.
        if total_resources > 0:
            resource_factor = min(non_compliant_findings / total_resources, 1.0)
            base_risk *= (1.0 + resource_factor)

        # Cap at 10.0.
        return min(base_risk, 10.0)

    def _analyze_finding_severity(self, findings: List[Dict[str, Any]]) -> Dict[str, int]:
        """Analyze severity distribution of findings.

        Severity is a heuristic keyed on the resource type name only.

        Args:
            findings: List of finding dictionaries.

        Returns:
            Dictionary with HIGH/MEDIUM/LOW severity counts.
        """
        severity_counts = {"HIGH": 0, "MEDIUM": 0, "LOW": 0}

        for finding in findings:
            resource_type = finding.get("resource_type", "")

            # High severity for security-critical resources.
            if any(critical in resource_type.lower() for critical in
                   ["iam", "security", "kms", "cloudtrail", "guardduty"]):
                severity_counts["HIGH"] += 1
            # Medium severity for network and data resources.
            elif any(medium in resource_type.lower() for medium in
                     ["vpc", "s3", "rds", "ec2", "elb", "api"]):
                severity_counts["MEDIUM"] += 1
            # Low severity for everything else.
            else:
                severity_counts["LOW"] += 1

        return severity_counts

    def _calculate_priority_score(self, remediation: Dict[str, Any]) -> int:
        """Calculate numeric priority score for remediation item.

        Args:
            remediation: Remediation guidance dictionary.

        Returns:
            Priority score (1-10, higher is more urgent).
        """
        priority = remediation.get("priority", "MEDIUM")
        effort = remediation.get("estimated_effort", "Unknown")

        # Base score from priority; unknown priorities fall back to MEDIUM.
        priority_scores = {"HIGH": 8, "MEDIUM": 5, "LOW": 2}
        base_score = priority_scores.get(priority, 5)

        # Adjust based on effort (lower effort = higher priority).
        effort_adjustments = {
            "Low": 2, "Medium": 0, "High": -2, "Unknown": 0
        }
        effort_adjustment = effort_adjustments.get(effort, 0)

        final_score = base_score + effort_adjustment
        return max(1, min(final_score, 10))  # Clamp to 1-10 range

    def _categorize_effort(self, estimated_effort: str) -> str:
        """Categorize effort level.

        Args:
            estimated_effort: Effort estimation string.

        Returns:
            Effort category (MINIMAL, MODERATE, SIGNIFICANT, EXTENSIVE).
            Unrecognized strings map to EXTENSIVE.
        """
        effort_lower = estimated_effort.lower()

        if "low" in effort_lower or "minimal" in effort_lower:
            return "MINIMAL"
        elif "medium" in effort_lower or "moderate" in effort_lower:
            return "MODERATE"
        elif "high" in effort_lower or "significant" in effort_lower:
            return "SIGNIFICANT"
        else:
            return "EXTENSIVE"

    def _generate_assessment_statistics(self, json_data: Dict[str, Any]) -> Dict[str, Any]:
        """Generate comprehensive assessment statistics.

        Args:
            json_data: Enhanced JSON report data.

        Returns:
            Dictionary containing per-IG stats, control distribution,
            regional coverage, and assessment-scope metrics.
        """
        metadata = json_data["metadata"]
        exec_summary = json_data["executive_summary"]
        ig_data = json_data["implementation_groups"]

        # Per-Implementation-Group statistics.
        ig_stats = {}
        for ig_name, ig_info in ig_data.items():
            ig_stats[ig_name] = {
                "total_controls": ig_info["total_controls"],
                "compliant_controls": ig_info["compliant_controls"],
                "compliance_percentage": ig_info["compliance_percentage"],
                "controls_at_risk": ig_info["total_controls"] - ig_info["compliant_controls"]
            }

        # Bucket all controls across IGs by compliance percentage.
        all_controls = []
        for ig_info in ig_data.values():
            all_controls.extend(ig_info["controls"].values())

        control_distribution = {
            "total_controls_evaluated": len(all_controls),
            "fully_compliant_controls": len([c for c in all_controls if c["compliance_percentage"] >= 100.0]),
            "mostly_compliant_controls": len([c for c in all_controls if 80.0 <= c["compliance_percentage"] < 100.0]),
            "partially_compliant_controls": len([c for c in all_controls if 50.0 <= c["compliance_percentage"] < 80.0]),
            "non_compliant_controls": len([c for c in all_controls if c["compliance_percentage"] < 50.0])
        }

        # Regional coverage summary.
        regions_assessed = metadata.get("regions_assessed", [])
        regional_stats = {
            "total_regions": len(regions_assessed),
            "regions_list": regions_assessed,
            "multi_region_assessment": len(regions_assessed) > 1
        }

        return {
            "implementation_groups": ig_stats,
            "control_distribution": control_distribution,
            "regional_coverage": regional_stats,
            "assessment_scope": {
                "total_resources_evaluated": exec_summary["total_resources"],
                "resources_per_region": (
                    exec_summary["total_resources"] / len(regions_assessed)
                    if regions_assessed else 0
                ),
                "assessment_duration": metadata.get("assessment_duration"),
                "resources_per_minute": self._calculate_resources_per_minute(
                    exec_summary["total_resources"],
                    metadata.get("assessment_duration")
                )
            }
        }

    def _calculate_resources_per_minute(self, total_resources: int, duration_str: Optional[str]) -> float:
        """Calculate resources processed per minute.

        Args:
            total_resources: Total number of resources evaluated.
            duration_str: Duration string (e.g. "1800.00s", as produced by
                _json_serializer for timedeltas).

        Returns:
            Resources per minute rate; 0.0 when the duration is missing,
            unparseable, or zero.
        """
        if not duration_str or total_resources == 0:
            return 0.0

        try:
            # Extract seconds from the "<seconds>s" duration format.
            if duration_str.endswith('s'):
                seconds = float(duration_str[:-1])
                minutes = seconds / 60.0
                return total_resources / minutes if minutes > 0 else 0.0
        except (ValueError, TypeError):
            # Malformed duration — treat as unknown rather than fail the report.
            pass

        return 0.0

    def _assess_data_quality(self, json_data: Dict[str, Any]) -> Dict[str, Any]:
        """Assess data quality metrics for the assessment.

        Args:
            json_data: Enhanced JSON report data.

        Returns:
            Dictionary containing completeness, consistency, coverage,
            and freshness metrics plus an overall quality score.
        """
        metadata = json_data["metadata"]
        exec_summary = json_data["executive_summary"]
        ig_data = json_data["implementation_groups"]

        # Completeness: fraction of expected Implementation Groups present.
        total_expected_igs = 3  # IG1, IG2, IG3
        actual_igs = len(ig_data)
        ig_completeness = actual_igs / total_expected_igs

        # Consistency: resource counts should agree between summary and metadata.
        total_resources_from_summary = exec_summary["total_resources"]
        total_resources_from_metadata = metadata.get("total_resources_evaluated", 0)

        resource_consistency = (
            1.0 if total_resources_from_summary == total_resources_from_metadata
            else 0.8  # Slight inconsistency
        )

        # Coverage: assume 3 regions represents good coverage.
        regions_assessed = len(metadata.get("regions_assessed", []))
        region_coverage_score = min(regions_assessed / 3.0, 1.0)

        # Overall quality is the unweighted mean of the components.
        quality_components = [ig_completeness, resource_consistency, region_coverage_score]
        overall_quality = sum(quality_components) / len(quality_components)

        return {
            "overall_quality_score": round(overall_quality, 3),
            "completeness": {
                "implementation_groups": ig_completeness,
                "expected_igs": total_expected_igs,
                "actual_igs": actual_igs
            },
            "consistency": {
                "resource_count_consistency": resource_consistency,
                "metadata_alignment": 1.0 if metadata.get("account_id") else 0.0
            },
            "coverage": {
                "regional_coverage_score": region_coverage_score,
                "regions_assessed": regions_assessed
            },
            "data_freshness": {
                "assessment_timestamp": metadata.get("assessment_timestamp"),
                "report_generation_timestamp": metadata.get("report_generated_at")
            }
        }

    def set_json_formatting(self, indent: int = 2, sort_keys: bool = True,
                            ensure_ascii: bool = False) -> None:
        """Configure JSON formatting options.

        These options are applied by generate_report() when serializing.

        Args:
            indent: Number of spaces for indentation.
            sort_keys: Whether to sort dictionary keys.
            ensure_ascii: Whether to escape non-ASCII characters.
        """
        self.indent = indent
        self.sort_keys = sort_keys
        self.ensure_ascii = ensure_ascii
        logger.debug(f"Updated JSON formatting: indent={indent}, sort_keys={sort_keys}, ensure_ascii={ensure_ascii}")

    def validate_json_output(self, json_content: str) -> bool:
        """Validate that the generated JSON is well-formed.

        Args:
            json_content: JSON content string to validate.

        Returns:
            True if JSON is valid, False otherwise.
        """
        try:
            json.loads(json_content)
            logger.debug("JSON validation passed")
            return True
        except json.JSONDecodeError as e:
            logger.error(f"JSON validation failed: {e}")
            return False

    def extract_summary_data(self, json_content: str) -> Optional[Dict[str, Any]]:
        """Extract summary data from generated JSON report.

        Args:
            json_content: JSON report content.

        Returns:
            Dictionary containing summary data, or None if parsing fails
            or an expected key is missing.
        """
        try:
            data = json.loads(json_content)
            return {
                "account_id": data["metadata"]["account_id"],
                "overall_compliance": data["executive_summary"]["overall_compliance_percentage"],
                "compliance_grade": data["executive_summary"]["compliance_grade"],
                "risk_level": data["executive_summary"]["risk_level"],
                "total_resources": data["executive_summary"]["total_resources"],
                "assessment_date": data["metadata"]["assessment_timestamp"],
                "ig_scores": {
                    "IG1": data["executive_summary"]["ig1_compliance_percentage"],
                    "IG2": data["executive_summary"]["ig2_compliance_percentage"],
                    "IG3": data["executive_summary"]["ig3_compliance_percentage"]
                }
            }
        except (json.JSONDecodeError, KeyError) as e:
            logger.error(f"Failed to extract summary data: {e}")
            return None