aws-cis-controls-assessment 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_cis_assessment/__init__.py +11 -0
- aws_cis_assessment/cli/__init__.py +3 -0
- aws_cis_assessment/cli/examples.py +274 -0
- aws_cis_assessment/cli/main.py +1259 -0
- aws_cis_assessment/cli/utils.py +356 -0
- aws_cis_assessment/config/__init__.py +1 -0
- aws_cis_assessment/config/config_loader.py +328 -0
- aws_cis_assessment/config/rules/cis_controls_ig1.yaml +590 -0
- aws_cis_assessment/config/rules/cis_controls_ig2.yaml +412 -0
- aws_cis_assessment/config/rules/cis_controls_ig3.yaml +100 -0
- aws_cis_assessment/controls/__init__.py +1 -0
- aws_cis_assessment/controls/base_control.py +400 -0
- aws_cis_assessment/controls/ig1/__init__.py +239 -0
- aws_cis_assessment/controls/ig1/control_1_1.py +586 -0
- aws_cis_assessment/controls/ig1/control_2_2.py +231 -0
- aws_cis_assessment/controls/ig1/control_3_3.py +718 -0
- aws_cis_assessment/controls/ig1/control_3_4.py +235 -0
- aws_cis_assessment/controls/ig1/control_4_1.py +461 -0
- aws_cis_assessment/controls/ig1/control_access_keys.py +310 -0
- aws_cis_assessment/controls/ig1/control_advanced_security.py +512 -0
- aws_cis_assessment/controls/ig1/control_backup_recovery.py +510 -0
- aws_cis_assessment/controls/ig1/control_cloudtrail_logging.py +197 -0
- aws_cis_assessment/controls/ig1/control_critical_security.py +422 -0
- aws_cis_assessment/controls/ig1/control_data_protection.py +898 -0
- aws_cis_assessment/controls/ig1/control_iam_advanced.py +573 -0
- aws_cis_assessment/controls/ig1/control_iam_governance.py +493 -0
- aws_cis_assessment/controls/ig1/control_iam_policies.py +383 -0
- aws_cis_assessment/controls/ig1/control_instance_optimization.py +100 -0
- aws_cis_assessment/controls/ig1/control_network_enhancements.py +203 -0
- aws_cis_assessment/controls/ig1/control_network_security.py +672 -0
- aws_cis_assessment/controls/ig1/control_s3_enhancements.py +173 -0
- aws_cis_assessment/controls/ig1/control_s3_security.py +422 -0
- aws_cis_assessment/controls/ig1/control_vpc_security.py +235 -0
- aws_cis_assessment/controls/ig2/__init__.py +172 -0
- aws_cis_assessment/controls/ig2/control_3_10.py +698 -0
- aws_cis_assessment/controls/ig2/control_3_11.py +1330 -0
- aws_cis_assessment/controls/ig2/control_5_2.py +393 -0
- aws_cis_assessment/controls/ig2/control_advanced_encryption.py +355 -0
- aws_cis_assessment/controls/ig2/control_codebuild_security.py +263 -0
- aws_cis_assessment/controls/ig2/control_encryption_rest.py +382 -0
- aws_cis_assessment/controls/ig2/control_encryption_transit.py +382 -0
- aws_cis_assessment/controls/ig2/control_network_ha.py +467 -0
- aws_cis_assessment/controls/ig2/control_remaining_encryption.py +426 -0
- aws_cis_assessment/controls/ig2/control_remaining_rules.py +363 -0
- aws_cis_assessment/controls/ig2/control_service_logging.py +402 -0
- aws_cis_assessment/controls/ig3/__init__.py +49 -0
- aws_cis_assessment/controls/ig3/control_12_8.py +395 -0
- aws_cis_assessment/controls/ig3/control_13_1.py +467 -0
- aws_cis_assessment/controls/ig3/control_3_14.py +523 -0
- aws_cis_assessment/controls/ig3/control_7_1.py +359 -0
- aws_cis_assessment/core/__init__.py +1 -0
- aws_cis_assessment/core/accuracy_validator.py +425 -0
- aws_cis_assessment/core/assessment_engine.py +1266 -0
- aws_cis_assessment/core/audit_trail.py +491 -0
- aws_cis_assessment/core/aws_client_factory.py +313 -0
- aws_cis_assessment/core/error_handler.py +607 -0
- aws_cis_assessment/core/models.py +166 -0
- aws_cis_assessment/core/scoring_engine.py +459 -0
- aws_cis_assessment/reporters/__init__.py +8 -0
- aws_cis_assessment/reporters/base_reporter.py +454 -0
- aws_cis_assessment/reporters/csv_reporter.py +835 -0
- aws_cis_assessment/reporters/html_reporter.py +2162 -0
- aws_cis_assessment/reporters/json_reporter.py +561 -0
- aws_cis_controls_assessment-1.0.3.dist-info/METADATA +248 -0
- aws_cis_controls_assessment-1.0.3.dist-info/RECORD +77 -0
- aws_cis_controls_assessment-1.0.3.dist-info/WHEEL +5 -0
- aws_cis_controls_assessment-1.0.3.dist-info/entry_points.txt +2 -0
- aws_cis_controls_assessment-1.0.3.dist-info/licenses/LICENSE +21 -0
- aws_cis_controls_assessment-1.0.3.dist-info/top_level.txt +2 -0
- docs/README.md +94 -0
- docs/assessment-logic.md +766 -0
- docs/cli-reference.md +698 -0
- docs/config-rule-mappings.md +393 -0
- docs/developer-guide.md +858 -0
- docs/installation.md +299 -0
- docs/troubleshooting.md +634 -0
- docs/user-guide.md +487 -0
|
@@ -0,0 +1,425 @@
|
|
|
1
|
+
"""Accuracy validation against AWS Config rule evaluations."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Dict, List, Any, Optional, Tuple
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from datetime import datetime, timedelta
|
|
7
|
+
import boto3
|
|
8
|
+
from botocore.exceptions import ClientError
|
|
9
|
+
|
|
10
|
+
from aws_cis_assessment.core.models import ComplianceResult, ComplianceStatus
|
|
11
|
+
from aws_cis_assessment.core.aws_client_factory import AWSClientFactory
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class ValidationResult:
    """Result of accuracy validation for a single AWS Config rule.

    Captures the comparison between our assessment's compliance results and
    the evaluations AWS Config recorded for the same rule.
    """

    # Minimum accuracy (percent) for a rule's validation to count as
    # accurate.  Class-level so the threshold can be tuned in one place
    # instead of being a magic number inside is_accurate.
    ACCURACY_THRESHOLD: ClassVar[float] = 95.0

    config_rule_name: str  # Name of the AWS Config rule that was validated
    total_resources: int  # Number of (resource, region) pairs compared
    matching_results: int  # Pairs where both sides agreed on status
    accuracy_percentage: float  # matching_results / total_resources * 100
    discrepancies: List[Dict[str, Any]]  # One entry per mismatch or error
    validation_timestamp: datetime  # When this validation was performed

    @property
    def is_accurate(self) -> bool:
        """Check if validation meets the accuracy threshold."""
        return self.accuracy_percentage >= self.ACCURACY_THRESHOLD
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class ValidationSummary:
    """Summary of all validation results."""

    total_rules_validated: int  # How many Config rules were validated
    accurate_rules: int  # How many of those met the accuracy threshold
    overall_accuracy: float  # Pre-computed overall accuracy percentage
    validation_results: List[ValidationResult]  # Per-rule details
    validation_timestamp: datetime  # When the validation run finished

    @property
    def accuracy_percentage(self) -> float:
        """Calculate overall accuracy percentage."""
        total = self.total_rules_validated
        # Guard against division by zero when nothing was validated.
        return (self.accurate_rules / total) * 100 if total else 0.0
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class AccuracyValidator:
    """Validates assessment accuracy against AWS Config rule evaluations.

    Compares the compliance status produced by our own assessment with the
    evaluations AWS Config recorded for the same rules, keyed by
    (resource_id, region), and reports per-rule and overall accuracy.
    """

    def __init__(self, aws_factory: AWSClientFactory):
        """Initialize accuracy validator.

        Args:
            aws_factory: AWS client factory for API access
        """
        self.aws_factory = aws_factory
        # region -> boto3 Config client; regions whose client failed to
        # initialize are absent from this mapping and skipped later.
        self.config_clients: Dict[str, Any] = {}
        self._initialize_config_clients()

    def _initialize_config_clients(self):
        """Initialize AWS Config clients for all regions."""
        for region in self.aws_factory.regions:
            try:
                self.config_clients[region] = self.aws_factory.get_client('config', region)
                logger.debug(f"Initialized Config client for region: {region}")
            except Exception as e:
                # Best effort: a missing client only narrows coverage, it
                # should not abort the whole validation run.
                logger.warning(f"Failed to initialize Config client for {region}: {e}")

    def validate_assessment_accuracy(self,
                                     assessment_results: List[ComplianceResult],
                                     config_rule_names: Optional[List[str]] = None) -> ValidationSummary:
        """Validate assessment accuracy against AWS Config evaluations.

        Args:
            assessment_results: Results from our assessment
            config_rule_names: Optional list of specific Config rules to validate

        Returns:
            ValidationSummary with accuracy metrics
        """
        logger.info("Starting assessment accuracy validation against AWS Config")

        # Group results by Config rule and region
        results_by_rule = self._group_results_by_rule(assessment_results)

        # Filter by specific rules if provided
        if config_rule_names:
            results_by_rule = {
                rule: results for rule, results in results_by_rule.items()
                if rule in config_rule_names
            }

        validation_results = []

        for config_rule_name, rule_results in results_by_rule.items():
            logger.info(f"Validating accuracy for Config rule: {config_rule_name}")

            try:
                validation_result = self._validate_single_rule(config_rule_name, rule_results)
                validation_results.append(validation_result)

                logger.info(f" Accuracy: {validation_result.accuracy_percentage:.1f}% "
                            f"({validation_result.matching_results}/{validation_result.total_resources})")

            except Exception as e:
                logger.error(f"Failed to validate {config_rule_name}: {e}")
                # Record a zero-accuracy result so the failure is visible
                # in the summary instead of being silently dropped.
                validation_results.append(ValidationResult(
                    config_rule_name=config_rule_name,
                    total_resources=len(rule_results),
                    matching_results=0,
                    accuracy_percentage=0.0,
                    discrepancies=[{"error": str(e)}],
                    validation_timestamp=datetime.now()
                ))

        # Calculate overall accuracy: a rule counts as accurate only if it
        # meets the per-rule threshold (ValidationResult.is_accurate).
        accurate_rules = sum(1 for result in validation_results if result.is_accurate)
        overall_accuracy = (accurate_rules / len(validation_results) * 100) if validation_results else 0.0

        summary = ValidationSummary(
            total_rules_validated=len(validation_results),
            accurate_rules=accurate_rules,
            overall_accuracy=overall_accuracy,
            validation_results=validation_results,
            validation_timestamp=datetime.now()
        )

        logger.info(f"Validation completed: {summary.accuracy_percentage:.1f}% overall accuracy "
                    f"({accurate_rules}/{len(validation_results)} rules accurate)")

        return summary

    def _group_results_by_rule(self, results: List[ComplianceResult]) -> Dict[str, List[ComplianceResult]]:
        """Group compliance results by Config rule name.

        Args:
            results: List of compliance results

        Returns:
            Dictionary mapping rule names to results
        """
        grouped: Dict[str, List[ComplianceResult]] = {}
        for result in results:
            # setdefault avoids the membership-test-then-assign dance.
            grouped.setdefault(result.config_rule_name, []).append(result)

        return grouped

    def _validate_single_rule(self, config_rule_name: str,
                              our_results: List[ComplianceResult]) -> ValidationResult:
        """Validate accuracy for a single Config rule.

        Args:
            config_rule_name: Name of the Config rule
            our_results: Our assessment results for this rule

        Returns:
            ValidationResult with accuracy metrics
        """
        # Get AWS Config evaluations for this rule
        config_evaluations = self._get_config_evaluations(config_rule_name)

        if not config_evaluations:
            logger.warning(f"No Config evaluations found for rule: {config_rule_name}")
            return ValidationResult(
                config_rule_name=config_rule_name,
                total_resources=len(our_results),
                matching_results=0,
                accuracy_percentage=0.0,
                discrepancies=[{"error": "No Config evaluations available"}],
                validation_timestamp=datetime.now()
            )

        # Compare our results with Config evaluations
        matching_results = 0
        discrepancies = []

        # Both lookups are keyed by (resource_id, region) so resources with
        # the same id in different regions are compared independently.
        our_results_lookup = {
            (result.resource_id, result.region): result
            for result in our_results
        }

        config_lookup = {
            (eval_data['ResourceId'], eval_data['Region']): eval_data
            for eval_data in config_evaluations
        }

        # Union of keys so resources seen by only one side still surface
        # as discrepancies rather than being ignored.
        all_resource_keys = set(our_results_lookup.keys()) | set(config_lookup.keys())

        for resource_key in all_resource_keys:
            resource_id, region = resource_key

            our_result = our_results_lookup.get(resource_key)
            config_result = config_lookup.get(resource_key)

            if our_result and config_result:
                # Both have results - compare compliance status after
                # normalizing both sides to the same enum.
                our_status = self._normalize_compliance_status(our_result.compliance_status)
                config_status = self._normalize_compliance_status(config_result['ComplianceType'])

                if our_status == config_status:
                    matching_results += 1
                else:
                    discrepancies.append({
                        'resource_id': resource_id,
                        'region': region,
                        'our_status': our_status.value,
                        'config_status': config_status.value,
                        'our_reason': our_result.evaluation_reason,
                        'config_reason': config_result.get('Annotation', 'No annotation')
                    })

            elif our_result and not config_result:
                # We have result but Config doesn't
                discrepancies.append({
                    'resource_id': resource_id,
                    'region': region,
                    'issue': 'Resource found by our assessment but not in Config evaluations',
                    'our_status': our_result.compliance_status.value
                })

            elif config_result and not our_result:
                # Config has result but we don't
                discrepancies.append({
                    'resource_id': resource_id,
                    'region': region,
                    'issue': 'Resource found in Config evaluations but not in our assessment',
                    'config_status': config_result['ComplianceType']
                })

        total_resources = len(all_resource_keys)
        accuracy_percentage = (matching_results / total_resources * 100) if total_resources > 0 else 0.0

        return ValidationResult(
            config_rule_name=config_rule_name,
            total_resources=total_resources,
            matching_results=matching_results,
            accuracy_percentage=accuracy_percentage,
            discrepancies=discrepancies,
            validation_timestamp=datetime.now()
        )

    def _get_config_evaluations(self, config_rule_name: str) -> List[Dict[str, Any]]:
        """Get AWS Config evaluations for a specific rule.

        Args:
            config_rule_name: Name of the Config rule

        Returns:
            List of Config evaluation results across all reachable regions
        """
        all_evaluations: List[Dict[str, Any]] = []

        for region, config_client in self.config_clients.items():
            # Remember where this region's evaluations start so the debug
            # log below reports the per-region count, not the running total.
            region_start = len(all_evaluations)
            try:
                # Check if Config rule exists in this region
                try:
                    config_client.describe_config_rules(ConfigRuleNames=[config_rule_name])
                except ClientError as e:
                    if e.response.get('Error', {}).get('Code', '') == 'NoSuchConfigRuleException':
                        logger.debug(f"Config rule {config_rule_name} not found in region {region}")
                        continue
                    raise

                # Get compliance details for the rule (paginated)
                paginator = config_client.get_paginator('get_compliance_details_by_config_rule')

                for page in paginator.paginate(ConfigRuleName=config_rule_name):
                    for evaluation in page.get('EvaluationResults', []):
                        qualifier = evaluation['EvaluationResultIdentifier']['EvaluationResultQualifier']
                        all_evaluations.append({
                            'ResourceId': qualifier['ResourceId'],
                            'ResourceType': qualifier['ResourceType'],
                            'ComplianceType': evaluation['ComplianceType'],
                            'ConfigRuleInvokedTime': evaluation['ConfigRuleInvokedTime'],
                            'ResultRecordedTime': evaluation['ResultRecordedTime'],
                            'Annotation': evaluation.get('Annotation', ''),
                            'Region': region
                        })

                logger.debug(f"Retrieved {len(all_evaluations) - region_start} evaluations for {config_rule_name} in {region}")

            except ClientError as e:
                error_code = e.response.get('Error', {}).get('Code', '')
                if error_code in ['ConfigurationNotRecordedException', 'NoSuchConfigurationRecorderException']:
                    logger.warning(f"AWS Config not enabled in region {region}")
                elif error_code in ['AccessDenied', 'AccessDeniedException']:
                    logger.warning(f"Access denied to Config service in region {region}")
                else:
                    logger.error(f"Error getting Config evaluations in {region}: {e}")

            except Exception as e:
                logger.error(f"Unexpected error getting Config evaluations in {region}: {e}")

        return all_evaluations

    def _normalize_compliance_status(self, status: Any) -> ComplianceStatus:
        """Normalize compliance status to our enum.

        Args:
            status: Status from either our assessment or AWS Config

        Returns:
            Normalized ComplianceStatus (ERROR for unrecognized values)
        """
        if isinstance(status, ComplianceStatus):
            return status

        # Convert string status to our enum; accept the spelling variants
        # AWS APIs and our own serialization may produce.
        status_str = str(status).upper()

        if status_str in ['COMPLIANT', 'COMPLIANCE']:
            return ComplianceStatus.COMPLIANT
        elif status_str in ['NON_COMPLIANT', 'NONCOMPLIANT', 'NON-COMPLIANT']:
            return ComplianceStatus.NON_COMPLIANT
        elif status_str in ['NOT_APPLICABLE', 'NOTAPPLICABLE', 'NOT-APPLICABLE']:
            return ComplianceStatus.NOT_APPLICABLE
        elif status_str in ['INSUFFICIENT_DATA', 'INSUFFICIENTDATA', 'INSUFFICIENT-DATA']:
            return ComplianceStatus.INSUFFICIENT_PERMISSIONS
        else:
            return ComplianceStatus.ERROR

    def check_config_service_availability(self) -> Dict[str, bool]:
        """Check AWS Config service availability in all regions.

        Returns:
            Dictionary mapping regions to availability status
        """
        availability: Dict[str, bool] = {}

        for region in self.aws_factory.regions:
            try:
                config_client = self.aws_factory.get_client('config', region)

                # Try to describe configuration recorders
                response = config_client.describe_configuration_recorders()

                # Config counts as available only if at least one recorder
                # is set to record all supported resource types.
                recorders = response.get('ConfigurationRecorders', [])
                is_recording = any(
                    recorder.get('recordingGroup', {}).get('allSupported', False)
                    for recorder in recorders
                )

                availability[region] = is_recording

                if is_recording:
                    logger.debug(f"AWS Config is active and recording in region: {region}")
                else:
                    logger.warning(f"AWS Config is not recording in region: {region}")

            except ClientError as e:
                error_code = e.response.get('Error', {}).get('Code', '')
                if error_code in ['ConfigurationNotRecordedException', 'NoSuchConfigurationRecorderException']:
                    logger.warning(f"AWS Config not configured in region: {region}")
                    availability[region] = False
                elif error_code in ['AccessDenied', 'AccessDeniedException']:
                    logger.warning(f"Access denied to Config service in region: {region}")
                    availability[region] = False
                else:
                    logger.error(f"Error checking Config availability in {region}: {e}")
                    availability[region] = False

            except Exception as e:
                logger.error(f"Unexpected error checking Config in {region}: {e}")
                availability[region] = False

        return availability

    def generate_validation_report(self, summary: ValidationSummary) -> str:
        """Generate a validation report.

        Args:
            summary: ValidationSummary to report on

        Returns:
            Validation report as string (Markdown-formatted)
        """
        report_lines = []
        report_lines.append("# Assessment Accuracy Validation Report")
        report_lines.append(f"Generated: {summary.validation_timestamp.isoformat()}")
        report_lines.append("")

        # Overall summary
        report_lines.append("## Overall Summary")
        report_lines.append(f"- Total rules validated: {summary.total_rules_validated}")
        report_lines.append(f"- Accurate rules: {summary.accurate_rules}")
        report_lines.append(f"- Overall accuracy: {summary.overall_accuracy:.1f}%")
        report_lines.append("")

        # Individual rule results
        report_lines.append("## Individual Rule Results")

        for result in summary.validation_results:
            status = "✓ ACCURATE" if result.is_accurate else "✗ INACCURATE"
            report_lines.append(f"### {result.config_rule_name} - {status}")
            report_lines.append(f"- Accuracy: {result.accuracy_percentage:.1f}%")
            report_lines.append(f"- Matching results: {result.matching_results}/{result.total_resources}")

            if result.discrepancies:
                report_lines.append(f"- Discrepancies: {len(result.discrepancies)}")

                # Show first few discrepancies only, to keep the report short.
                for discrepancy in result.discrepancies[:5]:
                    if 'issue' in discrepancy:
                        report_lines.append(f" - {discrepancy['resource_id']}: {discrepancy['issue']}")
                    else:
                        report_lines.append(f" - {discrepancy['resource_id']}: "
                                            f"Our={discrepancy['our_status']}, "
                                            f"Config={discrepancy['config_status']}")

                if len(result.discrepancies) > 5:
                    report_lines.append(f" - ... and {len(result.discrepancies) - 5} more")

            report_lines.append("")

        return "\n".join(report_lines)
|