regscale-cli 6.27.3.0__py3-none-any.whl → 6.28.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of regscale-cli might be problematic. Click here for more details.
- regscale/_version.py +1 -1
- regscale/core/app/utils/app_utils.py +11 -2
- regscale/dev/cli.py +26 -0
- regscale/dev/version.py +72 -0
- regscale/integrations/commercial/__init__.py +15 -1
- regscale/integrations/commercial/amazon/amazon/__init__.py +0 -0
- regscale/integrations/commercial/amazon/amazon/common.py +204 -0
- regscale/integrations/commercial/amazon/common.py +48 -58
- regscale/integrations/commercial/aws/audit_manager_compliance.py +2671 -0
- regscale/integrations/commercial/aws/cli.py +3093 -55
- regscale/integrations/commercial/aws/cloudtrail_control_mappings.py +333 -0
- regscale/integrations/commercial/aws/cloudtrail_evidence.py +501 -0
- regscale/integrations/commercial/aws/cloudwatch_control_mappings.py +357 -0
- regscale/integrations/commercial/aws/cloudwatch_evidence.py +490 -0
- regscale/integrations/commercial/aws/config_compliance.py +914 -0
- regscale/integrations/commercial/aws/conformance_pack_mappings.py +198 -0
- regscale/integrations/commercial/aws/evidence_generator.py +283 -0
- regscale/integrations/commercial/aws/guardduty_control_mappings.py +340 -0
- regscale/integrations/commercial/aws/guardduty_evidence.py +1053 -0
- regscale/integrations/commercial/aws/iam_control_mappings.py +368 -0
- regscale/integrations/commercial/aws/iam_evidence.py +574 -0
- regscale/integrations/commercial/aws/inventory/__init__.py +223 -22
- regscale/integrations/commercial/aws/inventory/base.py +107 -5
- regscale/integrations/commercial/aws/inventory/resources/audit_manager.py +513 -0
- regscale/integrations/commercial/aws/inventory/resources/cloudtrail.py +315 -0
- regscale/integrations/commercial/aws/inventory/resources/cloudtrail_logs_metadata.py +476 -0
- regscale/integrations/commercial/aws/inventory/resources/cloudwatch.py +191 -0
- regscale/integrations/commercial/aws/inventory/resources/compute.py +66 -9
- regscale/integrations/commercial/aws/inventory/resources/config.py +464 -0
- regscale/integrations/commercial/aws/inventory/resources/containers.py +74 -9
- regscale/integrations/commercial/aws/inventory/resources/database.py +106 -31
- regscale/integrations/commercial/aws/inventory/resources/guardduty.py +286 -0
- regscale/integrations/commercial/aws/inventory/resources/iam.py +470 -0
- regscale/integrations/commercial/aws/inventory/resources/inspector.py +476 -0
- regscale/integrations/commercial/aws/inventory/resources/integration.py +175 -61
- regscale/integrations/commercial/aws/inventory/resources/kms.py +447 -0
- regscale/integrations/commercial/aws/inventory/resources/networking.py +103 -67
- regscale/integrations/commercial/aws/inventory/resources/s3.py +394 -0
- regscale/integrations/commercial/aws/inventory/resources/security.py +268 -72
- regscale/integrations/commercial/aws/inventory/resources/securityhub.py +473 -0
- regscale/integrations/commercial/aws/inventory/resources/storage.py +53 -29
- regscale/integrations/commercial/aws/inventory/resources/systems_manager.py +657 -0
- regscale/integrations/commercial/aws/inventory/resources/vpc.py +655 -0
- regscale/integrations/commercial/aws/kms_control_mappings.py +288 -0
- regscale/integrations/commercial/aws/kms_evidence.py +879 -0
- regscale/integrations/commercial/aws/ocsf/__init__.py +7 -0
- regscale/integrations/commercial/aws/ocsf/constants.py +115 -0
- regscale/integrations/commercial/aws/ocsf/mapper.py +435 -0
- regscale/integrations/commercial/aws/org_control_mappings.py +286 -0
- regscale/integrations/commercial/aws/org_evidence.py +666 -0
- regscale/integrations/commercial/aws/s3_control_mappings.py +356 -0
- regscale/integrations/commercial/aws/s3_evidence.py +632 -0
- regscale/integrations/commercial/aws/scanner.py +851 -206
- regscale/integrations/commercial/aws/security_hub.py +319 -0
- regscale/integrations/commercial/aws/session_manager.py +282 -0
- regscale/integrations/commercial/aws/ssm_control_mappings.py +291 -0
- regscale/integrations/commercial/aws/ssm_evidence.py +492 -0
- regscale/integrations/commercial/synqly/ticketing.py +27 -0
- regscale/integrations/compliance_integration.py +308 -38
- regscale/integrations/due_date_handler.py +3 -0
- regscale/integrations/scanner_integration.py +399 -84
- regscale/models/integration_models/cisa_kev_data.json +65 -5
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/integration_models/synqly_models/connectors/vulnerabilities.py +17 -9
- regscale/models/regscale_models/assessment.py +2 -1
- regscale/models/regscale_models/control_objective.py +74 -5
- regscale/models/regscale_models/file.py +2 -0
- regscale/models/regscale_models/issue.py +2 -5
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/METADATA +1 -1
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/RECORD +113 -34
- tests/regscale/integrations/commercial/aws/__init__.py +0 -0
- tests/regscale/integrations/commercial/aws/test_audit_manager_compliance.py +1304 -0
- tests/regscale/integrations/commercial/aws/test_audit_manager_evidence_aggregation.py +341 -0
- tests/regscale/integrations/commercial/aws/test_aws_audit_manager_collector.py +1155 -0
- tests/regscale/integrations/commercial/aws/test_aws_cloudtrail_collector.py +534 -0
- tests/regscale/integrations/commercial/aws/test_aws_config_collector.py +400 -0
- tests/regscale/integrations/commercial/aws/test_aws_guardduty_collector.py +315 -0
- tests/regscale/integrations/commercial/aws/test_aws_iam_collector.py +458 -0
- tests/regscale/integrations/commercial/aws/test_aws_inspector_collector.py +353 -0
- tests/regscale/integrations/commercial/aws/test_aws_inventory_integration.py +530 -0
- tests/regscale/integrations/commercial/aws/test_aws_kms_collector.py +919 -0
- tests/regscale/integrations/commercial/aws/test_aws_s3_collector.py +722 -0
- tests/regscale/integrations/commercial/aws/test_aws_scanner_integration.py +722 -0
- tests/regscale/integrations/commercial/aws/test_aws_securityhub_collector.py +792 -0
- tests/regscale/integrations/commercial/aws/test_aws_systems_manager_collector.py +918 -0
- tests/regscale/integrations/commercial/aws/test_aws_vpc_collector.py +996 -0
- tests/regscale/integrations/commercial/aws/test_cli_evidence.py +431 -0
- tests/regscale/integrations/commercial/aws/test_cloudtrail_control_mappings.py +452 -0
- tests/regscale/integrations/commercial/aws/test_cloudtrail_evidence.py +788 -0
- tests/regscale/integrations/commercial/aws/test_config_compliance.py +298 -0
- tests/regscale/integrations/commercial/aws/test_conformance_pack_mappings.py +200 -0
- tests/regscale/integrations/commercial/aws/test_evidence_generator.py +386 -0
- tests/regscale/integrations/commercial/aws/test_guardduty_control_mappings.py +564 -0
- tests/regscale/integrations/commercial/aws/test_guardduty_evidence.py +1041 -0
- tests/regscale/integrations/commercial/aws/test_iam_control_mappings.py +718 -0
- tests/regscale/integrations/commercial/aws/test_iam_evidence.py +1375 -0
- tests/regscale/integrations/commercial/aws/test_kms_control_mappings.py +656 -0
- tests/regscale/integrations/commercial/aws/test_kms_evidence.py +1163 -0
- tests/regscale/integrations/commercial/aws/test_ocsf_mapper.py +370 -0
- tests/regscale/integrations/commercial/aws/test_org_control_mappings.py +546 -0
- tests/regscale/integrations/commercial/aws/test_org_evidence.py +1240 -0
- tests/regscale/integrations/commercial/aws/test_s3_control_mappings.py +672 -0
- tests/regscale/integrations/commercial/aws/test_s3_evidence.py +987 -0
- tests/regscale/integrations/commercial/aws/test_scanner_evidence.py +373 -0
- tests/regscale/integrations/commercial/aws/test_security_hub_config_filtering.py +539 -0
- tests/regscale/integrations/commercial/aws/test_session_manager.py +516 -0
- tests/regscale/integrations/commercial/aws/test_ssm_control_mappings.py +588 -0
- tests/regscale/integrations/commercial/aws/test_ssm_evidence.py +735 -0
- tests/regscale/integrations/commercial/test_aws.py +55 -56
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/LICENSE +0 -0
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/WHEEL +0 -0
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,2671 @@
|
|
|
1
|
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""AWS Audit Manager Compliance Integration for RegScale CLI."""

import json
import logging
import os
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import boto3
from botocore.exceptions import ClientError

from regscale.core.app.utils.app_utils import create_progress_object, get_current_datetime
from regscale.integrations.compliance_integration import ComplianceIntegration, ComplianceItem
from regscale.models import regscale_models

# Shared CLI logger; child modules all log under the "regscale" namespace.
logger = logging.getLogger("regscale")

# Constants for file paths and cache TTL
AUDIT_MANAGER_CACHE_FILE = os.path.join("artifacts", "aws", "audit_manager_assessments.json")
CACHE_TTL_SECONDS = 4 * 60 * 60  # 4 hours in seconds

# AWS Audit Manager IAM permission constants
# (used in error messages so operators know which permission to grant)
IAM_PERMISSION_LIST_ASSESSMENTS = "auditmanager:ListAssessments"
IAM_PERMISSION_GET_ASSESSMENT = "auditmanager:GetAssessment"
IAM_PERMISSION_GET_EVIDENCE_FOLDERS = "auditmanager:GetEvidenceFoldersByAssessmentControl"

# HTML tag constants to avoid duplication when building rich-text descriptions
HTML_STRONG_OPEN = "<strong>"
HTML_STRONG_CLOSE = "</strong>"
HTML_P_OPEN = "<p>"
HTML_P_CLOSE = "</p>"
HTML_UL_OPEN = "<ul>"
HTML_UL_CLOSE = "</ul>"
HTML_LI_OPEN = "<li>"
HTML_LI_CLOSE = "</li>"
HTML_H4_OPEN = "<h4>"
HTML_H4_CLOSE = "</h4>"
HTML_BR = "<br>"
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class AWSAuditManagerComplianceItem(ComplianceItem):
|
|
45
|
+
"""
|
|
46
|
+
Compliance item from AWS Audit Manager assessment.
|
|
47
|
+
|
|
48
|
+
IMPORTANT: Evidence-Based Compliance Determination
|
|
49
|
+
---------------------------------------------------
|
|
50
|
+
This integration uses evidence items to determine control compliance status:
|
|
51
|
+
|
|
52
|
+
1. Control 'status' field (REVIEWED/UNDER_REVIEW/INACTIVE) is workflow tracking only
|
|
53
|
+
2. Actual compliance is determined by aggregating evidence items' complianceCheck fields
|
|
54
|
+
3. Evidence complianceCheck values: "COMPLIANT", "FAILED", "NOT_APPLICABLE", or None/missing
|
|
55
|
+
|
|
56
|
+
Aggregation Logic:
|
|
57
|
+
- ANY evidence with "FAILED" → Control FAILS
|
|
58
|
+
- ALL evidence with "COMPLIANT" → Control PASSES
|
|
59
|
+
- NOT_APPLICABLE evidence is tracked separately and doesn't affect compliance
|
|
60
|
+
- No evidence or only inconclusive/not applicable evidence → Returns None (control not updated)
|
|
61
|
+
|
|
62
|
+
The None return value signals the integration framework to skip updating the control
|
|
63
|
+
status, preventing false positive/negative results when evidence is unavailable.
|
|
64
|
+
"""
|
|
65
|
+
|
|
66
|
+
    def __init__(
        self, assessment_data: Dict[str, Any], control_data: Dict[str, Any], evidence_items: Optional[List[Dict]] = None
    ):
        """
        Initialize from AWS Audit Manager assessment and control data.

        :param Dict[str, Any] assessment_data: Assessment metadata
        :param Dict[str, Any] control_data: Control assessment result
        :param Optional[List[Dict]] evidence_items: Evidence items with complianceCheck fields.
            REQUIRED for accurate compliance determination.
            Without evidence, control status will not be updated.
        """
        self.assessment_data = assessment_data
        self.control_data = control_data
        # Normalize None to [] so aggregation code can iterate unconditionally.
        self.evidence_items = evidence_items or []

        # Extract assessment metadata (all lookups default to "" / {} so
        # partially-populated API responses never raise).
        self.assessment_name = assessment_data.get("name", "")
        self.assessment_id = assessment_data.get("arn", "")
        self.framework_name = assessment_data.get("framework", {}).get("metadata", {}).get("name", "")
        self.framework_type = assessment_data.get("framework", {}).get("type", "")
        self.compliance_type = assessment_data.get("complianceType", "")
        self.aws_account = assessment_data.get("awsAccount", {})

        # Extract control metadata
        # AWS Audit Manager embeds the control ID in the 'name' field
        # Format: "AC-2 - Control Name" or "AC-2(1) - Control Name with Enhancement"
        control_name = control_data.get("name", "")
        self._control_name = control_name

        # Extract control ID from name field (before the hyphen)
        # Example: "AC-2 - Access Control" -> "AC-2"
        self._control_id = self._extract_control_id_from_name(control_name)

        # NOTE: 'status' here is AWS workflow status (REVIEWED/UNDER_REVIEW/
        # INACTIVE), not a compliance verdict — see class docstring.
        self.control_status = control_data.get("status", "UNDER_REVIEW")
        self.control_response = control_data.get("response", "")
        self.control_comments = control_data.get("comments", [])

        # Log extracted control ID for debugging
        logger.debug(f"Extracted control ID: '{self._control_id}' from name: '{control_name}'")

        # Extract evidence counts
        self.evidence_count = control_data.get("evidenceCount", 0)
        self.assessment_report_evidence_count = control_data.get("assessmentReportEvidenceCount", 0)

        # Extract remediation and testing guidance
        self.action_plan_title = control_data.get("actionPlanTitle", "")
        self.action_plan_instructions = control_data.get("actionPlanInstructions", "")
        self.testing_information = control_data.get("testingInformation", "")

        # Resource information (from evidence sources); populated later by
        # collectors, with account-level fallbacks in the properties below.
        self._resource_id = None
        self._resource_name = None
        # Default severity applied when a control fails — TODO confirm whether
        # per-finding severity should override this.
        self._severity = "MEDIUM"

        # Cache for aggregated compliance result (see compliance_result).
        self._aggregated_compliance_result = None
|
|
123
|
+
|
|
124
|
+
@property
|
|
125
|
+
def resource_id(self) -> str:
|
|
126
|
+
"""Unique identifier for the resource being assessed."""
|
|
127
|
+
if self._resource_id:
|
|
128
|
+
return self._resource_id
|
|
129
|
+
return self.aws_account.get("id", "")
|
|
130
|
+
|
|
131
|
+
@property
|
|
132
|
+
def resource_name(self) -> str:
|
|
133
|
+
"""Human-readable name of the resource."""
|
|
134
|
+
if self._resource_name:
|
|
135
|
+
return self._resource_name
|
|
136
|
+
account_name = self.aws_account.get("name", "")
|
|
137
|
+
account_id = self.aws_account.get("id", "")
|
|
138
|
+
if account_name:
|
|
139
|
+
return f"{account_name} ({account_id})"
|
|
140
|
+
return account_id
|
|
141
|
+
|
|
142
|
+
def _try_extract_with_pattern(self, control_name: str, pattern: str) -> Optional[str]:
|
|
143
|
+
"""
|
|
144
|
+
Try to extract control ID using a specific regex pattern.
|
|
145
|
+
|
|
146
|
+
:param str control_name: Full control name from AWS
|
|
147
|
+
:param str pattern: Regex pattern to match
|
|
148
|
+
:return: Extracted control ID or None
|
|
149
|
+
:rtype: Optional[str]
|
|
150
|
+
"""
|
|
151
|
+
import re
|
|
152
|
+
|
|
153
|
+
match = re.match(pattern, control_name)
|
|
154
|
+
return match.group(1).strip() if match else None
|
|
155
|
+
|
|
156
|
+
def _extract_control_id_from_name(self, control_name: str) -> str:
|
|
157
|
+
"""
|
|
158
|
+
Extract control ID from AWS Audit Manager control name.
|
|
159
|
+
|
|
160
|
+
Supports multiple control ID formats:
|
|
161
|
+
- NIST (colon): "AC-2: Access Control (NIST-SP-800-53-r5)", "AC-2(1): Enhancement (NIST-SP-800-53-r5)"
|
|
162
|
+
- NIST (hyphen): "AC-2 - Access Control", "AC-2(1) - Access Control Enhancement"
|
|
163
|
+
- SOC 2: "CC1.1 COSO Principle 1...", "PI1.5 The entity implements..."
|
|
164
|
+
- CIS: "1.1 Ensure...", "1.1.1 Ensure..."
|
|
165
|
+
- ISO: "A.5.1 Policies for...", "A.5.1.1 Policies..."
|
|
166
|
+
|
|
167
|
+
:param str control_name: Full control name from AWS
|
|
168
|
+
:return: Extracted control ID
|
|
169
|
+
:rtype: str
|
|
170
|
+
"""
|
|
171
|
+
if not control_name:
|
|
172
|
+
return ""
|
|
173
|
+
|
|
174
|
+
# Define patterns in order of specificity
|
|
175
|
+
patterns = [
|
|
176
|
+
r"^([A-Z]{2,3}-\d+(?:\(\d+\))?):\s*", # NIST with colon
|
|
177
|
+
r"^([A-Z]{2,3}-\d+(?:\(\d+\))?)\s*-\s*", # NIST with hyphen
|
|
178
|
+
r"^([A-Z]{1,3}\d+\.\d+)\s+", # SOC 2
|
|
179
|
+
r"^(\d+(?:\.\d+){1,3})\s+", # CIS
|
|
180
|
+
r"^([A-Z]\.\d+(?:\.\d+){1,2})\s+", # ISO
|
|
181
|
+
r"^([A-Z]+\d+(?:\.\d+)*)\s+", # Generic alphanumeric with dots
|
|
182
|
+
]
|
|
183
|
+
|
|
184
|
+
for pattern in patterns:
|
|
185
|
+
result = self._try_extract_with_pattern(control_name, pattern)
|
|
186
|
+
if result:
|
|
187
|
+
return result
|
|
188
|
+
|
|
189
|
+
logger.warning(f"Could not extract control ID from name: '{control_name}'")
|
|
190
|
+
return ""
|
|
191
|
+
|
|
192
|
+
@property
|
|
193
|
+
def control_id(self) -> str:
|
|
194
|
+
"""Control identifier (e.g., AC-3, SI-2)."""
|
|
195
|
+
# The control ID is already in the correct format from the name field
|
|
196
|
+
# Just return it directly
|
|
197
|
+
return self._control_id if self._control_id else ""
|
|
198
|
+
|
|
199
|
+
def _get_evidence_compliance(self, evidence: Dict[str, Any]) -> Optional[str]:
|
|
200
|
+
"""
|
|
201
|
+
Extract compliance check result from a single evidence item.
|
|
202
|
+
|
|
203
|
+
Checks both root-level and resource-level complianceCheck fields.
|
|
204
|
+
|
|
205
|
+
:param Dict[str, Any] evidence: Evidence item
|
|
206
|
+
:return: "COMPLIANT", "FAILED", "NOT_APPLICABLE", or None
|
|
207
|
+
:rtype: Optional[str]
|
|
208
|
+
"""
|
|
209
|
+
# Check root-level complianceCheck first
|
|
210
|
+
compliance_check = evidence.get("complianceCheck")
|
|
211
|
+
|
|
212
|
+
# If no root-level check, look in resourcesIncluded
|
|
213
|
+
if compliance_check is None:
|
|
214
|
+
resources_included = evidence.get("resourcesIncluded", [])
|
|
215
|
+
if resources_included:
|
|
216
|
+
# Check all resources - if ANY resource failed, evidence is failed
|
|
217
|
+
resource_checks = [r.get("complianceCheck") for r in resources_included]
|
|
218
|
+
if "FAILED" in resource_checks:
|
|
219
|
+
compliance_check = "FAILED"
|
|
220
|
+
elif any(check == "COMPLIANT" for check in resource_checks):
|
|
221
|
+
compliance_check = "COMPLIANT"
|
|
222
|
+
elif any(check == "NOT_APPLICABLE" for check in resource_checks):
|
|
223
|
+
compliance_check = "NOT_APPLICABLE"
|
|
224
|
+
|
|
225
|
+
return compliance_check
|
|
226
|
+
|
|
227
|
+
def _log_inconclusive_status(self, not_applicable_count: int, inconclusive_count: int, total_evidence: int) -> None:
|
|
228
|
+
"""Log inconclusive status with appropriate message."""
|
|
229
|
+
if not_applicable_count > 0 and inconclusive_count == 0:
|
|
230
|
+
logger.info(
|
|
231
|
+
f"Control {self.control_id}: All {not_applicable_count} evidence item(s) marked as NOT_APPLICABLE. "
|
|
232
|
+
"Control status will not be updated."
|
|
233
|
+
)
|
|
234
|
+
elif not_applicable_count > 0:
|
|
235
|
+
logger.info(
|
|
236
|
+
f"Control {self.control_id}: {not_applicable_count} NOT_APPLICABLE, "
|
|
237
|
+
f"{inconclusive_count} inconclusive evidence item(s). Control status will not be updated."
|
|
238
|
+
)
|
|
239
|
+
else:
|
|
240
|
+
logger.info(
|
|
241
|
+
f"Control {self.control_id}: No compliance checks available in {total_evidence} evidence item(s). "
|
|
242
|
+
"Control status will not be updated."
|
|
243
|
+
)
|
|
244
|
+
|
|
245
|
+
def _aggregate_evidence_compliance(self) -> Optional[str]:
|
|
246
|
+
"""
|
|
247
|
+
Aggregate evidence complianceCheck fields to determine overall control compliance.
|
|
248
|
+
|
|
249
|
+
AWS Audit Manager evidence items contain a complianceCheck field with values:
|
|
250
|
+
- "COMPLIANT": Evidence shows resource is compliant
|
|
251
|
+
- "FAILED": Evidence shows resource is non-compliant
|
|
252
|
+
- "NOT_APPLICABLE": Evidence is not applicable to this control
|
|
253
|
+
- null/None: No compliance check available for this evidence
|
|
254
|
+
|
|
255
|
+
Evidence can have compliance checks in TWO locations:
|
|
256
|
+
1. Root level: evidence["complianceCheck"]
|
|
257
|
+
2. Resource level: evidence["resourcesIncluded"][*]["complianceCheck"]
|
|
258
|
+
|
|
259
|
+
This method checks BOTH locations to ensure accurate compliance determination.
|
|
260
|
+
|
|
261
|
+
Aggregation Logic:
|
|
262
|
+
1. If ANY evidence shows "FAILED" → Control FAILS
|
|
263
|
+
2. If ALL evidence shows "COMPLIANT" → Control PASSES
|
|
264
|
+
3. NOT_APPLICABLE evidence is tracked separately and doesn't affect compliance
|
|
265
|
+
4. If NO compliance checks available (all null/NOT_APPLICABLE) → INCONCLUSIVE
|
|
266
|
+
5. If mixed (some COMPLIANT, some null, no FAILED) → PASS with warning
|
|
267
|
+
|
|
268
|
+
:return: "PASS", "FAIL", or None (if inconclusive/no evidence)
|
|
269
|
+
:rtype: Optional[str]
|
|
270
|
+
"""
|
|
271
|
+
if not self.evidence_items:
|
|
272
|
+
logger.debug(f"Control {self.control_id}: No evidence items available for aggregation")
|
|
273
|
+
return None
|
|
274
|
+
|
|
275
|
+
compliant_count = 0
|
|
276
|
+
failed_count = 0
|
|
277
|
+
inconclusive_count = 0
|
|
278
|
+
not_applicable_count = 0
|
|
279
|
+
|
|
280
|
+
for evidence in self.evidence_items:
|
|
281
|
+
compliance_check = self._get_evidence_compliance(evidence)
|
|
282
|
+
|
|
283
|
+
if compliance_check == "FAILED":
|
|
284
|
+
failed_count += 1
|
|
285
|
+
elif compliance_check == "COMPLIANT":
|
|
286
|
+
compliant_count += 1
|
|
287
|
+
elif compliance_check == "NOT_APPLICABLE":
|
|
288
|
+
not_applicable_count += 1
|
|
289
|
+
else:
|
|
290
|
+
inconclusive_count += 1
|
|
291
|
+
|
|
292
|
+
total_evidence = len(self.evidence_items)
|
|
293
|
+
|
|
294
|
+
logger.debug(
|
|
295
|
+
f"Control {self.control_id} evidence summary: "
|
|
296
|
+
f"{failed_count} FAILED, {compliant_count} COMPLIANT, "
|
|
297
|
+
f"{not_applicable_count} NOT_APPLICABLE, {inconclusive_count} inconclusive out of {total_evidence} total"
|
|
298
|
+
)
|
|
299
|
+
|
|
300
|
+
# If ANY evidence failed, the control fails
|
|
301
|
+
if failed_count > 0:
|
|
302
|
+
logger.info(
|
|
303
|
+
f"Control {self.control_id} FAILS: {failed_count} failed evidence item(s) out of {total_evidence}"
|
|
304
|
+
)
|
|
305
|
+
return "FAIL"
|
|
306
|
+
|
|
307
|
+
# If we have compliant evidence and no failures, control passes
|
|
308
|
+
if compliant_count > 0:
|
|
309
|
+
if inconclusive_count > 0 or not_applicable_count > 0:
|
|
310
|
+
logger.info(
|
|
311
|
+
f"Control {self.control_id} PASSES: {compliant_count} compliant, "
|
|
312
|
+
f"{not_applicable_count} not applicable, {inconclusive_count} inconclusive (no failures)"
|
|
313
|
+
)
|
|
314
|
+
else:
|
|
315
|
+
logger.info(f"Control {self.control_id} PASSES: All {compliant_count} evidence items compliant")
|
|
316
|
+
return "PASS"
|
|
317
|
+
|
|
318
|
+
# If all evidence is not applicable or inconclusive, we cannot determine status
|
|
319
|
+
self._log_inconclusive_status(not_applicable_count, inconclusive_count, total_evidence)
|
|
320
|
+
return None
|
|
321
|
+
|
|
322
|
+
    @property
    def compliance_result(self) -> Optional[str]:
        """
        Result of compliance check (PASS, FAIL, etc).

        IMPORTANT: AWS Audit Manager control 'status' (REVIEWED/UNDER_REVIEW/INACTIVE) is a
        WORKFLOW STATUS, not a compliance result. The actual compliance determination requires
        analyzing the individual evidence items' 'complianceCheck' fields.

        This property aggregates evidence to determine actual compliance:
        1. Collects all evidence items' complianceCheck fields (COMPLIANT/FAILED)
        2. Determines overall control compliance (if ANY evidence FAILED -> control FAILS)
        3. Returns PASS if all evidence is compliant, FAIL if any failures

        If no evidence is available, returns None. The control status should NOT be updated
        when evidence is unavailable - this signals the integration to skip the control.

        :return: "PASS", "FAIL", or None (if no evidence available)
        :rtype: Optional[str]
        """
        # Use cached result if available (including None). Because None is a
        # legitimate cached value, a separate sentinel attribute
        # (_result_was_cached, set only after the first computation) is used
        # to distinguish "computed None" from "never computed".
        if self._aggregated_compliance_result is not None or hasattr(self, "_result_was_cached"):
            return self._aggregated_compliance_result

        # Aggregate evidence compliance checks (first access only)
        result = self._aggregate_evidence_compliance()

        if result is None:
            # No evidence or no compliance checks available
            # Return None to signal that control should not be updated
            logger.info(
                f"Control {self.control_id}: No evidence available for compliance determination. "
                f"Control status will not be updated. Evidence items: {len(self.evidence_items)}, "
                f"Metadata evidence count: {self.evidence_count}"
            )

        # Cache the result (including None)
        self._aggregated_compliance_result = result
        self._result_was_cached = True
        return result
|
|
362
|
+
|
|
363
|
+
@property
|
|
364
|
+
def severity(self) -> Optional[str]:
|
|
365
|
+
"""Severity level of the compliance violation (if failed)."""
|
|
366
|
+
return self._severity if self.compliance_result == "FAIL" else None
|
|
367
|
+
|
|
368
|
+
    def _add_compliance_assessment_section(self, desc_parts: list) -> None:
        """
        Add the compliance assessment section to the HTML description.

        Appends a color-coded FAILED/PASSED/INCONCLUSIVE summary together with
        per-category evidence counts. Mutates *desc_parts* in place.

        :param list desc_parts: Accumulator of HTML fragments for the description
        """
        compliance_result = self.compliance_result
        # Recount directly from evidence; NOT_APPLICABLE and None both fall
        # into "inconclusive" for display purposes here.
        compliant_count = sum(1 for e in self.evidence_items if self._get_evidence_compliance(e) == "COMPLIANT")
        failed_count = sum(1 for e in self.evidence_items if self._get_evidence_compliance(e) == "FAILED")
        inconclusive_count = len(self.evidence_items) - compliant_count - failed_count

        desc_parts.append(f"{HTML_H4_OPEN}Compliance Assessment{HTML_H4_CLOSE}")
        desc_parts.append(HTML_P_OPEN)

        if compliance_result == "FAIL":
            # Red FAILED banner plus the failed/total breakdown
            desc_parts.append(
                f"<span style='color: red;'>{HTML_STRONG_OPEN}Result: FAILED{HTML_STRONG_CLOSE}</span>{HTML_BR}"
            )
            desc_parts.append(
                f"This control has {HTML_STRONG_OPEN}{failed_count} failed evidence item(s){HTML_STRONG_CLOSE} "
                f"out of {len(self.evidence_items)} total.{HTML_BR}"
            )
            if compliant_count > 0:
                desc_parts.append(f"{compliant_count} evidence item(s) are compliant. ")
            if inconclusive_count > 0:
                desc_parts.append(f"{inconclusive_count} evidence item(s) have no compliance check available.")
        elif compliance_result == "PASS":
            # Green PASSED banner
            desc_parts.append(
                f"<span style='color: green;'>{HTML_STRONG_OPEN}Result: PASSED{HTML_STRONG_CLOSE}</span>{HTML_BR}"
            )
            desc_parts.append(f"All {compliant_count} evidence item(s) with compliance checks are compliant.")
            if inconclusive_count > 0:
                desc_parts.append(f" ({inconclusive_count} evidence item(s) have no compliance check available)")
        else:
            # compliance_result is None -> no verdict could be reached
            desc_parts.append(f"{HTML_STRONG_OPEN}Result: INCONCLUSIVE{HTML_STRONG_CLOSE}{HTML_BR}")
            desc_parts.append(
                f"No compliance checks are available for the {len(self.evidence_items)} evidence item(s) collected."
            )

        desc_parts.append(HTML_P_CLOSE)
|
|
404
|
+
|
|
405
|
+
def _add_remediation_section(self, desc_parts: list) -> None:
|
|
406
|
+
"""Add remediation section to description."""
|
|
407
|
+
if not (self.action_plan_title or self.action_plan_instructions):
|
|
408
|
+
return
|
|
409
|
+
|
|
410
|
+
desc_parts.append(f"{HTML_H4_OPEN}Remediation{HTML_H4_CLOSE}")
|
|
411
|
+
if self.action_plan_title:
|
|
412
|
+
desc_parts.append(
|
|
413
|
+
f"{HTML_P_OPEN}{HTML_STRONG_OPEN}Action Plan:{HTML_STRONG_CLOSE} {self.action_plan_title}"
|
|
414
|
+
f"{HTML_P_CLOSE}"
|
|
415
|
+
)
|
|
416
|
+
if self.action_plan_instructions:
|
|
417
|
+
desc_parts.append(
|
|
418
|
+
f"{HTML_P_OPEN}{HTML_STRONG_OPEN}Remediation Steps:{HTML_STRONG_CLOSE}{HTML_BR}"
|
|
419
|
+
f"{self.action_plan_instructions}{HTML_P_CLOSE}"
|
|
420
|
+
)
|
|
421
|
+
|
|
422
|
+
def _add_comments_section(self, desc_parts: list) -> None:
|
|
423
|
+
"""Add assessor comments section to description."""
|
|
424
|
+
if not self.control_comments:
|
|
425
|
+
return
|
|
426
|
+
|
|
427
|
+
desc_parts.append(f"{HTML_H4_OPEN}Assessor Comments{HTML_H4_CLOSE}")
|
|
428
|
+
desc_parts.append(HTML_UL_OPEN)
|
|
429
|
+
for comment in self.control_comments[:5]: # Show up to 5 comments
|
|
430
|
+
author = comment.get("authorName", "Unknown")
|
|
431
|
+
posted_date = comment.get("postedDate", "")
|
|
432
|
+
comment_body = comment.get("commentBody", "")
|
|
433
|
+
desc_parts.append(
|
|
434
|
+
f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{author}{HTML_STRONG_CLOSE} ({posted_date}): {comment_body} {HTML_LI_CLOSE}"
|
|
435
|
+
)
|
|
436
|
+
desc_parts.append(HTML_UL_CLOSE)
|
|
437
|
+
|
|
438
|
+
    @property
    def description(self) -> str:
        """
        Description of the compliance check using HTML formatting.

        Assembles, in order: header with control/framework/assessment metadata,
        the control response (if any), the compliance assessment section (only
        when evidence exists), remediation guidance, testing guidance, and
        assessor comments. Fragments are joined with newlines.

        :return: HTML description string
        :rtype: str
        """
        desc_parts = [
            f"<h3>AWS Audit Manager assessment for control {self.control_id}</h3>",
            HTML_P_OPEN,
            f"{HTML_STRONG_OPEN}Control:{HTML_STRONG_CLOSE} {self._control_name}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Framework:{HTML_STRONG_CLOSE} {self.framework_name}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Assessment:{HTML_STRONG_CLOSE} {self.assessment_name}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Status:{HTML_STRONG_CLOSE} {self.control_status}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Evidence Count:{HTML_STRONG_CLOSE} {self.evidence_count}",
            HTML_P_CLOSE,
        ]

        if self.control_response:
            desc_parts.extend(
                [HTML_P_OPEN, f"{HTML_STRONG_OPEN}Response:{HTML_STRONG_CLOSE} {self.control_response}", HTML_P_CLOSE]
            )

        # Add compliance result analysis if evidence is available
        if self.evidence_items:
            self._add_compliance_assessment_section(desc_parts)

        # Add remediation guidance if available (helper no-ops when empty)
        self._add_remediation_section(desc_parts)

        # Add testing information if available
        if self.testing_information:
            desc_parts.extend(
                [
                    f"{HTML_H4_OPEN}Testing Guidance{HTML_H4_CLOSE}",
                    f"{HTML_P_OPEN}{self.testing_information}{HTML_P_CLOSE}",
                ]
            )

        # Add comments from assessors (helper no-ops when empty)
        self._add_comments_section(desc_parts)

        return "\n".join(desc_parts)
|
|
477
|
+
|
|
478
|
+
@property
|
|
479
|
+
def framework(self) -> str:
|
|
480
|
+
"""Compliance framework (e.g., NIST800-53R5, CSF)."""
|
|
481
|
+
framework_mappings = {
|
|
482
|
+
"NIST SP 800-53 Revision 5": "NIST800-53R5",
|
|
483
|
+
"NIST SP 800-53 Rev 5": "NIST800-53R5",
|
|
484
|
+
"NIST 800-53 R5": "NIST800-53R5",
|
|
485
|
+
"NIST 800-53 Revision 5": "NIST800-53R5",
|
|
486
|
+
"SOC2": "SOC2",
|
|
487
|
+
"PCI DSS": "PCI DSS",
|
|
488
|
+
"HIPAA": "HIPAA",
|
|
489
|
+
"GDPR": "GDPR",
|
|
490
|
+
}
|
|
491
|
+
if not self.framework_name:
|
|
492
|
+
return "NIST800-53R5"
|
|
493
|
+
for key, value in framework_mappings.items():
|
|
494
|
+
if key.lower() in self.framework_name.lower():
|
|
495
|
+
return value
|
|
496
|
+
# Return framework name directly for custom frameworks
|
|
497
|
+
# Check framework_type first (STANDARD vs CUSTOM)
|
|
498
|
+
if self.framework_type == "CUSTOM":
|
|
499
|
+
return self.framework_name
|
|
500
|
+
# For unknown standard frameworks, return the name as-is
|
|
501
|
+
return self.framework_name
|
|
502
|
+
|
|
503
|
+
def _format_control_parts(self, prefix: str, base_num: str, enhancement: Optional[str] = None) -> str:
|
|
504
|
+
"""
|
|
505
|
+
Format control ID parts into standard RegScale format.
|
|
506
|
+
|
|
507
|
+
:param str prefix: Control prefix (e.g., AC, SI)
|
|
508
|
+
:param str base_num: Base number (e.g., 2, 3)
|
|
509
|
+
:param Optional[str] enhancement: Enhancement number (e.g., 1, 4)
|
|
510
|
+
:return: Formatted control ID
|
|
511
|
+
:rtype: str
|
|
512
|
+
"""
|
|
513
|
+
# Remove leading zeros
|
|
514
|
+
base = str(int(base_num))
|
|
515
|
+
if enhancement:
|
|
516
|
+
enh = str(int(enhancement))
|
|
517
|
+
return f"{prefix}-{base}({enh})"
|
|
518
|
+
return f"{prefix}-{base}"
|
|
519
|
+
|
|
520
|
+
def _try_parse_dot_notation(self, control_id: str) -> Optional[str]:
|
|
521
|
+
"""
|
|
522
|
+
Try to parse dot notation format: AC.2.1 or AC.2.
|
|
523
|
+
|
|
524
|
+
:param str control_id: Control ID to parse
|
|
525
|
+
:return: Normalized control ID or None
|
|
526
|
+
:rtype: Optional[str]
|
|
527
|
+
"""
|
|
528
|
+
import re
|
|
529
|
+
|
|
530
|
+
dot_pattern = r"^([A-Z]{2,3})\.(\d+)(?:\.(\d+))?$"
|
|
531
|
+
match = re.match(dot_pattern, control_id)
|
|
532
|
+
if match:
|
|
533
|
+
return self._format_control_parts(match.group(1), match.group(2), match.group(3))
|
|
534
|
+
return None
|
|
535
|
+
|
|
536
|
+
def _try_parse_standard_format(self, control_id: str) -> Optional[str]:
|
|
537
|
+
"""
|
|
538
|
+
Try to parse standard format: AC-2(1), AC-2 (1), AC-2-1, AC-2.1.
|
|
539
|
+
|
|
540
|
+
:param str control_id: Control ID to parse
|
|
541
|
+
:return: Normalized control ID or None
|
|
542
|
+
:rtype: Optional[str]
|
|
543
|
+
"""
|
|
544
|
+
import re
|
|
545
|
+
|
|
546
|
+
pattern = r"^([A-Z]{2,3})-(\d+)(?:[\s\-\.](\d+)|\s?\((\d+)\))?$"
|
|
547
|
+
match = re.match(pattern, control_id)
|
|
548
|
+
if match:
|
|
549
|
+
enhancement = match.group(3) or match.group(4)
|
|
550
|
+
return self._format_control_parts(match.group(1), match.group(2), enhancement)
|
|
551
|
+
return None
|
|
552
|
+
|
|
553
|
+
def _try_parse_hyphen_split(self, control_id: str) -> Optional[str]:
|
|
554
|
+
"""
|
|
555
|
+
Try to parse by splitting on hyphens.
|
|
556
|
+
|
|
557
|
+
:param str control_id: Control ID to parse
|
|
558
|
+
:return: Normalized control ID or None
|
|
559
|
+
:rtype: Optional[str]
|
|
560
|
+
"""
|
|
561
|
+
if "-" not in control_id:
|
|
562
|
+
return None
|
|
563
|
+
|
|
564
|
+
parts = control_id.split("-")
|
|
565
|
+
if len(parts) < 2:
|
|
566
|
+
return None
|
|
567
|
+
|
|
568
|
+
try:
|
|
569
|
+
enhancement = parts[2] if len(parts) > 2 else None
|
|
570
|
+
return self._format_control_parts(parts[0], parts[1], enhancement)
|
|
571
|
+
except (ValueError, IndexError):
|
|
572
|
+
return None
|
|
573
|
+
|
|
574
|
+
def _normalize_control_id(self, control_id: str) -> str:
|
|
575
|
+
"""
|
|
576
|
+
Normalize control ID to remove leading zeros and standardize format to match RegScale.
|
|
577
|
+
|
|
578
|
+
Handles various AWS Audit Manager formats:
|
|
579
|
+
- AC-2, AC-02
|
|
580
|
+
- AC-2(1), AC-02(04)
|
|
581
|
+
- AC-2 (1), AC-02 (04)
|
|
582
|
+
- AC-2-1, AC-02-04
|
|
583
|
+
- AC-2.1, AC-02.04, AC.2.1 (dot notation)
|
|
584
|
+
|
|
585
|
+
Returns format: AC-2 or AC-2(1) to match RegScale control IDs
|
|
586
|
+
|
|
587
|
+
:param str control_id: Raw control ID
|
|
588
|
+
:return: Normalized control ID in RegScale format
|
|
589
|
+
:rtype: str
|
|
590
|
+
"""
|
|
591
|
+
if not control_id:
|
|
592
|
+
return ""
|
|
593
|
+
|
|
594
|
+
control_id = control_id.strip().upper()
|
|
595
|
+
|
|
596
|
+
# Try parsing strategies in order
|
|
597
|
+
result = self._try_parse_dot_notation(control_id)
|
|
598
|
+
if result:
|
|
599
|
+
return result
|
|
600
|
+
|
|
601
|
+
result = self._try_parse_standard_format(control_id)
|
|
602
|
+
if result:
|
|
603
|
+
return result
|
|
604
|
+
|
|
605
|
+
result = self._try_parse_hyphen_split(control_id)
|
|
606
|
+
if result:
|
|
607
|
+
return result
|
|
608
|
+
|
|
609
|
+
logger.warning(f"Could not parse control ID format: '{control_id}'")
|
|
610
|
+
return control_id
|
|
611
|
+
|
|
612
|
+
|
|
613
|
+
@dataclass
class EvidenceCollectionConfig:
    """Configuration for evidence collection from AWS Audit Manager."""

    # Master switch: when False, no evidence is fetched during the sync.
    collect_evidence: bool = False
    # Optional subset of control IDs to collect evidence for; None appears to
    # mean "collect for all controls" — confirm against the consumer's
    # _should_collect_control_evidence.
    evidence_control_ids: Optional[List[str]] = None
    # Evidence collection frequency (presumably days) — TODO confirm units.
    evidence_frequency: int = 30
    # Per-control cap on fetched evidence items; consumers clamp this to the
    # AWS API limit of 1000.
    max_evidence_per_control: int = 100
|
|
621
|
+
|
|
622
|
+
|
|
623
|
+
class AWSAuditManagerCompliance(ComplianceIntegration):
|
|
624
|
+
"""Process AWS Audit Manager assessments and create compliance records in RegScale."""
|
|
625
|
+
|
|
626
|
+
    def __init__(
        self,
        plan_id: int,
        region: str = "us-east-1",
        framework: str = "NIST800-53R5",
        assessment_id: Optional[str] = None,
        create_issues: bool = True,
        update_control_status: bool = True,
        create_poams: bool = False,
        parent_module: str = "securityplans",
        evidence_config: Optional[EvidenceCollectionConfig] = None,
        force_refresh: bool = False,
        use_assessment_evidence_folders: bool = True,
        **kwargs,
    ):
        """
        Initialize AWS Audit Manager compliance integration.

        :param int plan_id: RegScale plan ID
        :param str region: AWS region
        :param str framework: Compliance framework
        :param Optional[str] assessment_id: Specific assessment ID to sync
        :param bool create_issues: Whether to create issues for failed compliance
        :param bool update_control_status: Whether to update control implementation status
        :param bool create_poams: Whether to mark issues as POAMs
        :param str parent_module: RegScale parent module
        :param Optional[EvidenceCollectionConfig] evidence_config: Evidence collection configuration;
            when omitted, an equivalent config is built from the individual kwargs
            (collect_evidence, evidence_control_ids, evidence_frequency, max_evidence_per_control)
        :param bool force_refresh: Force refresh of compliance data by bypassing cache
        :param bool use_assessment_evidence_folders: Use GetEvidenceFoldersByAssessment API for faster
            evidence collection (default: True; when False, the per-control API is used)
        :param kwargs: Additional parameters including AWS credentials (profile, aws_access_key_id,
            aws_secret_access_key, aws_session_token) and custom_framework_name
        """
        super().__init__(
            plan_id=plan_id,
            framework=framework,
            create_issues=create_issues,
            update_control_status=update_control_status,
            create_poams=create_poams,
            parent_module=parent_module,
            **kwargs,
        )

        self.region = region
        self.assessment_id = assessment_id
        self.title = "AWS Audit Manager"
        # Only meaningful when framework == "CUSTOM"; used to match assessments
        # against a custom framework name.
        self.custom_framework_name = kwargs.get("custom_framework_name")

        # Evidence collection parameters - support both evidence_config object and individual kwargs
        if evidence_config:
            # Use provided evidence_config object
            self.evidence_config = evidence_config
            self.collect_evidence = evidence_config.collect_evidence
            self.evidence_control_ids = evidence_config.evidence_control_ids
            self.evidence_frequency = evidence_config.evidence_frequency
            self.max_evidence_per_control = min(evidence_config.max_evidence_per_control, 1000)  # AWS API limit
        else:
            # Build evidence_config from kwargs (for CLI compatibility)
            collect_evidence = kwargs.get("collect_evidence", False)
            evidence_control_ids = kwargs.get("evidence_control_ids")
            evidence_frequency = kwargs.get("evidence_frequency", 30)
            max_evidence_per_control = kwargs.get("max_evidence_per_control", 100)

            self.evidence_config = EvidenceCollectionConfig(
                collect_evidence=collect_evidence,
                evidence_control_ids=evidence_control_ids,
                evidence_frequency=evidence_frequency,
                max_evidence_per_control=max_evidence_per_control,
            )
            self.collect_evidence = collect_evidence
            self.evidence_control_ids = evidence_control_ids
            self.evidence_frequency = evidence_frequency
            self.max_evidence_per_control = min(max_evidence_per_control, 1000)  # AWS API limit

        # Cache control
        self.force_refresh = force_refresh

        # Evidence collection method
        self.use_assessment_evidence_folders = use_assessment_evidence_folders

        # Extract AWS credentials from kwargs
        profile = kwargs.get("profile")
        aws_access_key_id = kwargs.get("aws_access_key_id")
        aws_secret_access_key = kwargs.get("aws_secret_access_key")
        aws_session_token = kwargs.get("aws_session_token")

        # INFO-level logging for credential resolution
        # Explicit key-pair credentials take precedence over a named profile.
        if aws_access_key_id and aws_secret_access_key:
            logger.info("Initializing AWS Audit Manager client with explicit credentials")
            self.session = boto3.Session(
                region_name=region,
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                aws_session_token=aws_session_token,
            )
        else:
            logger.info(f"Initializing AWS Audit Manager client with profile: {profile if profile else 'default'}")
            self.session = boto3.Session(profile_name=profile, region_name=region)

        try:
            self.client = self.session.client("auditmanager")
            logger.info("Successfully created AWS Audit Manager client")
        except Exception as e:
            # Client creation failure is fatal for this integration; log and re-raise.
            logger.error(f"Failed to create AWS Audit Manager client: {e}")
            raise
|
|
731
|
+
|
|
732
|
+
def _is_cache_valid(self) -> bool:
|
|
733
|
+
"""
|
|
734
|
+
Check if the cache file exists and is within the TTL.
|
|
735
|
+
|
|
736
|
+
:return: True if cache is valid, False otherwise
|
|
737
|
+
:rtype: bool
|
|
738
|
+
"""
|
|
739
|
+
if not os.path.exists(AUDIT_MANAGER_CACHE_FILE):
|
|
740
|
+
logger.debug("Cache file does not exist")
|
|
741
|
+
return False
|
|
742
|
+
|
|
743
|
+
file_age = time.time() - os.path.getmtime(AUDIT_MANAGER_CACHE_FILE)
|
|
744
|
+
is_valid = file_age < CACHE_TTL_SECONDS
|
|
745
|
+
|
|
746
|
+
if is_valid:
|
|
747
|
+
hours_old = file_age / 3600
|
|
748
|
+
logger.info(f"Using cached Audit Manager data (age: {hours_old:.1f} hours)")
|
|
749
|
+
else:
|
|
750
|
+
hours_old = file_age / 3600
|
|
751
|
+
logger.debug(f"Cache expired (age: {hours_old:.1f} hours, TTL: {CACHE_TTL_SECONDS / 3600} hours)")
|
|
752
|
+
|
|
753
|
+
return is_valid
|
|
754
|
+
|
|
755
|
+
def _load_cached_data(self) -> List[Dict[str, Any]]:
|
|
756
|
+
"""
|
|
757
|
+
Load compliance data from cache file.
|
|
758
|
+
|
|
759
|
+
:return: List of raw compliance data from cache
|
|
760
|
+
:rtype: List[Dict[str, Any]]
|
|
761
|
+
"""
|
|
762
|
+
try:
|
|
763
|
+
with open(AUDIT_MANAGER_CACHE_FILE, encoding="utf-8") as file:
|
|
764
|
+
cached_data = json.load(file)
|
|
765
|
+
logger.info(f"Loaded {len(cached_data)} compliance items from cache")
|
|
766
|
+
return cached_data
|
|
767
|
+
except (json.JSONDecodeError, IOError) as e:
|
|
768
|
+
logger.warning(f"Error reading cache file: {e}. Fetching fresh data.")
|
|
769
|
+
return []
|
|
770
|
+
|
|
771
|
+
def _save_to_cache(self, compliance_data: List[Dict[str, Any]]) -> None:
|
|
772
|
+
"""
|
|
773
|
+
Save compliance data to cache file.
|
|
774
|
+
|
|
775
|
+
:param List[Dict[str, Any]] compliance_data: Data to cache
|
|
776
|
+
:return: None
|
|
777
|
+
:rtype: None
|
|
778
|
+
"""
|
|
779
|
+
try:
|
|
780
|
+
# Ensure the artifacts directory exists
|
|
781
|
+
os.makedirs(os.path.dirname(AUDIT_MANAGER_CACHE_FILE), exist_ok=True)
|
|
782
|
+
|
|
783
|
+
with open(AUDIT_MANAGER_CACHE_FILE, "w", encoding="utf-8") as file:
|
|
784
|
+
json.dump(compliance_data, file, indent=2, default=str)
|
|
785
|
+
|
|
786
|
+
logger.info(f"Cached {len(compliance_data)} compliance items to {AUDIT_MANAGER_CACHE_FILE}")
|
|
787
|
+
except IOError as e:
|
|
788
|
+
logger.warning(f"Error writing to cache file: {e}")
|
|
789
|
+
|
|
790
|
+
def _collect_evidence_for_control(
|
|
791
|
+
self, assessment_id: str, control_set_id: str, control: Dict[str, Any], assessment: Dict[str, Any]
|
|
792
|
+
) -> Optional[List[Dict[str, Any]]]:
|
|
793
|
+
"""
|
|
794
|
+
Collect evidence for a single control if enabled.
|
|
795
|
+
|
|
796
|
+
:param str assessment_id: Assessment ID
|
|
797
|
+
:param str control_set_id: Control set ID
|
|
798
|
+
:param Dict[str, Any] control: Control data
|
|
799
|
+
:param Dict[str, Any] assessment: Assessment data
|
|
800
|
+
:return: List of evidence items or None if not collected
|
|
801
|
+
:rtype: Optional[List[Dict[str, Any]]]
|
|
802
|
+
"""
|
|
803
|
+
control_id_raw = control.get("id")
|
|
804
|
+
control_evidence_count = control.get("evidenceCount", 0)
|
|
805
|
+
|
|
806
|
+
# Create a temporary compliance item to get normalized control ID
|
|
807
|
+
temp_item = AWSAuditManagerComplianceItem(assessment, control)
|
|
808
|
+
control_id_normalized = temp_item.control_id
|
|
809
|
+
|
|
810
|
+
# Check if we should collect evidence for this control
|
|
811
|
+
if not self._should_collect_control_evidence(control_id_normalized):
|
|
812
|
+
logger.debug(
|
|
813
|
+
f"Skipping evidence collection for control {control_id_normalized} "
|
|
814
|
+
f"(evidenceCount: {control_evidence_count})"
|
|
815
|
+
)
|
|
816
|
+
return None
|
|
817
|
+
|
|
818
|
+
# Log INFO level for controls with evidence to show progress
|
|
819
|
+
if control_evidence_count > 0:
|
|
820
|
+
logger.info(
|
|
821
|
+
f"Collecting evidence for control {control_id_normalized} "
|
|
822
|
+
f"({control_evidence_count} evidence items available)..."
|
|
823
|
+
)
|
|
824
|
+
else:
|
|
825
|
+
logger.debug(
|
|
826
|
+
f"Fetching evidence inline for control {control_id_normalized} (evidenceCount: {control_evidence_count})"
|
|
827
|
+
)
|
|
828
|
+
|
|
829
|
+
# Fetch evidence for this control
|
|
830
|
+
evidence_items = self._get_control_evidence(
|
|
831
|
+
assessment_id=assessment_id, control_set_id=control_set_id, control_id=control_id_raw
|
|
832
|
+
)
|
|
833
|
+
|
|
834
|
+
if evidence_items:
|
|
835
|
+
logger.info(
|
|
836
|
+
f"Successfully collected {len(evidence_items)} evidence items for control {control_id_normalized}"
|
|
837
|
+
)
|
|
838
|
+
else:
|
|
839
|
+
logger.debug(f"No evidence items retrieved for control {control_id_normalized}")
|
|
840
|
+
|
|
841
|
+
return evidence_items
|
|
842
|
+
|
|
843
|
+
    def _process_assessment_controls(self, assessment: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Process a single assessment and extract all control data.

        If collect_evidence is True, fetches evidence inline for each control to enable
        compliance determination based on evidence analysis. Progress is reported via
        a progress bar that advances once per control.

        :param Dict[str, Any] assessment: Assessment data
        :return: List of control data for this assessment (with optional evidence_items)
        :rtype: List[Dict[str, Any]]
        """
        compliance_data = []
        control_sets = assessment.get("framework", {}).get("controlSets", [])
        logger.debug(f"Found {len(control_sets)} control sets in assessment")

        # Extract assessment ID for evidence collection: the ID is the final
        # path segment of the assessment ARN.
        assessment_id = assessment.get("arn", "").split("/")[-1]

        # Calculate total controls for progress tracking
        total_controls = sum(len(cs.get("controls", [])) for cs in control_sets)
        logger.info(f"Processing {total_controls} controls across {len(control_sets)} control sets...")

        # Create progress bar for control processing
        progress = create_progress_object()
        with progress:
            task = progress.add_task(
                f"Processing controls for assessment '{assessment.get('name', 'Unknown')}'", total=total_controls
            )

            for control_set in control_sets:
                control_set_id = control_set.get("id")
                controls = control_set.get("controls", [])
                logger.debug(f"Found {len(controls)} controls in control set")

                for control in controls:
                    control_data = {"assessment": assessment, "control": control}

                    # If evidence collection is enabled, fetch evidence inline for compliance determination
                    if self.collect_evidence:
                        evidence_items = self._collect_evidence_for_control(
                            assessment_id, control_set_id, control, assessment
                        )
                        # Only attach the key when evidence was actually returned.
                        if evidence_items:
                            control_data["evidence_items"] = evidence_items

                    compliance_data.append(control_data)
                    progress.update(task, advance=1)

        logger.info(f"Finished processing {len(compliance_data)} controls for assessment")
        return compliance_data
|
|
893
|
+
|
|
894
|
+
def _should_process_assessment(self, assessment: Dict[str, Any]) -> bool:
|
|
895
|
+
"""
|
|
896
|
+
Check if assessment should be processed based on framework match.
|
|
897
|
+
|
|
898
|
+
For custom frameworks (--framework Custom), matches against the assessment name
|
|
899
|
+
using the custom_framework_name parameter.
|
|
900
|
+
|
|
901
|
+
:param Dict[str, Any] assessment: Assessment data
|
|
902
|
+
:return: True if assessment should be processed
|
|
903
|
+
:rtype: bool
|
|
904
|
+
"""
|
|
905
|
+
if not assessment:
|
|
906
|
+
return False
|
|
907
|
+
|
|
908
|
+
assessment_name = assessment.get("name", "Unknown")
|
|
909
|
+
|
|
910
|
+
# Special handling for custom frameworks - match by framework name
|
|
911
|
+
if self.framework.upper() == "CUSTOM":
|
|
912
|
+
if not self.custom_framework_name:
|
|
913
|
+
logger.warning(
|
|
914
|
+
f"Skipping assessment '{assessment_name}' - framework is set to 'CUSTOM' "
|
|
915
|
+
"but no custom_framework_name provided. Use --custom-framework-name to specify."
|
|
916
|
+
)
|
|
917
|
+
return False
|
|
918
|
+
|
|
919
|
+
# Check the framework metadata for custom framework name
|
|
920
|
+
framework = assessment.get("framework", {})
|
|
921
|
+
framework_metadata = framework.get("metadata", {})
|
|
922
|
+
framework_name = framework_metadata.get("name", "")
|
|
923
|
+
|
|
924
|
+
# Debug logging to understand the structure
|
|
925
|
+
logger.debug(f"Assessment '{assessment_name}' framework metadata: {framework_metadata}")
|
|
926
|
+
|
|
927
|
+
# Normalize both names for comparison
|
|
928
|
+
custom_normalized = self.custom_framework_name.lower().replace(" ", "").replace("-", "").replace("_", "")
|
|
929
|
+
framework_normalized = framework_name.lower().replace(" ", "").replace("-", "").replace("_", "")
|
|
930
|
+
|
|
931
|
+
# Match against the framework name (not assessment name)
|
|
932
|
+
if (
|
|
933
|
+
custom_normalized == framework_normalized
|
|
934
|
+
or custom_normalized in framework_normalized
|
|
935
|
+
or framework_normalized in custom_normalized
|
|
936
|
+
):
|
|
937
|
+
logger.info(
|
|
938
|
+
f"Processing assessment '{assessment_name}' - uses custom framework '{framework_name}' matching '{self.custom_framework_name}'"
|
|
939
|
+
)
|
|
940
|
+
return True
|
|
941
|
+
|
|
942
|
+
logger.info(
|
|
943
|
+
f"Skipping assessment '{assessment_name}' - framework '{framework_name}' does not match custom framework name '{self.custom_framework_name}'"
|
|
944
|
+
)
|
|
945
|
+
return False
|
|
946
|
+
|
|
947
|
+
# For standard frameworks, match by framework type
|
|
948
|
+
assessment_framework = self._get_assessment_framework(assessment)
|
|
949
|
+
if not self._matches_framework(assessment_framework):
|
|
950
|
+
logger.info(
|
|
951
|
+
f"Skipping assessment '{assessment_name}' - framework '{assessment_framework}' "
|
|
952
|
+
f"does not match target framework '{self.framework}'"
|
|
953
|
+
)
|
|
954
|
+
return False
|
|
955
|
+
|
|
956
|
+
return True
|
|
957
|
+
|
|
958
|
+
def _fetch_fresh_compliance_data(self) -> List[Dict[str, Any]]:
|
|
959
|
+
"""
|
|
960
|
+
Fetch fresh compliance data from AWS Audit Manager.
|
|
961
|
+
|
|
962
|
+
:return: List of raw compliance data
|
|
963
|
+
:rtype: List[Dict[str, Any]]
|
|
964
|
+
"""
|
|
965
|
+
logger.info("Fetching compliance data from AWS Audit Manager...")
|
|
966
|
+
compliance_data = []
|
|
967
|
+
|
|
968
|
+
assessments = (
|
|
969
|
+
[self._get_assessment_details(self.assessment_id)] if self.assessment_id else self._list_all_assessments()
|
|
970
|
+
)
|
|
971
|
+
|
|
972
|
+
for assessment in assessments:
|
|
973
|
+
if not self._should_process_assessment(assessment):
|
|
974
|
+
continue
|
|
975
|
+
|
|
976
|
+
assessment_id = assessment.get("arn", "")
|
|
977
|
+
logger.info(f"Processing assessment: {assessment.get('name', assessment_id)}")
|
|
978
|
+
compliance_data.extend(self._process_assessment_controls(assessment))
|
|
979
|
+
|
|
980
|
+
logger.info(f"Fetched {len(compliance_data)} compliance items from AWS Audit Manager")
|
|
981
|
+
return compliance_data
|
|
982
|
+
|
|
983
|
+
def fetch_compliance_data(self) -> List[Dict[str, Any]]:
|
|
984
|
+
"""
|
|
985
|
+
Fetch raw compliance data from AWS Audit Manager.
|
|
986
|
+
|
|
987
|
+
Uses cached data if available and not expired (4-hour TTL), unless force_refresh is True.
|
|
988
|
+
Filters assessments to only include those matching the specified framework.
|
|
989
|
+
|
|
990
|
+
:return: List of raw compliance data (assessment + control combinations)
|
|
991
|
+
:rtype: List[Dict[str, Any]]
|
|
992
|
+
"""
|
|
993
|
+
# Check if we should use cached data
|
|
994
|
+
if not self.force_refresh and self._is_cache_valid():
|
|
995
|
+
cached_data = self._load_cached_data()
|
|
996
|
+
if cached_data:
|
|
997
|
+
return self._filter_by_framework(cached_data)
|
|
998
|
+
|
|
999
|
+
# Force refresh requested or no valid cache, fetch fresh data from AWS
|
|
1000
|
+
if self.force_refresh:
|
|
1001
|
+
logger.info("Force refresh requested, bypassing cache and fetching fresh data from AWS Audit Manager...")
|
|
1002
|
+
|
|
1003
|
+
try:
|
|
1004
|
+
compliance_data = self._fetch_fresh_compliance_data()
|
|
1005
|
+
self._save_to_cache(compliance_data)
|
|
1006
|
+
return compliance_data
|
|
1007
|
+
except ClientError as e:
|
|
1008
|
+
logger.error(f"Error fetching compliance data from AWS Audit Manager: {e}")
|
|
1009
|
+
return []
|
|
1010
|
+
|
|
1011
|
+
def create_compliance_item(self, raw_data: Dict[str, Any]) -> ComplianceItem:
|
|
1012
|
+
"""
|
|
1013
|
+
Create a ComplianceItem from raw compliance data.
|
|
1014
|
+
|
|
1015
|
+
:param Dict[str, Any] raw_data: Raw compliance data (assessment + control + optional evidence)
|
|
1016
|
+
:return: ComplianceItem instance
|
|
1017
|
+
:rtype: ComplianceItem
|
|
1018
|
+
"""
|
|
1019
|
+
assessment = raw_data.get("assessment", {})
|
|
1020
|
+
control = raw_data.get("control", {})
|
|
1021
|
+
evidence_items = raw_data.get("evidence_items", [])
|
|
1022
|
+
return AWSAuditManagerComplianceItem(assessment, control, evidence_items)
|
|
1023
|
+
|
|
1024
|
+
def _list_all_assessments(self) -> List[Dict[str, Any]]:
|
|
1025
|
+
"""
|
|
1026
|
+
List all active assessments.
|
|
1027
|
+
|
|
1028
|
+
:return: List of assessment details
|
|
1029
|
+
:rtype: List[Dict[str, Any]]
|
|
1030
|
+
"""
|
|
1031
|
+
assessments = []
|
|
1032
|
+
try:
|
|
1033
|
+
response = self.client.list_assessments()
|
|
1034
|
+
assessment_metadata_list = response.get("assessmentMetadata", [])
|
|
1035
|
+
|
|
1036
|
+
for metadata in assessment_metadata_list:
|
|
1037
|
+
status = metadata.get("status", "")
|
|
1038
|
+
if status in ["ACTIVE", "COMPLETED"]:
|
|
1039
|
+
assessment_id = metadata.get("id", "")
|
|
1040
|
+
assessment = self._get_assessment_details(assessment_id)
|
|
1041
|
+
if assessment:
|
|
1042
|
+
assessments.append(assessment)
|
|
1043
|
+
|
|
1044
|
+
except ClientError as e:
|
|
1045
|
+
logger.error(f"Error listing assessments: {e}")
|
|
1046
|
+
|
|
1047
|
+
return assessments
|
|
1048
|
+
|
|
1049
|
+
def _get_assessment_details(self, assessment_id: str) -> Optional[Dict[str, Any]]:
|
|
1050
|
+
"""
|
|
1051
|
+
Get full assessment details including controls and evidence.
|
|
1052
|
+
|
|
1053
|
+
:param str assessment_id: Assessment ID
|
|
1054
|
+
:return: Assessment details or None
|
|
1055
|
+
:rtype: Optional[Dict[str, Any]]
|
|
1056
|
+
"""
|
|
1057
|
+
try:
|
|
1058
|
+
response = self.client.get_assessment(assessmentId=assessment_id)
|
|
1059
|
+
assessment = response.get("assessment", {})
|
|
1060
|
+
|
|
1061
|
+
metadata = assessment.get("metadata", {})
|
|
1062
|
+
assessment_data = {
|
|
1063
|
+
"arn": assessment.get("arn", ""),
|
|
1064
|
+
"name": metadata.get("name", ""),
|
|
1065
|
+
"description": metadata.get("description", ""),
|
|
1066
|
+
"complianceType": metadata.get("complianceType", ""),
|
|
1067
|
+
"status": metadata.get("status", ""),
|
|
1068
|
+
"awsAccount": assessment.get("awsAccount", {}),
|
|
1069
|
+
"framework": assessment.get("framework", {}),
|
|
1070
|
+
}
|
|
1071
|
+
|
|
1072
|
+
return assessment_data
|
|
1073
|
+
|
|
1074
|
+
except ClientError as e:
|
|
1075
|
+
if e.response["Error"]["Code"] not in ["ResourceNotFoundException", "AccessDeniedException"]:
|
|
1076
|
+
logger.error(f"Error getting assessment details for {assessment_id}: {e}")
|
|
1077
|
+
return None
|
|
1078
|
+
|
|
1079
|
+
def _get_assessment_framework(self, assessment: Dict[str, Any]) -> str:
|
|
1080
|
+
"""
|
|
1081
|
+
Extract framework name from assessment data.
|
|
1082
|
+
|
|
1083
|
+
:param Dict[str, Any] assessment: Assessment data
|
|
1084
|
+
:return: Framework name
|
|
1085
|
+
:rtype: str
|
|
1086
|
+
"""
|
|
1087
|
+
# For custom frameworks, we need to handle the case where the assessment
|
|
1088
|
+
# was created from a custom framework. In this case, we should match
|
|
1089
|
+
# based on the assessment name if the framework is CUSTOM
|
|
1090
|
+
if self.framework.upper() == "CUSTOM" and self.custom_framework_name:
|
|
1091
|
+
# Return the assessment name for custom framework matching
|
|
1092
|
+
# This allows matching against the assessment name pattern
|
|
1093
|
+
return assessment.get("name", "")
|
|
1094
|
+
|
|
1095
|
+
framework_name = assessment.get("framework", {}).get("metadata", {}).get("name", "")
|
|
1096
|
+
compliance_type = assessment.get("complianceType", "")
|
|
1097
|
+
|
|
1098
|
+
# Prefer compliance type if available, otherwise use framework name
|
|
1099
|
+
return compliance_type or framework_name
|
|
1100
|
+
|
|
1101
|
+
def _normalize_framework_string(self, framework_string: str) -> str:
|
|
1102
|
+
"""
|
|
1103
|
+
Normalize a framework string by removing spaces, hyphens, and underscores.
|
|
1104
|
+
|
|
1105
|
+
:param str framework_string: The string to normalize
|
|
1106
|
+
:return: Normalized string
|
|
1107
|
+
:rtype: str
|
|
1108
|
+
"""
|
|
1109
|
+
return framework_string.lower().replace(" ", "").replace("-", "").replace("_", "")
|
|
1110
|
+
|
|
1111
|
+
def _check_custom_framework_match(self, assessment_framework: str) -> bool:
|
|
1112
|
+
"""
|
|
1113
|
+
Check if an assessment framework matches a custom framework.
|
|
1114
|
+
|
|
1115
|
+
:param str assessment_framework: Framework name from AWS assessment
|
|
1116
|
+
:return: True if framework matches custom target
|
|
1117
|
+
:rtype: bool
|
|
1118
|
+
"""
|
|
1119
|
+
if not self.custom_framework_name:
|
|
1120
|
+
logger.warning(
|
|
1121
|
+
"Framework is set to 'CUSTOM' but no custom_framework_name provided. "
|
|
1122
|
+
"Use --custom-framework-name to specify the custom framework name."
|
|
1123
|
+
)
|
|
1124
|
+
return False
|
|
1125
|
+
|
|
1126
|
+
custom_normalized = self._normalize_framework_string(self.custom_framework_name)
|
|
1127
|
+
actual_normalized = self._normalize_framework_string(assessment_framework)
|
|
1128
|
+
|
|
1129
|
+
# Allow flexible matching for custom frameworks
|
|
1130
|
+
matches = (
|
|
1131
|
+
custom_normalized == actual_normalized
|
|
1132
|
+
or custom_normalized in actual_normalized
|
|
1133
|
+
or actual_normalized in custom_normalized
|
|
1134
|
+
or "customframework" in actual_normalized
|
|
1135
|
+
)
|
|
1136
|
+
|
|
1137
|
+
if matches:
|
|
1138
|
+
logger.debug(f"Custom framework match: '{assessment_framework}' matches '{self.custom_framework_name}'")
|
|
1139
|
+
|
|
1140
|
+
return matches
|
|
1141
|
+
|
|
1142
|
+
def _check_framework_aliases(self, target: str, actual: str) -> bool:
|
|
1143
|
+
"""
|
|
1144
|
+
Check if target and actual frameworks match using known aliases.
|
|
1145
|
+
|
|
1146
|
+
:param str target: Normalized target framework
|
|
1147
|
+
:param str actual: Normalized actual framework
|
|
1148
|
+
:return: True if frameworks match via aliases
|
|
1149
|
+
:rtype: bool
|
|
1150
|
+
"""
|
|
1151
|
+
framework_aliases = {
|
|
1152
|
+
"nist80053r5": ["nist", "nistsp80053", "nist80053", "80053"],
|
|
1153
|
+
"soc2": ["soc", "soc2typeii", "soc2type2"],
|
|
1154
|
+
"pcidss": ["pci", "pcidss3.2.1", "pcidss3.2"],
|
|
1155
|
+
"hipaa": ["hipaa", "hipaasecurityrule"],
|
|
1156
|
+
"gdpr": ["gdpr", "generaldataprotectionregulation"],
|
|
1157
|
+
}
|
|
1158
|
+
|
|
1159
|
+
for key, aliases in framework_aliases.items():
|
|
1160
|
+
if target.startswith(key) or any(target.startswith(alias) for alias in aliases):
|
|
1161
|
+
if any(alias in actual for alias in aliases):
|
|
1162
|
+
return True
|
|
1163
|
+
return False
|
|
1164
|
+
|
|
1165
|
+
def _matches_framework(self, assessment_framework: str) -> bool:
|
|
1166
|
+
"""
|
|
1167
|
+
Check if an assessment framework matches the target framework.
|
|
1168
|
+
|
|
1169
|
+
Handles various naming conventions:
|
|
1170
|
+
- NIST 800-53: "NIST SP 800-53 Revision 5", "NIST800-53R5", "NIST 800-53 R5"
|
|
1171
|
+
- SOC 2: "SOC2", "SOC 2", "SOC 2 Type II"
|
|
1172
|
+
- PCI DSS: "PCI DSS", "PCI DSS 3.2.1"
|
|
1173
|
+
- HIPAA: "HIPAA", "HIPAA Security Rule"
|
|
1174
|
+
- GDPR: "GDPR", "General Data Protection Regulation"
|
|
1175
|
+
- CUSTOM: Matches against custom_framework_name parameter or assessment name patterns
|
|
1176
|
+
|
|
1177
|
+
:param str assessment_framework: Framework name from AWS assessment (or assessment name for custom frameworks)
|
|
1178
|
+
:return: True if framework matches target
|
|
1179
|
+
:rtype: bool
|
|
1180
|
+
"""
|
|
1181
|
+
if not assessment_framework:
|
|
1182
|
+
return False
|
|
1183
|
+
|
|
1184
|
+
# Special handling for custom frameworks
|
|
1185
|
+
if self.framework.upper() == "CUSTOM":
|
|
1186
|
+
return self._check_custom_framework_match(assessment_framework)
|
|
1187
|
+
|
|
1188
|
+
# Normalize both for comparison
|
|
1189
|
+
target = self._normalize_framework_string(self.framework)
|
|
1190
|
+
actual = self._normalize_framework_string(assessment_framework)
|
|
1191
|
+
|
|
1192
|
+
# Direct match
|
|
1193
|
+
if target in actual or actual in target:
|
|
1194
|
+
return True
|
|
1195
|
+
|
|
1196
|
+
# Check framework aliases
|
|
1197
|
+
return self._check_framework_aliases(target, actual)
|
|
1198
|
+
|
|
1199
|
+
def _filter_by_framework(self, compliance_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
1200
|
+
"""
|
|
1201
|
+
Filter compliance data to only include items from matching framework.
|
|
1202
|
+
|
|
1203
|
+
:param List[Dict[str, Any]] compliance_data: Raw compliance data
|
|
1204
|
+
:return: Filtered compliance data matching the target framework
|
|
1205
|
+
:rtype: List[Dict[str, Any]]
|
|
1206
|
+
"""
|
|
1207
|
+
filtered_data = []
|
|
1208
|
+
frameworks_seen = set()
|
|
1209
|
+
|
|
1210
|
+
for item in compliance_data:
|
|
1211
|
+
assessment = item.get("assessment", {})
|
|
1212
|
+
assessment_framework = self._get_assessment_framework(assessment)
|
|
1213
|
+
frameworks_seen.add(assessment_framework)
|
|
1214
|
+
|
|
1215
|
+
if self._matches_framework(assessment_framework):
|
|
1216
|
+
filtered_data.append(item)
|
|
1217
|
+
|
|
1218
|
+
if filtered_data != compliance_data:
|
|
1219
|
+
logger.info(
|
|
1220
|
+
f"Filtered compliance data by framework: {len(compliance_data)} total items, "
|
|
1221
|
+
f"{len(filtered_data)} matching '{self.framework}'"
|
|
1222
|
+
)
|
|
1223
|
+
logger.debug(f"Frameworks found in cached data: {sorted(frameworks_seen)}")
|
|
1224
|
+
|
|
1225
|
+
return filtered_data
|
|
1226
|
+
|
|
1227
|
+
def _map_resource_type_to_asset_type(self, compliance_item: ComplianceItem) -> str:
|
|
1228
|
+
"""
|
|
1229
|
+
Map AWS resource type to RegScale asset type.
|
|
1230
|
+
|
|
1231
|
+
:param ComplianceItem compliance_item: Compliance item with resource information
|
|
1232
|
+
:return: Asset type string
|
|
1233
|
+
:rtype: str
|
|
1234
|
+
"""
|
|
1235
|
+
return "AWS Account"
|
|
1236
|
+
|
|
1237
|
+
def _map_severity(self, severity: Optional[str]) -> regscale_models.IssueSeverity:
    """
    Map an AWS severity string to the RegScale severity enum.

    Unknown, unrecognized, or missing severities default to Moderate.

    :param Optional[str] severity: Severity string from AWS
    :return: Mapped RegScale severity enum value
    :rtype: regscale_models.IssueSeverity
    """
    default = regscale_models.IssueSeverity.Moderate
    if not severity:
        return default

    # Case-insensitive lookup; AWS reports severities in upper case
    lookup = {
        "CRITICAL": regscale_models.IssueSeverity.Critical,
        "HIGH": regscale_models.IssueSeverity.High,
        "MEDIUM": regscale_models.IssueSeverity.Moderate,
        "LOW": regscale_models.IssueSeverity.Low,
    }
    return lookup.get(severity.upper(), default)
def _should_collect_control_evidence(self, control_id_normalized: str) -> bool:
|
|
1258
|
+
"""
|
|
1259
|
+
Check if evidence should be collected for a control.
|
|
1260
|
+
|
|
1261
|
+
Note: AWS Audit Manager's evidenceCount field in control metadata is not always accurate.
|
|
1262
|
+
We attempt to fetch evidence for all controls (or filtered controls if specified) and
|
|
1263
|
+
let the API determine if evidence exists.
|
|
1264
|
+
|
|
1265
|
+
:param str control_id_normalized: Normalized control ID
|
|
1266
|
+
:return: True if evidence should be collected
|
|
1267
|
+
:rtype: bool
|
|
1268
|
+
"""
|
|
1269
|
+
# Filter by control IDs if specified
|
|
1270
|
+
if self.evidence_control_ids:
|
|
1271
|
+
if control_id_normalized not in self.evidence_control_ids:
|
|
1272
|
+
logger.debug(f"Skipping evidence collection for control {control_id_normalized} (not in filter list)")
|
|
1273
|
+
return False
|
|
1274
|
+
|
|
1275
|
+
# Don't skip based on evidenceCount - AWS Audit Manager metadata may be inaccurate
|
|
1276
|
+
# The API call will return empty list if no evidence exists
|
|
1277
|
+
return True
|
|
1278
|
+
|
|
1279
|
+
def _process_control_evidence(
    self,
    assessment_id: str,
    control_set_id: str,
    control: Dict[str, Any],
    assessment: Dict[str, Any],
    all_evidence_items: List[Dict[str, Any]],
    control_summary: Dict[str, Dict[str, Any]],
) -> None:
    """
    Process evidence collection for a single control.

    :param str assessment_id: Assessment ID
    :param str control_set_id: Control set ID
    :param Dict[str, Any] control: Control data
    :param Dict[str, Any] assessment: Assessment data
    :param List[Dict[str, Any]] all_evidence_items: List to append evidence to (mutated in place)
    :param Dict[str, Dict[str, Any]] control_summary: Summary dict to update (mutated in place)
    :return: None
    :rtype: None
    """
    raw_id = control.get("id")
    name = control.get("name", "")
    reported_count = control.get("evidenceCount", 0)

    # Normalize the control ID (e.g. AU-2, AC-3) via the compliance-item wrapper
    normalized_id = AWSAuditManagerComplianceItem(assessment, control).control_id

    # Honor the optional user-supplied control filter
    if not self._should_collect_control_evidence(normalized_id):
        return

    logger.debug(
        f"Collecting evidence for control: {normalized_id} (evidenceCount: {reported_count})"
    )

    collected = self._get_control_evidence(
        assessment_id=assessment_id, control_set_id=control_set_id, control_id=raw_id
    )
    if not collected:
        return

    # Tag each evidence item with control information for traceability
    for entry in collected:
        entry["_control_id"] = normalized_id
        entry["_control_name"] = name

    all_evidence_items.extend(collected)
    control_summary[normalized_id] = {
        "control_name": name,
        "evidence_count": len(collected),
    }
    logger.info(f"Collected {len(collected)} evidence items for control {normalized_id}")
def _collect_assessment_control_evidence(self, assessment: Dict[str, Any]) -> tuple:
|
|
1335
|
+
"""
|
|
1336
|
+
Collect evidence for all controls in an assessment.
|
|
1337
|
+
|
|
1338
|
+
:param Dict[str, Any] assessment: Assessment data
|
|
1339
|
+
:return: Tuple of (all_evidence_items, control_summary, controls_processed)
|
|
1340
|
+
:rtype: tuple
|
|
1341
|
+
"""
|
|
1342
|
+
assessment_id = assessment.get("arn", "").split("/")[-1]
|
|
1343
|
+
all_evidence_items = []
|
|
1344
|
+
control_summary = {}
|
|
1345
|
+
controls_processed = 0
|
|
1346
|
+
|
|
1347
|
+
# Get control sets from assessment framework
|
|
1348
|
+
control_sets = assessment.get("framework", {}).get("controlSets", [])
|
|
1349
|
+
|
|
1350
|
+
for control_set in control_sets:
|
|
1351
|
+
control_set_id = control_set.get("id")
|
|
1352
|
+
controls = control_set.get("controls", [])
|
|
1353
|
+
|
|
1354
|
+
for control in controls:
|
|
1355
|
+
self._process_control_evidence(
|
|
1356
|
+
assessment_id=assessment_id,
|
|
1357
|
+
control_set_id=control_set_id,
|
|
1358
|
+
control=control,
|
|
1359
|
+
assessment=assessment,
|
|
1360
|
+
all_evidence_items=all_evidence_items,
|
|
1361
|
+
control_summary=control_summary,
|
|
1362
|
+
)
|
|
1363
|
+
controls_processed += 1
|
|
1364
|
+
|
|
1365
|
+
return all_evidence_items, control_summary, controls_processed
|
|
1366
|
+
|
|
1367
|
+
def _get_all_evidence_folders_for_assessment(self, assessment_id: str) -> Dict[str, List[Dict[str, Any]]]:
    """
    Get ALL evidence folders for an assessment using the GetEvidenceFoldersByAssessment API.

    This is faster than iterating through controls individually because it retrieves
    all evidence folders in a single paginated operation. Evidence folders are grouped
    by control ID for easier processing. On an AWS client error, the failure is logged
    and an empty mapping is returned instead of raising.

    :param str assessment_id: Assessment ID
    :return: Dict mapping control_id -> list of evidence folders
    :rtype: Dict[str, List[Dict[str, Any]]]
    """
    evidence_folders_by_control: Dict[str, List[Dict[str, Any]]] = {}
    next_token = None

    logger.info(f"Fetching all evidence folders for assessment {assessment_id} using assessment-level API")

    try:
        while True:
            params = {"assessmentId": assessment_id, "maxResults": 1000}
            if next_token:
                params["nextToken"] = next_token

            logger.debug(f"Calling get_evidence_folders_by_assessment (maxResults={params['maxResults']})")
            response = self.client.get_evidence_folders_by_assessment(**params)
            evidence_folders = response.get("evidenceFolders", [])

            # Group by control ID using setdefault (replaces the manual
            # membership-check-then-insert pattern); folders lacking a
            # controlId are dropped.
            for folder in evidence_folders:
                control_id = folder.get("controlId")
                if control_id:
                    evidence_folders_by_control.setdefault(control_id, []).append(folder)

            logger.debug(f"Retrieved {len(evidence_folders)} evidence folder(s) in this page")

            next_token = response.get("nextToken")
            if not next_token:
                break

        total_folders = sum(len(folders) for folders in evidence_folders_by_control.values())
        logger.info(
            f"Found {total_folders} evidence folder(s) across {len(evidence_folders_by_control)} control(s)"
        )
        return evidence_folders_by_control

    except ClientError as e:
        error_code = e.response["Error"]["Code"]
        error_message = e.response["Error"].get("Message", "")
        logger.error(
            f"Error fetching evidence folders by assessment {assessment_id}: {error_code} - {error_message}"
        )
        return {}
def _collect_evidence_assessment_level(self, assessment: Dict[str, Any], assessment_id: str) -> tuple:
    """
    Collect evidence using the assessment-level API (faster method).

    Uses GetEvidenceFoldersByAssessment to retrieve all evidence folders at once,
    then processes each control's evidence — much faster than iterating the
    controls one at a time.

    :param Dict[str, Any] assessment: Assessment data
    :param str assessment_id: Assessment ID
    :return: Tuple of (all_evidence_items, control_summary)
    :rtype: tuple
    """
    folders_by_control = self._get_all_evidence_folders_for_assessment(assessment_id)
    if not folders_by_control:
        logger.warning(f"No evidence folders found for assessment {assessment_id}")
        return [], {}

    collected: List[Dict[str, Any]] = []
    summary: Dict[str, Dict[str, Any]] = {}

    for raw_control_id, folders in folders_by_control.items():
        # Control metadata comes from the first folder in the group
        control_set_id = folders[0].get("controlSetId")
        control_name = folders[0].get("controlName", raw_control_id)

        # Normalize the control ID through a throwaway compliance item built
        # from the folder metadata
        normalized_id = AWSAuditManagerComplianceItem(
            assessment, {"id": raw_control_id, "name": control_name}
        ).control_id

        # Honor the optional user-supplied control filter
        if self.evidence_control_ids and normalized_id not in self.evidence_control_ids:
            logger.debug(f"Skipping control {normalized_id} (not in filter list)")
            continue

        logger.info(f"Collecting evidence for control {normalized_id} ({len(folders)} evidence folder(s))")

        evidence_items = self._process_evidence_folders(assessment_id, control_set_id, raw_control_id, folders)
        if not evidence_items:
            continue

        # Tag each evidence item with control information for traceability
        for entry in evidence_items:
            entry["_control_id"] = normalized_id
            entry["_control_name"] = control_name

        collected.extend(evidence_items)
        summary[normalized_id] = {
            "control_name": control_name,
            "evidence_count": len(evidence_items),
        }
        logger.info(f"Collected {len(evidence_items)} evidence items for control {normalized_id}")

    return collected, summary
def collect_assessment_evidence(self, assessments: List[Dict[str, Any]]) -> None:
    """
    Collect evidence artifacts from AWS Audit Manager assessments.

    Supports two collection methods, selected by self.use_assessment_evidence_folders:
    1. Assessment-level: GetEvidenceFoldersByAssessment (faster, single API call)
    2. Control-level: GetEvidenceFoldersByAssessmentControl (per-control iteration)

    Aggregates all evidence across all controls in each assessment and creates
    a single consolidated JSONL file per assessment stored in the artifacts directory.
    Creates one RegScale Evidence record per assessment with the consolidated file attached.

    :param List[Dict[str, Any]] assessments: List of assessment data
    :return: None
    :rtype: None
    """
    # Honor the top-level on/off switch before doing any work
    if not self.collect_evidence:
        logger.debug("Evidence collection disabled, skipping")
        return

    collection_method = "assessment-level" if self.use_assessment_evidence_folders else "control-level"
    logger.info(f"Starting evidence collection from AWS Audit Manager using {collection_method} API...")

    evidence_records_created = 0

    for assessment in assessments:
        assessment_name = assessment.get("name", "Unknown Assessment")
        # The assessment ID is the final ARN path segment
        assessment_id = assessment.get("arn", "").split("/")[-1]

        logger.info(f"Collecting evidence for assessment: {assessment_name}")

        # Choose collection method based on flag
        if self.use_assessment_evidence_folders:
            # Fast method: get all evidence folders at once
            all_evidence_items, control_summary = self._collect_evidence_assessment_level(assessment, assessment_id)
            # The assessment-level path does not count controls directly, so the
            # number of controls with evidence stands in for controls processed
            controls_processed = len(control_summary)
        else:
            # Per-control iteration method (backward compatible)
            all_evidence_items, control_summary, controls_processed = self._collect_assessment_control_evidence(
                assessment
            )

        # Create consolidated evidence record if we collected any evidence
        if all_evidence_items:
            evidence_record = self._create_consolidated_evidence_record(
                assessment=assessment,
                assessment_name=assessment_name,
                all_evidence_items=all_evidence_items,
                control_summary=control_summary,
                controls_processed=controls_processed,
            )

            if evidence_record:
                evidence_records_created += 1
        else:
            logger.info(f"No evidence collected for assessment: {assessment_name}")

    logger.info(f"Evidence collection complete: {evidence_records_created} consolidated evidence record(s) created")
def _get_evidence_folders(self, assessment_id: str, control_set_id: str, control_id: str) -> List[Dict[str, Any]]:
    """
    Fetch all evidence folders for a specific control.

    AWS client errors are logged and re-raised to the caller.

    :param str assessment_id: Assessment ID
    :param str control_set_id: Control set ID
    :param str control_id: Control ID (AWS internal ID)
    :return: List of evidence folders
    :rtype: List[Dict[str, Any]]
    """
    logger.debug(
        f"Getting evidence folders for control: assessmentId={assessment_id}, "
        f"controlSetId={control_set_id}, controlId={control_id}"
    )

    try:
        response = self.client.get_evidence_folders_by_assessment_control(
            assessmentId=assessment_id, controlSetId=control_set_id, controlId=control_id
        )
    except ClientError as e:
        error = e.response["Error"]
        logger.error(
            f"Error fetching evidence folders for control {control_id}: {error['Code']} - {error.get('Message', '')}"
        )
        raise

    folders = response.get("evidenceFolders", [])
    logger.debug(f"Found {len(folders)} evidence folder(s) for control {control_id}")
    return folders
def _collect_evidence_from_folder(
    self,
    assessment_id: str,
    control_set_id: str,
    evidence_folder_id: str,
    evidence_items: List[Dict[str, Any]],
) -> None:
    """
    Collect evidence from a single evidence folder, paginating until the folder
    is exhausted or the per-control cap (self.max_evidence_per_control) is hit.

    :param str assessment_id: Assessment ID
    :param str control_set_id: Control set ID
    :param str evidence_folder_id: Evidence folder ID
    :param List[Dict[str, Any]] evidence_items: List to append evidence to (mutated in place)
    :return: None
    :rtype: None
    """
    page_cap = 50  # API maximum per request
    next_token = None

    while len(evidence_items) < self.max_evidence_per_control:
        # Never request more than the remaining budget for this control
        remaining = self.max_evidence_per_control - len(evidence_items)
        params = {
            "assessmentId": assessment_id,
            "controlSetId": control_set_id,
            "evidenceFolderId": evidence_folder_id,
            "maxResults": min(page_cap, remaining),
        }
        if next_token:
            params["nextToken"] = next_token

        logger.debug(
            f"Calling get_evidence_by_evidence_folder: "
            f"evidenceFolderId={evidence_folder_id}, maxResults={params['maxResults']}"
        )

        response = self.client.get_evidence_by_evidence_folder(**params)

        page_evidence = response.get("evidence", [])
        evidence_items.extend(page_evidence)

        logger.debug(
            f"Retrieved {len(page_evidence)} evidence item(s) from folder {evidence_folder_id} "
            f"(total so far: {len(evidence_items)})"
        )

        # Stop when the service reports no further pages or returns an empty page
        next_token = response.get("nextToken")
        if not next_token or not page_evidence:
            break
def _process_evidence_folders(
|
|
1627
|
+
self, assessment_id: str, control_set_id: str, control_id: str, evidence_folders: List[Dict[str, Any]]
|
|
1628
|
+
) -> List[Dict[str, Any]]:
|
|
1629
|
+
"""
|
|
1630
|
+
Process all evidence folders for a control and collect evidence items.
|
|
1631
|
+
|
|
1632
|
+
:param str assessment_id: Assessment ID
|
|
1633
|
+
:param str control_set_id: Control set ID
|
|
1634
|
+
:param str control_id: Control ID (AWS internal ID)
|
|
1635
|
+
:param List[Dict[str, Any]] evidence_folders: List of evidence folders
|
|
1636
|
+
:return: List of evidence items
|
|
1637
|
+
:rtype: List[Dict[str, Any]]
|
|
1638
|
+
"""
|
|
1639
|
+
evidence_items = []
|
|
1640
|
+
|
|
1641
|
+
for folder in evidence_folders:
|
|
1642
|
+
if len(evidence_items) >= self.max_evidence_per_control:
|
|
1643
|
+
logger.debug(
|
|
1644
|
+
f"Reached max evidence limit ({self.max_evidence_per_control}), "
|
|
1645
|
+
f"stopping evidence collection for control {control_id}"
|
|
1646
|
+
)
|
|
1647
|
+
break
|
|
1648
|
+
|
|
1649
|
+
evidence_folder_id = folder.get("id")
|
|
1650
|
+
folder_date = folder.get("date")
|
|
1651
|
+
folder_evidence_count = folder.get("evidenceResourcesIncludedCount", 0)
|
|
1652
|
+
|
|
1653
|
+
logger.debug(
|
|
1654
|
+
f"Processing evidence folder {evidence_folder_id} (date: {folder_date}, "
|
|
1655
|
+
f"evidence count: {folder_evidence_count})"
|
|
1656
|
+
)
|
|
1657
|
+
|
|
1658
|
+
self._collect_evidence_from_folder(assessment_id, control_set_id, evidence_folder_id, evidence_items)
|
|
1659
|
+
|
|
1660
|
+
return evidence_items
|
|
1661
|
+
|
|
1662
|
+
def _get_control_evidence(self, assessment_id: str, control_set_id: str, control_id: str) -> List[Dict[str, Any]]:
    """
    Get evidence items for a specific control from AWS Audit Manager.

    AWS Audit Manager organizes evidence in daily evidence folders within each control:
    first list the folders for the control, then drain evidence from each folder up to
    max_evidence_per_control items. AWS client errors are logged and yield whatever was
    collected so far (typically an empty list) rather than propagating.

    :param str assessment_id: Assessment ID
    :param str control_set_id: Control set ID
    :param str control_id: Control ID (AWS internal ID)
    :return: List of evidence items
    :rtype: List[Dict[str, Any]]
    """
    evidence_items: List[Dict[str, Any]] = []

    try:
        # Step 1: list the control's evidence folders
        folders = self._get_evidence_folders(assessment_id, control_set_id, control_id)
        if not folders:
            logger.debug(f"No evidence folders found for control {control_id}")
            return evidence_items

        # Step 2: drain evidence from every folder (capped per control)
        evidence_items = self._process_evidence_folders(assessment_id, control_set_id, control_id, folders)

        logger.debug(
            f"Retrieved {len(evidence_items)} evidence item(s) for control {control_id} "
            f"from {len(folders)} evidence folder(s)"
        )

    except ClientError as e:
        error_code = e.response["Error"]["Code"]
        error_message = e.response["Error"].get("Message", "")
        # Missing/forbidden resources are expected in some accounts — warn, don't error
        if error_code in ["ResourceNotFoundException", "AccessDeniedException"]:
            logger.warning(f"Cannot access evidence for control {control_id}: {error_code} - {error_message}")
        else:
            logger.error(f"Error retrieving evidence for control {control_id}: {error_code} - {error_message}")

    return evidence_items
def _create_consolidated_evidence_record(
    self,
    assessment: Dict[str, Any],
    assessment_name: str,
    all_evidence_items: List[Dict[str, Any]],
    control_summary: Dict[str, Dict[str, Any]],
    controls_processed: int,
) -> Optional[Any]:
    """
    Create a RegScale Evidence record with consolidated evidence from all controls.

    Saves a consolidated JSONL file to the artifacts directory and attaches it to
    a single Evidence record per assessment, then links the record to the SSP.
    Any failure during record creation/upload is logged and None is returned
    instead of raising.

    :param Dict[str, Any] assessment: Full assessment data from AWS Audit Manager
    :param str assessment_name: Assessment name
    :param List[Dict[str, Any]] all_evidence_items: All evidence items across controls
    :param Dict[str, Dict[str, Any]] control_summary: Summary of evidence per control
    :param int controls_processed: Number of controls processed
    :return: Created Evidence record or None
    :rtype: Optional[Any]
    """
    # Local imports — NOTE(review): presumably to avoid import cycles or defer
    # heavy model imports; confirm before hoisting to module level.
    from datetime import datetime, timedelta

    from regscale.models.regscale_models.evidence import Evidence

    # Build evidence title and description
    scan_date = get_current_datetime(dt_format="%Y-%m-%d")
    title = f"AWS Audit Manager Evidence - {assessment_name} - {scan_date}"

    # Analyze evidence and build description; also yields the sanitized
    # assessment name and file name used for the artifact upload below
    description, safe_assessment_name, file_name = self._build_evidence_description(
        assessment=assessment,
        assessment_name=assessment_name,
        all_evidence_items=all_evidence_items,
        control_summary=control_summary,
        controls_processed=controls_processed,
        scan_date=scan_date,
    )

    # Calculate due date from the configured evidence refresh frequency (days)
    due_date = (datetime.now() + timedelta(days=self.evidence_frequency)).isoformat()

    try:
        # Create Evidence record
        evidence = Evidence(
            title=title,
            description=description,
            status="Collected",
            updateFrequency=self.evidence_frequency,
            dueDate=due_date,
        )

        created_evidence = evidence.create()
        if not created_evidence or not created_evidence.id:
            logger.error("Failed to create evidence record")
            return None

        logger.info(f"Created evidence record {created_evidence.id}: {title}")

        # Save and upload consolidated evidence file
        self._upload_consolidated_evidence(
            created_evidence_id=created_evidence.id,
            safe_assessment_name=safe_assessment_name,
            scan_date=scan_date,
            file_name=file_name,
            all_evidence_items=all_evidence_items,
        )

        # Link evidence to SSP
        self._link_evidence_to_ssp(created_evidence.id)

        return created_evidence

    except Exception as ex:
        # Broad catch is deliberate: a single assessment's failure should not
        # abort evidence collection for the remaining assessments
        logger.error(
            f"Failed to create consolidated evidence for assessment {assessment_name}: {ex}", exc_info=True
        )
        return None
def _get_compliance_check_from_resources(self, resources_included: List[Dict[str, Any]]) -> Optional[str]:
|
|
1785
|
+
"""
|
|
1786
|
+
Determine compliance check status from resource-level checks.
|
|
1787
|
+
|
|
1788
|
+
:param List[Dict[str, Any]] resources_included: List of resources with complianceCheck fields
|
|
1789
|
+
:return: Aggregated compliance check status or None
|
|
1790
|
+
:rtype: Optional[str]
|
|
1791
|
+
"""
|
|
1792
|
+
if not resources_included:
|
|
1793
|
+
return None
|
|
1794
|
+
|
|
1795
|
+
resource_checks = [r.get("complianceCheck") for r in resources_included]
|
|
1796
|
+
if "FAILED" in resource_checks:
|
|
1797
|
+
return "FAILED"
|
|
1798
|
+
if any(check == "COMPLIANT" for check in resource_checks):
|
|
1799
|
+
return "COMPLIANT"
|
|
1800
|
+
if any(check == "NOT_APPLICABLE" for check in resource_checks):
|
|
1801
|
+
return "NOT_APPLICABLE"
|
|
1802
|
+
return None
|
|
1803
|
+
|
|
1804
|
+
def _track_failed_control(
|
|
1805
|
+
self, control_id: Optional[str], failed_controls: set, failed_by_control: Dict[str, int]
|
|
1806
|
+
) -> None:
|
|
1807
|
+
"""
|
|
1808
|
+
Track a failed control for reporting.
|
|
1809
|
+
|
|
1810
|
+
:param Optional[str] control_id: Control ID to track
|
|
1811
|
+
:param set failed_controls: Set of failed control IDs
|
|
1812
|
+
:param Dict[str, int] failed_by_control: Dictionary tracking failure count per control
|
|
1813
|
+
:return: None
|
|
1814
|
+
:rtype: None
|
|
1815
|
+
"""
|
|
1816
|
+
if control_id:
|
|
1817
|
+
failed_controls.add(control_id)
|
|
1818
|
+
failed_by_control[control_id] = failed_by_control.get(control_id, 0) + 1
|
|
1819
|
+
|
|
1820
|
+
def _count_compliance_status(
|
|
1821
|
+
self,
|
|
1822
|
+
compliance_check: Optional[str],
|
|
1823
|
+
evidence: Dict[str, Any],
|
|
1824
|
+
compliant_count: int,
|
|
1825
|
+
failed_count: int,
|
|
1826
|
+
not_applicable_count: int,
|
|
1827
|
+
inconclusive_count: int,
|
|
1828
|
+
failed_controls: set,
|
|
1829
|
+
failed_by_control: Dict[str, int],
|
|
1830
|
+
) -> tuple:
|
|
1831
|
+
"""
|
|
1832
|
+
Count compliance status and update tracking collections.
|
|
1833
|
+
|
|
1834
|
+
:param Optional[str] compliance_check: Compliance check status
|
|
1835
|
+
:param Dict[str, Any] evidence: Evidence item with control_id
|
|
1836
|
+
:param int compliant_count: Current compliant count
|
|
1837
|
+
:param int failed_count: Current failed count
|
|
1838
|
+
:param int not_applicable_count: Current not applicable count
|
|
1839
|
+
:param int inconclusive_count: Current inconclusive count
|
|
1840
|
+
:param set failed_controls: Set of failed control IDs
|
|
1841
|
+
:param Dict[str, int] failed_by_control: Dictionary tracking failure count per control
|
|
1842
|
+
:return: Tuple of updated counts
|
|
1843
|
+
:rtype: tuple
|
|
1844
|
+
"""
|
|
1845
|
+
if compliance_check == "FAILED":
|
|
1846
|
+
failed_count += 1
|
|
1847
|
+
control_id = evidence.get("_control_id")
|
|
1848
|
+
self._track_failed_control(control_id, failed_controls, failed_by_control)
|
|
1849
|
+
elif compliance_check == "COMPLIANT":
|
|
1850
|
+
compliant_count += 1
|
|
1851
|
+
elif compliance_check == "NOT_APPLICABLE":
|
|
1852
|
+
not_applicable_count += 1
|
|
1853
|
+
else:
|
|
1854
|
+
inconclusive_count += 1
|
|
1855
|
+
|
|
1856
|
+
return compliant_count, failed_count, not_applicable_count, inconclusive_count
|
|
1857
|
+
|
|
1858
|
+
def _analyze_compliance_results(self, evidence_items: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Analyze compliance results from evidence items to determine pass/fail statistics.

    Checks the root-level complianceCheck first and falls back to resource-level
    checks (same logic as _aggregate_evidence_compliance).

    :param List[Dict[str, Any]] evidence_items: Evidence items to analyze
    :return: Dictionary with compliance statistics
    :rtype: Dict[str, Any]
    """
    compliant = failed = inconclusive = not_applicable = 0
    failed_controls: set = set()
    failed_by_control: Dict[str, int] = {}

    for evidence in evidence_items:
        # Root-level check wins; otherwise aggregate the per-resource checks
        status = evidence.get("complianceCheck")
        if status is None:
            status = self._get_compliance_check_from_resources(evidence.get("resourcesIncluded", []))

        compliant, failed, not_applicable, inconclusive = self._count_compliance_status(
            status,
            evidence,
            compliant,
            failed,
            not_applicable,
            inconclusive,
            failed_controls,
            failed_by_control,
        )

    return {
        "compliant_count": compliant,
        "failed_count": failed,
        "inconclusive_count": inconclusive,
        "not_applicable_count": not_applicable,
        # Only definitive pass/fail results count toward the denominator
        "total_with_checks": compliant + failed,
        "failed_controls": failed_controls,
        "failed_by_control": failed_by_control,
    }
def _extract_assessment_metadata(self, assessment: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extract assessment metadata for description building.

    Pulls identity, status, framework, AWS account, and timestamp fields out of
    the raw AWS Audit Manager assessment payload, applying "N/A" style defaults
    for anything missing.

    :param Dict[str, Any] assessment: Assessment data
    :return: Dictionary with extracted metadata
    :rtype: Dict[str, Any]
    """
    arn = assessment.get("arn", "N/A")
    framework = assessment.get("framework", {})
    account = assessment.get("awsAccount", {})
    meta = assessment.get("metadata", {})

    account_id = account.get("id", "N/A")
    account_name = account.get("name", "N/A")

    return {
        # The assessment ID is the final ARN path segment (arn:.../assessment/<id>).
        "assessment_id": arn.split("/")[-1] if arn != "N/A" else "N/A",
        "assessment_status": assessment.get("status", "Unknown"),
        "framework_name": framework.get("metadata", {}).get("name", "N/A"),
        "framework_type": framework.get("type", "N/A"),
        # Show "name (id)" when a friendly account name exists, else just the id.
        "account_display": f"{account_name} ({account_id})" if account_name != "N/A" else account_id,
        "assessment_description": meta.get("description", ""),
        "creation_time": meta.get("creationTime"),
        "last_updated": meta.get("lastUpdated"),
    }
def _add_timestamp_to_description(self, description_parts: list, label: str, timestamp: Any) -> None:
    """
    Add formatted timestamp to description if valid.

    Numeric timestamps are interpreted as epoch *seconds* here.
    NOTE(review): _convert_evidence_timestamp treats numeric values as epoch
    milliseconds — confirm which unit assessment metadata actually uses.

    :param list description_parts: List to append timestamp HTML to
    :param str label: Label for the timestamp (e.g., 'Created', 'Last Updated')
    :param Any timestamp: Timestamp value (int, float, or datetime)
    :return: None
    :rtype: None
    """
    from datetime import datetime

    if not timestamp:
        return

    try:
        if isinstance(timestamp, (int, float)):
            dt_obj = datetime.fromtimestamp(timestamp)
        else:
            dt_obj = timestamp
        description_parts.append(
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{label}:{HTML_STRONG_CLOSE} "
            f"{dt_obj.strftime('%Y-%m-%d %H:%M:%S')}{HTML_LI_CLOSE}"
        )
    except (TypeError, ValueError, OverflowError, OSError, AttributeError) as ex:
        # Fix: was a bare `except Exception: pass`, which silently hid *any*
        # failure. Keep the best-effort behavior (skip bad timestamps) but
        # catch only conversion/formatting errors and record why we skipped.
        logger.debug(f"Skipping invalid {label} timestamp {timestamp!r}: {ex}")
def _add_assessment_details_section(
    self, description_parts: list, assessment_name: str, metadata: Dict[str, Any]
) -> None:
    """
    Add assessment details section to description.

    Emits the page title, the optional assessment description paragraph, and a
    bulleted "Assessment Details" list (ID, status, account, framework,
    timestamps) built from the extracted metadata dictionary.

    :param list description_parts: List to append HTML sections to
    :param str assessment_name: Assessment name
    :param Dict[str, Any] metadata: Extracted metadata dictionary
    :return: None
    :rtype: None
    """
    description_parts.append("<h1>AWS Audit Manager Evidence</h1>")
    description_parts.append(
        f"{HTML_P_OPEN}{HTML_STRONG_OPEN}Assessment:{HTML_STRONG_CLOSE} {assessment_name}{HTML_P_CLOSE}"
    )

    if metadata["assessment_description"]:
        description_parts.append(f"{HTML_P_OPEN}{metadata['assessment_description']}{HTML_P_CLOSE}")

    # Label/value pairs rendered as <li><strong>Label:</strong> value</li>.
    detail_rows = [
        ("Assessment ID", f"<code>{metadata['assessment_id']}</code>"),
        ("Status", f"{metadata['assessment_status']}"),
        ("AWS Account", f"{metadata['account_display']}"),
        ("Framework", f"{metadata['framework_name']} ({metadata['framework_type']})"),
    ]
    description_parts.append("<h2>Assessment Details</h2>")
    description_parts.append(HTML_UL_OPEN)
    for row_label, row_value in detail_rows:
        description_parts.append(
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{row_label}:{HTML_STRONG_CLOSE} {row_value}{HTML_LI_CLOSE}"
        )

    # Timestamps are appended inside the same <ul>; helpers skip invalid values.
    self._add_timestamp_to_description(description_parts, "Created", metadata["creation_time"])
    self._add_timestamp_to_description(description_parts, "Last Updated", metadata["last_updated"])
    description_parts.append(HTML_UL_CLOSE)
def _add_compliance_results_section(
    self,
    description_parts: list,
    compliance_stats: Dict[str, Any],
    control_summary: Dict[str, Dict[str, Any]],
    total_evidence_count: int,
) -> None:
    """
    Add compliance results section to description.

    Writes the pass/fail/NA/inconclusive summary list, followed by a
    red-highlighted "Failed Controls" list when any control has failing
    evidence.

    :param list description_parts: List to append HTML sections to
    :param Dict[str, Any] compliance_stats: Compliance statistics
    :param Dict[str, Dict[str, Any]] control_summary: Control summary
    :param int total_evidence_count: Total evidence count
    :return: None
    :rtype: None
    """
    summary_rows = [
        (
            "Evidence with Compliance Checks",
            f"{compliance_stats['total_with_checks']:,} of {total_evidence_count:,}",
        ),
        ("Compliant", f"{compliance_stats['compliant_count']:,}"),
        ("Failed", f"<span style='color: red;'>{compliance_stats['failed_count']:,}</span>"),
        ("Not Applicable", f"{compliance_stats['not_applicable_count']:,}"),
        ("Inconclusive", f"{compliance_stats['inconclusive_count']:,}"),
    ]
    description_parts.append("<h2>Compliance Results</h2>")
    description_parts.append(HTML_UL_OPEN)
    for row_label, row_value in summary_rows:
        description_parts.append(
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{row_label}:{HTML_STRONG_CLOSE} {row_value}{HTML_LI_CLOSE}"
        )
    description_parts.append(HTML_UL_CLOSE)

    if not compliance_stats["failed_controls"]:
        return

    description_parts.append("<h3>Failed Controls</h3>")
    description_parts.append(
        f"{HTML_P_OPEN}<span style='color: red;'>{HTML_STRONG_OPEN}The following controls have "
        f"failed compliance checks:{HTML_STRONG_CLOSE}</span>{HTML_P_CLOSE}"
    )
    description_parts.append(HTML_UL_OPEN)
    for control_id in sorted(compliance_stats["failed_controls"]):
        # Fall back to the control ID when no friendly name was collected.
        control_name = control_summary.get(control_id, {}).get("control_name", control_id)
        failed_total = compliance_stats["failed_by_control"].get(control_id, 0)
        description_parts.append(
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{control_id}:{HTML_STRONG_CLOSE} {control_name} "
            f"({failed_total} failed evidence item(s)){HTML_LI_CLOSE}"
        )
    description_parts.append(HTML_UL_CLOSE)
def _add_evidence_summary_sections(
    self,
    description_parts: list,
    total_evidence_count: int,
    control_summary: Dict[str, Dict[str, Any]],
    controls_processed: int,
    analysis: Dict[str, Any],
) -> None:
    """
    Add evidence summary and related sections to description.

    Emits: evidence totals, per-control summary, data sources, a sample of
    event types (first 10 after sorting), and the evidence date range when
    both endpoints are known.

    :param list description_parts: List to append HTML sections to
    :param int total_evidence_count: Total evidence count
    :param Dict[str, Dict[str, Any]] control_summary: Control summary
    :param int controls_processed: Number of controls processed
    :param Dict[str, Any] analysis: Evidence analysis results
    :return: None
    :rtype: None
    """
    description_parts.extend(
        [
            "<h2>Evidence Summary</h2>",
            HTML_UL_OPEN,
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}Total Evidence Items:{HTML_STRONG_CLOSE} "
            f"{total_evidence_count:,}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}Controls with Evidence:{HTML_STRONG_CLOSE} "
            f"{len(control_summary)}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}Controls Processed:{HTML_STRONG_CLOSE} "
            f"{controls_processed}{HTML_LI_CLOSE}",
            HTML_UL_CLOSE,
            "<h2>Controls Summary</h2>",
            HTML_UL_OPEN,
        ]
    )

    for control_id in sorted(control_summary.keys()):
        control_info = control_summary[control_id]
        description_parts.append(
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{control_id}:{HTML_STRONG_CLOSE} "
            f"{control_info['evidence_count']:,} items - <em>{control_info['control_name']}</em>{HTML_LI_CLOSE}"
        )

    description_parts.append(HTML_UL_CLOSE)
    description_parts.append("<h2>Data Sources</h2>")
    description_parts.append(HTML_UL_OPEN)
    for source in sorted(analysis["data_sources"]):
        description_parts.append(f"{HTML_LI_OPEN}{source}{HTML_LI_CLOSE}")
    description_parts.append(HTML_UL_CLOSE)

    if analysis["event_names"]:
        description_parts.append("<h2>Event Types (Sample)</h2>")
        description_parts.append(HTML_UL_OPEN)
        # Fix: sort *before* slicing. The previous code sliced the first 10
        # elements of an unordered set and then sorted them, so the "sample"
        # varied from run to run; sorting first makes it deterministic.
        for event_name in sorted(analysis["event_names"])[:10]:
            description_parts.append(f"{HTML_LI_OPEN}<code>{event_name}</code>{HTML_LI_CLOSE}")
        description_parts.append(HTML_UL_CLOSE)

    if analysis["date_range_start"] and analysis["date_range_end"]:
        description_parts.extend(
            [
                "<h2>Evidence Date Range</h2>",
                HTML_UL_OPEN,
                f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}From:{HTML_STRONG_CLOSE} "
                f"{analysis['date_range_start'].strftime('%Y-%m-%d %H:%M:%S')}{HTML_LI_CLOSE}",
                f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}To:{HTML_STRONG_CLOSE} "
                f"{analysis['date_range_end'].strftime('%Y-%m-%d %H:%M:%S')}{HTML_LI_CLOSE}",
                HTML_UL_CLOSE,
            ]
        )
def _build_evidence_description(
    self,
    assessment: Dict[str, Any],
    assessment_name: str,
    all_evidence_items: List[Dict[str, Any]],
    control_summary: Dict[str, Dict[str, Any]],
    controls_processed: int,
    scan_date: str,
) -> tuple:
    """
    Build evidence description with analysis of evidence items using HTML formatting.

    Orchestrates the section builders (assessment details, compliance results,
    evidence summaries) and derives the artifact filename from the sanitized
    assessment name.

    :param Dict[str, Any] assessment: Full assessment data from AWS Audit Manager
    :param str assessment_name: Assessment name
    :param List[Dict[str, Any]] all_evidence_items: All evidence items
    :param Dict[str, Dict[str, Any]] control_summary: Control summary
    :param int controls_processed: Number of controls processed
    :param str scan_date: Scan date
    :return: Tuple of (description, safe_assessment_name, file_name)
    :rtype: tuple
    """
    # Sanitize the assessment name for use in a filename (spaces/slashes only,
    # capped at 50 chars to keep paths manageable).
    safe_assessment_name = assessment_name.replace(" ", "_").replace("/", "_")[:50]
    file_name = f"audit_manager_evidence_{safe_assessment_name}_{scan_date}.jsonl.gz"

    metadata = self._extract_assessment_metadata(assessment)
    evidence_total = len(all_evidence_items)
    analysis = self._analyze_evidence_items(all_evidence_items)
    stats = self._analyze_compliance_results(all_evidence_items)

    parts: list = []
    self._add_assessment_details_section(parts, assessment_name, metadata)
    self._add_compliance_results_section(parts, stats, control_summary, evidence_total)
    self._add_evidence_summary_sections(parts, evidence_total, control_summary, controls_processed, analysis)

    parts.extend(
        [
            "<h2>Attached Files</h2>",
            HTML_UL_OPEN,
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{file_name}{HTML_STRONG_CLOSE} (gzipped JSONL format){HTML_LI_CLOSE}",
            HTML_UL_CLOSE,
        ]
    )

    return "\n".join(parts), safe_assessment_name, file_name
def _convert_evidence_timestamp(self, evidence_time: Any) -> Optional[Any]:
    """
    Convert evidence timestamp to datetime if needed.

    Numeric values are divided by 1000, i.e. treated as epoch milliseconds —
    NOTE(review): confirm the unit of evidence "time" against the Audit
    Manager API. Datetimes pass through unchanged; anything else yields None.

    :param Any evidence_time: Evidence time value (datetime, int, float, or other)
    :return: Datetime object or None if invalid
    :rtype: Optional[Any]
    """
    from datetime import datetime

    if isinstance(evidence_time, datetime):
        return evidence_time
    if isinstance(evidence_time, (int, float)):
        # Milliseconds -> seconds for fromtimestamp (local timezone).
        return datetime.fromtimestamp(evidence_time / 1000)
    return None
def _update_date_range(self, evidence_time: Any, date_range_start: Any, date_range_end: Any) -> tuple:
    """
    Update date range with new evidence timestamp.

    Widens the [start, end] window to include evidence_time; a None bound is
    always replaced.

    :param Any evidence_time: Datetime object
    :param Any date_range_start: Current start datetime or None
    :param Any date_range_end: Current end datetime or None
    :return: Tuple of (updated_start, updated_end)
    :rtype: tuple
    """
    new_start = evidence_time if (not date_range_start or evidence_time < date_range_start) else date_range_start
    new_end = evidence_time if (not date_range_end or evidence_time > date_range_end) else date_range_end
    return new_start, new_end
def _process_evidence_item(
    self, item: Dict[str, Any], data_sources: set, event_names: set, date_range_start: Any, date_range_end: Any
) -> tuple:
    """
    Process a single evidence item to extract analysis data.

    Mutates data_sources / event_names in place and returns the possibly
    widened date range.

    :param Dict[str, Any] item: Evidence item
    :param set data_sources: Set to update with data sources
    :param set event_names: Set to update with event names
    :param Any date_range_start: Current start datetime
    :param Any date_range_end: Current end datetime
    :return: Tuple of (date_range_start, date_range_end)
    :rtype: tuple
    """
    # Collect the two optional categorical fields into their buckets.
    for key, bucket in (("dataSource", data_sources), ("eventName", event_names)):
        if key in item:
            bucket.add(item[key])

    if "time" in item:
        parsed_time = self._convert_evidence_timestamp(item["time"])
        if parsed_time:
            date_range_start, date_range_end = self._update_date_range(
                parsed_time, date_range_start, date_range_end
            )

    return date_range_start, date_range_end
def _analyze_evidence_items(self, evidence_items: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Analyze evidence items to extract data sources, event names, and date ranges.

    :param List[Dict[str, Any]] evidence_items: Evidence items to analyze
    :return: Dictionary with analysis results
    :rtype: Dict[str, Any]
    """
    sources: set = set()
    names: set = set()
    range_start = None
    range_end = None

    # Each item may contribute a data source, an event name, and/or a timestamp.
    for entry in evidence_items:
        range_start, range_end = self._process_evidence_item(entry, sources, names, range_start, range_end)

    return {
        "data_sources": sources,
        "event_names": names,
        "date_range_start": range_start,
        "date_range_end": range_end,
    }
def _upload_consolidated_evidence(
    self,
    created_evidence_id: int,
    safe_assessment_name: str,
    scan_date: str,
    file_name: str,
    all_evidence_items: List[Dict[str, Any]],
) -> None:
    """
    Save and upload consolidated evidence file as gzipped JSONL.

    Writes a local artifacts copy, then compresses the same JSONL payload in
    memory and uploads it to the RegScale evidence record.

    :param int created_evidence_id: Evidence record ID
    :param str safe_assessment_name: Safe assessment name for filename
    :param str scan_date: Scan date
    :param str file_name: File name for upload (will be modified to add .gz)
    :param List[Dict[str, Any]] all_evidence_items: All evidence items
    """
    import gzip
    from io import BytesIO

    from regscale.core.app.api import Api
    from regscale.models.regscale_models.file import File

    # Save consolidated JSONL file to artifacts directory (already gzipped)
    artifacts_file_path = self._save_consolidated_evidence_file(
        assessment_name=safe_assessment_name, scan_date=scan_date, evidence_items=all_evidence_items
    )
    if artifacts_file_path:
        logger.info(f"Saved consolidated evidence to: {artifacts_file_path}")

    # Compress evidence data for upload to RegScale
    api = Api()
    jsonl_content = "\n".join([json.dumps(item, default=str) for item in all_evidence_items])

    compressed_buffer = BytesIO()
    with gzip.open(compressed_buffer, "wt", encoding="utf-8", compresslevel=9) as gz_file:
        gz_file.write(jsonl_content)

    compressed_data = compressed_buffer.getvalue()
    # Fix: encode once (was encoded twice) and guard against an empty payload —
    # the previous ratio computation raised ZeroDivisionError when
    # all_evidence_items was empty.
    raw_size = len(jsonl_content.encode("utf-8"))
    compressed_size_mb = len(compressed_data) / (1024 * 1024)
    uncompressed_size_mb = raw_size / (1024 * 1024)
    compression_ratio = (1 - (len(compressed_data) / raw_size)) * 100 if raw_size else 0.0
    comp_reduction_log = f"({compression_ratio:.1f}% reduction)"
    logger.info(
        f"Compressed evidence: {uncompressed_size_mb:.2f} MB -> {compressed_size_mb:.2f} MB {comp_reduction_log}",
    )

    # Upload with .gz extension
    gzipped_file_name = file_name if file_name.endswith(".gz") else f"{file_name}.gz"

    success = File.upload_file_to_regscale(
        file_name=gzipped_file_name,
        parent_id=created_evidence_id,
        parent_module="evidence",
        api=api,
        file_data=compressed_data,
        tags=f"aws,audit-manager,{safe_assessment_name.lower()}",
    )

    if success:
        logger.info(f"Uploaded compressed evidence file for evidence {created_evidence_id}")
    else:
        logger.warning(f"Failed to upload compressed evidence file for evidence {created_evidence_id}")
def _save_consolidated_evidence_file(
    self, assessment_name: str, scan_date: str, evidence_items: List[Dict[str, Any]]
) -> Optional[str]:
    """
    Save consolidated evidence items to gzipped JSONL file in artifacts directory.

    :param str assessment_name: Safe assessment name for filename
    :param str scan_date: Scan date string
    :param List[Dict[str, Any]] evidence_items: All evidence items
    :return: File path if successful, None otherwise
    :rtype: Optional[str]
    """
    import gzip

    target_dir = os.path.join("artifacts", "aws", "audit_manager_evidence")
    target_path = os.path.join(target_dir, f"audit_manager_evidence_{assessment_name}_{scan_date}.jsonl.gz")

    try:
        os.makedirs(target_dir, exist_ok=True)

        # One JSON document per line, gzip-compressed on the fly.
        with gzip.open(target_path, "wt", encoding="utf-8", compresslevel=9) as out:
            for record in evidence_items:
                out.write(json.dumps(record, default=str) + "\n")

        size_mb = os.path.getsize(target_path) / (1024 * 1024)
        logger.info(f"Saved {len(evidence_items)} evidence items to {target_path} ({size_mb:.2f} MB compressed)")
        return target_path
    except IOError as ex:
        logger.error(f"Failed to save consolidated evidence file: {ex}")
        return None
def _link_evidence_to_ssp(self, evidence_id: int) -> None:
    """
    Link evidence to Security Plan.

    Best-effort: a failed mapping creation is logged as a warning rather than
    raised, so evidence collection continues.

    :param int evidence_id: Evidence record ID
    :return: None
    :rtype: None
    """
    from regscale.models.regscale_models.evidence_mapping import EvidenceMapping

    link = EvidenceMapping(evidenceID=evidence_id, mappedID=self.plan_id, mappingType="securityplans")

    try:
        link.create()
        logger.info(f"Linked evidence {evidence_id} to SSP {self.plan_id}")
    except Exception as ex:
        logger.warning(f"Failed to link evidence to SSP: {ex}")
def _handle_permission_error(self, permission_name: str, error: ClientError) -> bool:
    """
    Handle permission test error and log appropriately.

    Access-denied style errors are logged as DENIED; anything else is logged
    as an indeterminate error. Always reports the permission as failed.

    :param str permission_name: Permission being tested
    :param ClientError error: AWS ClientError exception
    :return: False (permission denied/failed)
    :rtype: bool
    """
    error_code = error.response["Error"]["Code"]
    if error_code in ("AccessDeniedException", "UnauthorizedException"):
        logger.error(f"✗ {permission_name} - DENIED: {error.response['Error']['Message']}")
    else:
        logger.warning(f"? {permission_name} - Error: {error}")
    return False
def _test_list_assessments_permission(self, permissions: Dict[str, bool]) -> None:
    """
    Test auditmanager:ListAssessments permission.

    Issues a minimal (maxResults=1) list call and records the outcome.

    :param Dict[str, bool] permissions: Dictionary to update with test result
    :return: None
    :rtype: None
    """
    try:
        self.client.list_assessments(maxResults=1)
    except ClientError as e:
        permissions[IAM_PERMISSION_LIST_ASSESSMENTS] = self._handle_permission_error(
            IAM_PERMISSION_LIST_ASSESSMENTS, e
        )
    else:
        permissions[IAM_PERMISSION_LIST_ASSESSMENTS] = True
        logger.info(f"✓ {IAM_PERMISSION_LIST_ASSESSMENTS} - OK")
def _test_get_assessment_permission(self, permissions: Dict[str, bool]) -> None:
    """
    Test auditmanager:GetAssessment permission.

    Skipped (not recorded in permissions) when no assessment_id is configured.

    :param Dict[str, bool] permissions: Dictionary to update with test result
    :return: None
    :rtype: None
    """
    if not self.assessment_id:
        logger.info(f"⊘ {IAM_PERMISSION_GET_ASSESSMENT} - Skipped (no assessment_id provided)")
        return

    try:
        self.client.get_assessment(assessmentId=self.assessment_id)
    except ClientError as e:
        permissions[IAM_PERMISSION_GET_ASSESSMENT] = self._handle_permission_error(IAM_PERMISSION_GET_ASSESSMENT, e)
    else:
        permissions[IAM_PERMISSION_GET_ASSESSMENT] = True
        logger.info(f"✓ {IAM_PERMISSION_GET_ASSESSMENT} - OK")
def _get_first_control_from_assessment(self) -> Optional[tuple]:
    """
    Get first control from assessment for permission testing.

    Fetches the configured assessment and returns the IDs of the first control
    in the first control set, or None when unavailable.

    :return: Tuple of (control_set_id, control_id) or None
    :rtype: Optional[tuple]
    """
    try:
        response = self.client.get_assessment(assessmentId=self.assessment_id)
        control_sets = response.get("assessment", {}).get("framework", {}).get("controlSets", [])
        if control_sets:
            first_set = control_sets[0]
            controls = first_set.get("controls")
            if controls:
                return first_set.get("id"), controls[0].get("id")
    except ClientError as e:
        logger.warning(f"Could not retrieve assessment for permission testing: {e}")
    return None
def _test_evidence_folders_permission(
    self, permissions: Dict[str, bool], control_set_id: str, control_id: str
) -> None:
    """
    Test auditmanager:GetEvidenceFoldersByAssessmentControl permission.

    :param Dict[str, bool] permissions: Dictionary to update with test result
    :param str control_set_id: Control set ID for testing
    :param str control_id: Control ID for testing
    :return: None
    :rtype: None
    """
    try:
        self.client.get_evidence_folders_by_assessment_control(
            assessmentId=self.assessment_id, controlSetId=control_set_id, controlId=control_id
        )
    except ClientError as e:
        permissions[IAM_PERMISSION_GET_EVIDENCE_FOLDERS] = self._handle_permission_error(
            IAM_PERMISSION_GET_EVIDENCE_FOLDERS, e
        )
    else:
        permissions[IAM_PERMISSION_GET_EVIDENCE_FOLDERS] = True
        logger.info(f"✓ {IAM_PERMISSION_GET_EVIDENCE_FOLDERS} - OK")
def _test_evidence_permissions(self, permissions: Dict[str, bool]) -> None:
    """
    Test evidence-related permissions (requires assessment with controls).

    Skips when no assessment_id is configured or the assessment has no
    controls; GetEvidenceByEvidenceFolder can never be probed directly since
    it requires a concrete evidence folder ID.

    :param Dict[str, bool] permissions: Dictionary to update with test results
    :return: None
    :rtype: None
    """
    if not self.assessment_id:
        logger.info(f"⊘ {IAM_PERMISSION_GET_EVIDENCE_FOLDERS} - Skipped (no assessment_id provided)")
        logger.info("⊘ auditmanager:GetEvidenceByEvidenceFolder - Skipped (no assessment_id provided)")
        return

    control_info = self._get_first_control_from_assessment()
    if not control_info:
        logger.info(f"⊘ {IAM_PERMISSION_GET_EVIDENCE_FOLDERS} - Skipped (no controls in assessment)")
        logger.info("⊘ auditmanager:GetEvidenceByEvidenceFolder - Skipped (no controls in assessment)")
        return

    self._test_evidence_folders_permission(permissions, *control_info)
    logger.info("⊘ auditmanager:GetEvidenceByEvidenceFolder - Cannot test (requires evidence folder ID)")
def _log_permission_test_summary(self, permissions: Dict[str, bool]) -> None:
    """
    Log summary of permission test results.

    :param Dict[str, bool] permissions: Dictionary of permission test results
    :return: None
    :rtype: None
    """
    total = len(permissions)
    passed = sum(permissions.values())
    logger.info(f"\nPermission Test Summary: {passed}/{total} permissions verified")

    if passed == total:
        logger.info("\nAll tested permissions are OK!")
    else:
        logger.warning(
            "\nSome permissions are missing. Evidence collection may fail. "
            "Please ensure your IAM role/user has the required AWS Audit Manager permissions."
        )
def test_iam_permissions(self) -> Dict[str, bool]:
    """
    Test IAM permissions required for AWS Audit Manager evidence collection.

    Tests the following permissions:
    - auditmanager:ListAssessments
    - auditmanager:GetAssessment
    - auditmanager:GetEvidenceFoldersByAssessmentControl
    - auditmanager:GetEvidenceByEvidenceFolder

    :return: Dictionary mapping permission names to test results (True=success, False=denied)
    :rtype: Dict[str, bool]
    """
    logger.info("Testing IAM permissions for AWS Audit Manager...")
    results: Dict[str, bool] = {}

    # Each check records its own outcome(s) into the shared results dict.
    for check in (
        self._test_list_assessments_permission,
        self._test_get_assessment_permission,
        self._test_evidence_permissions,
    ):
        check(results)

    self._log_permission_test_summary(results)
    return results
def _fetch_assessments_for_evidence(self) -> List[Dict[str, Any]]:
    """
    Fetch assessments for evidence collection.

    Uses the configured assessment_id when present; otherwise lists every
    assessment in the account.

    :return: List of assessments
    :rtype: List[Dict[str, Any]]
    """
    if self.assessment_id:
        found = [self._get_assessment_details(self.assessment_id)]
        logger.debug(f"Using specific assessment ID: {self.assessment_id}")
        return found

    found = self._list_all_assessments()
    logger.debug(f"Listed {len(found)} total assessments")
    return found
def _log_assessment_details(self, assessments: List[Dict[str, Any]]) -> None:
    """
    Log assessment details before filtering.

    Emits one debug line per non-empty assessment with its framework info and
    compliance type.

    :param List[Dict[str, Any]] assessments: List of assessments to log
    :return: None
    :rtype: None
    """
    # filter(None, ...) drops falsy entries (e.g. a failed lookup returning None).
    for record in filter(None, assessments):
        logger.debug(
            f"Assessment '{record.get('name', 'Unknown')}' - "
            f"Framework info: '{self._get_assessment_framework(record)}', "
            f"ComplianceType: '{record.get('complianceType', '')}'"
        )
def _filter_assessments_by_framework(self, assessments: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Keep only the assessments whose framework matches the configured filter.

    Falsy entries are dropped; every keep/drop decision is logged at debug
    level to make filter troubleshooting possible.

    :param List[Dict[str, Any]] assessments: Assessments to filter
    :return: Filtered assessments
    :rtype: List[Dict[str, Any]]
    """
    matching: List[Dict[str, Any]] = []
    for entry in assessments:
        if not entry:
            continue

        fw_info = self._get_assessment_framework(entry)
        if not self._matches_framework(fw_info):
            # Log why this assessment was rejected, including the custom
            # framework name when one is configured.
            logger.debug(
                f"✗ Assessment '{entry.get('name')}' filtered out - "
                f"framework '{fw_info}' does not match '{self.framework}' "
                f"(custom name: '{self.custom_framework_name or 'N/A'}')"
            )
            continue

        matching.append(entry)
        logger.debug(f"✓ Assessment '{entry.get('name')}' passed framework filter")
    return matching
|
|
2621
|
+
|
|
2622
|
+
def _process_evidence_collection(self, assessments: List[Dict[str, Any]]) -> None:
    """
    Run evidence collection for the filtered assessments, or warn when
    nothing survived the framework filter.

    :param List[Dict[str, Any]] assessments: Filtered assessments
    :return: None
    :rtype: None
    """
    if not assessments:
        # Nothing matched the filter — surface the configured framework so
        # the operator can see why the filter came up empty.
        logger.warning(
            f"No assessments found for evidence collection. "
            f"Framework: '{self.framework}', Custom name: '{self.custom_framework_name or 'N/A'}'"
        )
        return

    logger.info(f"Found {len(assessments)} assessment(s) matching framework filter for evidence collection")
    self.collect_assessment_evidence(assessments)
|
|
2638
|
+
|
|
2639
|
+
def sync_compliance(self) -> None:
    """
    Sync compliance data from AWS Audit Manager to RegScale.

    Extends the base sync_compliance method to add evidence collection:
    after the base class finishes syncing control assessments, and only if
    ``self.collect_evidence`` is set, the pipeline fetches assessments,
    logs them, filters them by the configured framework, and collects
    evidence for whatever remains.

    :return: None
    :rtype: None
    """
    # Call the base class sync_compliance to handle control assessments
    super().sync_compliance()

    # If evidence collection is enabled, collect evidence after compliance sync
    if not self.collect_evidence:
        return

    logger.info("Evidence collection enabled, starting evidence collection...")

    try:
        # Fetch assessments (one specific ID, or all — see
        # _fetch_assessments_for_evidence)
        assessments = self._fetch_assessments_for_evidence()

        # Log assessment details at debug level before any filtering
        self._log_assessment_details(assessments)

        # Filter by framework (self.framework / self.custom_framework_name)
        filtered_assessments = self._filter_assessments_by_framework(assessments)

        # Process evidence collection for whatever passed the filter
        self._process_evidence_collection(filtered_assessments)

    except Exception as e:
        # Broad catch appears deliberate: evidence collection is best-effort,
        # and a failure here should not undo the compliance sync that already
        # completed above — the error is logged with a traceback and swallowed.
        logger.error(f"Error during evidence collection: {e}", exc_info=True)
|