regscale-cli 6.27.3.0__py3-none-any.whl → 6.28.1.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
Potentially problematic release: this version of regscale-cli might be problematic.
- regscale/_version.py +1 -1
- regscale/core/app/utils/app_utils.py +11 -2
- regscale/dev/cli.py +26 -0
- regscale/dev/version.py +72 -0
- regscale/integrations/commercial/__init__.py +15 -1
- regscale/integrations/commercial/amazon/amazon/__init__.py +0 -0
- regscale/integrations/commercial/amazon/amazon/common.py +204 -0
- regscale/integrations/commercial/amazon/common.py +48 -58
- regscale/integrations/commercial/aws/audit_manager_compliance.py +2671 -0
- regscale/integrations/commercial/aws/cli.py +3093 -55
- regscale/integrations/commercial/aws/cloudtrail_control_mappings.py +333 -0
- regscale/integrations/commercial/aws/cloudtrail_evidence.py +501 -0
- regscale/integrations/commercial/aws/cloudwatch_control_mappings.py +357 -0
- regscale/integrations/commercial/aws/cloudwatch_evidence.py +490 -0
- regscale/integrations/commercial/aws/config_compliance.py +914 -0
- regscale/integrations/commercial/aws/conformance_pack_mappings.py +198 -0
- regscale/integrations/commercial/aws/evidence_generator.py +283 -0
- regscale/integrations/commercial/aws/guardduty_control_mappings.py +340 -0
- regscale/integrations/commercial/aws/guardduty_evidence.py +1053 -0
- regscale/integrations/commercial/aws/iam_control_mappings.py +368 -0
- regscale/integrations/commercial/aws/iam_evidence.py +574 -0
- regscale/integrations/commercial/aws/inventory/__init__.py +223 -22
- regscale/integrations/commercial/aws/inventory/base.py +107 -5
- regscale/integrations/commercial/aws/inventory/resources/audit_manager.py +513 -0
- regscale/integrations/commercial/aws/inventory/resources/cloudtrail.py +315 -0
- regscale/integrations/commercial/aws/inventory/resources/cloudtrail_logs_metadata.py +476 -0
- regscale/integrations/commercial/aws/inventory/resources/cloudwatch.py +191 -0
- regscale/integrations/commercial/aws/inventory/resources/compute.py +66 -9
- regscale/integrations/commercial/aws/inventory/resources/config.py +464 -0
- regscale/integrations/commercial/aws/inventory/resources/containers.py +74 -9
- regscale/integrations/commercial/aws/inventory/resources/database.py +106 -31
- regscale/integrations/commercial/aws/inventory/resources/guardduty.py +286 -0
- regscale/integrations/commercial/aws/inventory/resources/iam.py +470 -0
- regscale/integrations/commercial/aws/inventory/resources/inspector.py +476 -0
- regscale/integrations/commercial/aws/inventory/resources/integration.py +175 -61
- regscale/integrations/commercial/aws/inventory/resources/kms.py +447 -0
- regscale/integrations/commercial/aws/inventory/resources/networking.py +103 -67
- regscale/integrations/commercial/aws/inventory/resources/s3.py +394 -0
- regscale/integrations/commercial/aws/inventory/resources/security.py +268 -72
- regscale/integrations/commercial/aws/inventory/resources/securityhub.py +473 -0
- regscale/integrations/commercial/aws/inventory/resources/storage.py +53 -29
- regscale/integrations/commercial/aws/inventory/resources/systems_manager.py +657 -0
- regscale/integrations/commercial/aws/inventory/resources/vpc.py +655 -0
- regscale/integrations/commercial/aws/kms_control_mappings.py +288 -0
- regscale/integrations/commercial/aws/kms_evidence.py +879 -0
- regscale/integrations/commercial/aws/ocsf/__init__.py +7 -0
- regscale/integrations/commercial/aws/ocsf/constants.py +115 -0
- regscale/integrations/commercial/aws/ocsf/mapper.py +435 -0
- regscale/integrations/commercial/aws/org_control_mappings.py +286 -0
- regscale/integrations/commercial/aws/org_evidence.py +666 -0
- regscale/integrations/commercial/aws/s3_control_mappings.py +356 -0
- regscale/integrations/commercial/aws/s3_evidence.py +632 -0
- regscale/integrations/commercial/aws/scanner.py +851 -206
- regscale/integrations/commercial/aws/security_hub.py +319 -0
- regscale/integrations/commercial/aws/session_manager.py +282 -0
- regscale/integrations/commercial/aws/ssm_control_mappings.py +291 -0
- regscale/integrations/commercial/aws/ssm_evidence.py +492 -0
- regscale/integrations/commercial/synqly/ticketing.py +27 -0
- regscale/integrations/compliance_integration.py +308 -38
- regscale/integrations/due_date_handler.py +3 -0
- regscale/integrations/scanner_integration.py +399 -84
- regscale/models/integration_models/cisa_kev_data.json +65 -5
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/integration_models/synqly_models/connectors/vulnerabilities.py +17 -9
- regscale/models/regscale_models/assessment.py +2 -1
- regscale/models/regscale_models/control_objective.py +74 -5
- regscale/models/regscale_models/file.py +2 -0
- regscale/models/regscale_models/issue.py +2 -5
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/METADATA +1 -1
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/RECORD +113 -34
- tests/regscale/integrations/commercial/aws/__init__.py +0 -0
- tests/regscale/integrations/commercial/aws/test_audit_manager_compliance.py +1304 -0
- tests/regscale/integrations/commercial/aws/test_audit_manager_evidence_aggregation.py +341 -0
- tests/regscale/integrations/commercial/aws/test_aws_audit_manager_collector.py +1155 -0
- tests/regscale/integrations/commercial/aws/test_aws_cloudtrail_collector.py +534 -0
- tests/regscale/integrations/commercial/aws/test_aws_config_collector.py +400 -0
- tests/regscale/integrations/commercial/aws/test_aws_guardduty_collector.py +315 -0
- tests/regscale/integrations/commercial/aws/test_aws_iam_collector.py +458 -0
- tests/regscale/integrations/commercial/aws/test_aws_inspector_collector.py +353 -0
- tests/regscale/integrations/commercial/aws/test_aws_inventory_integration.py +530 -0
- tests/regscale/integrations/commercial/aws/test_aws_kms_collector.py +919 -0
- tests/regscale/integrations/commercial/aws/test_aws_s3_collector.py +722 -0
- tests/regscale/integrations/commercial/aws/test_aws_scanner_integration.py +722 -0
- tests/regscale/integrations/commercial/aws/test_aws_securityhub_collector.py +792 -0
- tests/regscale/integrations/commercial/aws/test_aws_systems_manager_collector.py +918 -0
- tests/regscale/integrations/commercial/aws/test_aws_vpc_collector.py +996 -0
- tests/regscale/integrations/commercial/aws/test_cli_evidence.py +431 -0
- tests/regscale/integrations/commercial/aws/test_cloudtrail_control_mappings.py +452 -0
- tests/regscale/integrations/commercial/aws/test_cloudtrail_evidence.py +788 -0
- tests/regscale/integrations/commercial/aws/test_config_compliance.py +298 -0
- tests/regscale/integrations/commercial/aws/test_conformance_pack_mappings.py +200 -0
- tests/regscale/integrations/commercial/aws/test_evidence_generator.py +386 -0
- tests/regscale/integrations/commercial/aws/test_guardduty_control_mappings.py +564 -0
- tests/regscale/integrations/commercial/aws/test_guardduty_evidence.py +1041 -0
- tests/regscale/integrations/commercial/aws/test_iam_control_mappings.py +718 -0
- tests/regscale/integrations/commercial/aws/test_iam_evidence.py +1375 -0
- tests/regscale/integrations/commercial/aws/test_kms_control_mappings.py +656 -0
- tests/regscale/integrations/commercial/aws/test_kms_evidence.py +1163 -0
- tests/regscale/integrations/commercial/aws/test_ocsf_mapper.py +370 -0
- tests/regscale/integrations/commercial/aws/test_org_control_mappings.py +546 -0
- tests/regscale/integrations/commercial/aws/test_org_evidence.py +1240 -0
- tests/regscale/integrations/commercial/aws/test_s3_control_mappings.py +672 -0
- tests/regscale/integrations/commercial/aws/test_s3_evidence.py +987 -0
- tests/regscale/integrations/commercial/aws/test_scanner_evidence.py +373 -0
- tests/regscale/integrations/commercial/aws/test_security_hub_config_filtering.py +539 -0
- tests/regscale/integrations/commercial/aws/test_session_manager.py +516 -0
- tests/regscale/integrations/commercial/aws/test_ssm_control_mappings.py +588 -0
- tests/regscale/integrations/commercial/aws/test_ssm_evidence.py +735 -0
- tests/regscale/integrations/commercial/test_aws.py +55 -56
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/LICENSE +0 -0
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/WHEEL +0 -0
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.27.3.0.dist-info → regscale_cli-6.28.1.0.dist-info}/top_level.txt +0 -0
regscale/integrations/commercial/aws/guardduty_evidence.py (new file, +1053 lines)
@@ -0,0 +1,1053 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""AWS GuardDuty Evidence Integration for RegScale CLI."""

import gzip
import json
import logging
import os
import time
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from io import BytesIO
from typing import Any, Dict, List, Optional

import boto3
from botocore.exceptions import ClientError

from regscale.core.app.api import Api
from regscale.core.app.utils.app_utils import get_current_datetime
from regscale.integrations.commercial.aws.guardduty_control_mappings import GuardDutyControlMapper
from regscale.integrations.compliance_integration import ComplianceIntegration, ComplianceItem
from regscale.integrations.scanner_integration import IntegrationFinding
from regscale.models.regscale_models.evidence import Evidence
from regscale.models.regscale_models.evidence_mapping import EvidenceMapping
from regscale.models.regscale_models.file import File

logger = logging.getLogger("regscale")

GUARDDUTY_CACHE_FILE = os.path.join("artifacts", "aws", "guardduty_data.json")
CACHE_TTL_SECONDS = 4 * 60 * 60

HTML_STRONG_OPEN = "<strong>"
HTML_STRONG_CLOSE = "</strong>"
HTML_P_OPEN = "<p>"
HTML_P_CLOSE = "</p>"
HTML_UL_OPEN = "<ul>"
HTML_UL_CLOSE = "</ul>"
HTML_LI_OPEN = "<li>"
HTML_LI_CLOSE = "</li>"
HTML_H2_OPEN = "<h2>"
HTML_H2_CLOSE = "</h2>"
HTML_H3_OPEN = "<h3>"
HTML_H3_CLOSE = "</h3>"
HTML_BR = "<br>"


@dataclass
class GuardDutyEvidenceConfig:
    """Configuration for AWS GuardDuty evidence collection."""

    plan_id: int
    region: str = "us-east-1"
    framework: str = "NIST800-53R5"
    create_issues: bool = True
    update_control_status: bool = True
    create_poams: bool = False
    create_vulnerabilities: bool = True
    parent_module: str = "securityplans"
    collect_evidence: bool = False
    evidence_as_attachments: bool = True
    evidence_control_ids: Optional[List[str]] = None
    evidence_frequency: int = 30
    force_refresh: bool = False
    account_id: Optional[str] = None
    tags: Optional[Dict[str, str]] = None
    profile: Optional[str] = None
    aws_access_key_id: Optional[str] = None
    aws_secret_access_key: Optional[str] = None
    aws_session_token: Optional[str] = None

class GuardDutyComplianceItem(ComplianceItem):
    """
    Compliance item representing a GuardDuty assessment for a specific control.

    Maps GuardDuty detector and finding data to compliance control requirements.
    """

    def __init__(self, control_id: str, guardduty_data: Dict[str, Any], control_mapper: GuardDutyControlMapper):
        """
        Initialize GuardDuty compliance item.

        :param str control_id: The control ID being assessed (e.g., 'SI-4', 'IR-4')
        :param Dict[str, Any] guardduty_data: Complete GuardDuty data including detectors and findings
        :param GuardDutyControlMapper control_mapper: Control mapper for compliance assessment
        """
        self._control_id = control_id
        self.guardduty_data = guardduty_data
        self.control_mapper = control_mapper

        # Assess compliance for this specific control
        all_results = control_mapper.assess_guardduty_compliance(guardduty_data)
        self._compliance_result = all_results.get(control_id, "PASS")

        # Extract detector and finding statistics
        self.detectors = guardduty_data.get("Detectors", [])
        self.findings = guardduty_data.get("Findings", [])

        # Count findings by severity
        self.high_severity_findings = self._count_findings_by_severity("HIGH")
        self.critical_severity_findings = self._count_findings_by_severity("CRITICAL")
        self.medium_severity_findings = self._count_findings_by_severity("MEDIUM")
        self.low_severity_findings = self._count_findings_by_severity("LOW")

        # Get the account ID from the first detector if available
        self._account_id = self._extract_account_id()

    def _count_findings_by_severity(self, severity: str) -> int:
        """Count findings matching the specified severity level."""
        count = 0
        for finding in self.findings:
            finding_severity = self.control_mapper._get_severity_level(finding.get("Severity", 0))
            if finding_severity == severity:
                count += 1
        return count

    def _extract_account_id(self) -> str:
        """Extract AWS account ID from detector or finding data."""
        if self.detectors:
            # Try to get from first detector
            detector = self.detectors[0]
            if isinstance(detector, dict):
                return detector.get("AccountId", "")

        if self.findings:
            # Try to get from first finding
            finding = self.findings[0]
            if isinstance(finding, dict):
                return finding.get("AccountId", "")

        return ""

    @property
    def resource_id(self) -> str:
        """Unique identifier for the GuardDuty service in this account/region."""
        if self._account_id:
            return f"guardduty-{self._account_id}"
        return "guardduty-service"

    @property
    def resource_name(self) -> str:
        """Human-readable name of the GuardDuty service."""
        if self._account_id:
            return f"AWS GuardDuty - Account {self._account_id}"
        return "AWS GuardDuty Service"

    @property
    def control_id(self) -> str:
        """Control identifier (e.g., SI-4, IR-4)."""
        return self._control_id

    @property
    def compliance_result(self) -> str:
        """Result of compliance check (PASS, FAIL)."""
        return self._compliance_result

    @property
    def severity(self) -> Optional[str]:
        """Severity level based on the control and findings."""
        if self.compliance_result == "PASS":
            return None

        # Determine severity based on control and findings
        if self._control_id == "IR-4":
            # IR-4 fails if there are high/critical severity findings
            if self.critical_severity_findings > 0:
                return "CRITICAL"
            elif self.high_severity_findings > 0:
                return "HIGH"
            else:
                return "MEDIUM"
        elif self._control_id == "SI-4":
            # SI-4 fails if detector is disabled
            return "HIGH"
        elif self._control_id in ["IR-5", "SI-3", "RA-5"]:
            # Other controls have medium severity for failures
            return "MEDIUM"

        return "MEDIUM"

    @property
    def description(self) -> str:
        """Detailed description of the GuardDuty compliance assessment."""
        desc_parts = self._build_assessment_header()
        desc_parts.extend(self._build_detector_summary())
        desc_parts.extend(self._build_findings_summary())
        desc_parts.extend(self._build_control_assessment())

        if self.compliance_result == "FAIL":
            desc_parts.extend(self._build_remediation_guidance())

        return "\n".join(desc_parts)

    def _build_assessment_header(self) -> List[str]:
        """Build the assessment header section."""
        control_desc = self.control_mapper.get_control_description(self._control_id)
        return [
            f"{HTML_H3_OPEN}GuardDuty Compliance Assessment{HTML_H3_CLOSE}",
            HTML_P_OPEN,
            f"{HTML_STRONG_OPEN}Control:{HTML_STRONG_CLOSE} {self._control_id} - {control_desc}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Result:{HTML_STRONG_CLOSE} {self._compliance_result}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Account:{HTML_STRONG_CLOSE} {self._account_id or 'N/A'}",
            HTML_P_CLOSE,
        ]

    def _build_detector_summary(self) -> List[str]:
        """Build detector status summary."""
        enabled_count = sum(1 for d in self.detectors if d.get("Status") == "ENABLED")
        disabled_count = len(self.detectors) - enabled_count

        return [
            f"{HTML_H3_OPEN}Detector Status{HTML_H3_CLOSE}",
            HTML_UL_OPEN,
            f"{HTML_LI_OPEN}Total Detectors: {len(self.detectors)}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}Enabled: {enabled_count}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}Disabled/Suspended: {disabled_count}{HTML_LI_CLOSE}",
            HTML_UL_CLOSE,
        ]

    def _build_findings_summary(self) -> List[str]:
        """Build findings summary."""
        total_findings = len(self.findings)

        return [
            f"{HTML_H3_OPEN}Findings Summary{HTML_H3_CLOSE}",
            HTML_UL_OPEN,
            f"{HTML_LI_OPEN}Total Findings: {total_findings}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}Critical: {self.critical_severity_findings}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}High: {self.high_severity_findings}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}Medium: {self.medium_severity_findings}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}Low: {self.low_severity_findings}{HTML_LI_CLOSE}",
            HTML_UL_CLOSE,
        ]

    def _build_control_assessment(self) -> List[str]:
        """Build control-specific assessment details."""
        control_mapping = self.control_mapper.mappings.get(self._control_id, {})
        checks = control_mapping.get("checks", {})

        section_parts = [
            f"{HTML_H3_OPEN}Control Assessment Details{HTML_H3_CLOSE}",
            HTML_UL_OPEN,
        ]

        for check_name, check_info in checks.items():
            if self.compliance_result == "PASS":
                criteria = check_info.get("pass_criteria", "")
            else:
                criteria = check_info.get("fail_criteria", "")
            section_parts.append(f"{HTML_LI_OPEN}{criteria}{HTML_LI_CLOSE}")

        section_parts.append(HTML_UL_CLOSE)
        return section_parts

    def _build_remediation_guidance(self) -> List[str]:
        """Build remediation guidance for failed controls."""
        section_parts = [
            f"{HTML_H3_OPEN}Remediation Guidance{HTML_H3_CLOSE}",
            HTML_UL_OPEN,
        ]

        if self._control_id == "SI-4":
            section_parts.append(f"{HTML_LI_OPEN}Enable all GuardDuty detectors in the region{HTML_LI_CLOSE}")
            section_parts.append(f"{HTML_LI_OPEN}Ensure detectors are actively monitoring{HTML_LI_CLOSE}")
        elif self._control_id == "IR-4":
            section_parts.append(
                f"{HTML_LI_OPEN}Review and remediate all high and critical severity findings{HTML_LI_CLOSE}"
            )
            section_parts.append(f"{HTML_LI_OPEN}Integrate GuardDuty with incident response workflow{HTML_LI_CLOSE}")
        elif self._control_id == "IR-5":
            section_parts.append(
                f"{HTML_LI_OPEN}Implement systematic tracking of all GuardDuty findings{HTML_LI_CLOSE}"
            )
            section_parts.append(f"{HTML_LI_OPEN}Document incident response for each finding{HTML_LI_CLOSE}")
        elif self._control_id == "SI-3":
            section_parts.append(f"{HTML_LI_OPEN}Enable malware protection features in GuardDuty{HTML_LI_CLOSE}")
            section_parts.append(f"{HTML_LI_OPEN}Monitor and respond to malware-related findings{HTML_LI_CLOSE}")
        elif self._control_id == "RA-5":
            section_parts.append(f"{HTML_LI_OPEN}Enable GuardDuty threat intelligence feeds{HTML_LI_CLOSE}")
            section_parts.append(f"{HTML_LI_OPEN}Keep threat intelligence sources current{HTML_LI_CLOSE}")

        section_parts.append(HTML_UL_CLOSE)
        return section_parts

    @property
    def framework(self) -> str:
        """Compliance framework used for assessment."""
        return self.control_mapper.framework


class AWSGuardDutyEvidenceIntegration(ComplianceIntegration):
    """Process AWS GuardDuty findings and create assessments/issues in RegScale."""

    def __init__(self, config: GuardDutyEvidenceConfig):
        """
        Initialize AWS GuardDuty evidence integration.

        :param GuardDutyEvidenceConfig config: Configuration object containing all parameters
        """
        super().__init__(
            plan_id=config.plan_id,
            framework=config.framework,
            create_issues=config.create_issues,
            update_control_status=config.update_control_status,
            create_poams=config.create_poams,
            parent_module=config.parent_module,
        )

        # Initialize API for file operations
        self.api = Api()

        self.region = config.region
        self.title = "AWS GuardDuty"
        self.framework = config.framework
        self.create_issues = config.create_issues
        self.create_vulnerabilities = config.create_vulnerabilities
        self.collect_evidence = config.collect_evidence
        self.evidence_as_attachments = config.evidence_as_attachments
        self.evidence_control_ids = config.evidence_control_ids
        self.evidence_frequency = config.evidence_frequency
        self.force_refresh = config.force_refresh
        self.account_id = config.account_id
        self.tags = config.tags or {}

        self.control_mapper = GuardDutyControlMapper(framework=config.framework)

        profile = config.profile
        aws_access_key_id = config.aws_access_key_id
        aws_secret_access_key = config.aws_secret_access_key
        aws_session_token = config.aws_session_token

        if aws_access_key_id and aws_secret_access_key:
            logger.info("Initializing AWS GuardDuty client with explicit credentials")
            self.session = boto3.Session(
                region_name=config.region,
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                aws_session_token=aws_session_token,
            )
        else:
            logger.info(f"Initializing AWS GuardDuty client with profile: {profile if profile else 'default'}")
            self.session = boto3.Session(profile_name=profile, region_name=config.region)

        try:
            self.client = self.session.client("guardduty")
            logger.info("Successfully created AWS GuardDuty client")
        except Exception as e:
            logger.error(f"Failed to create AWS GuardDuty client: {e}")
            raise

        self.raw_guardduty_data: Dict[str, Any] = {}
        self.findings_with_cves: List[Dict[str, Any]] = []
        self.findings_without_cves: List[Dict[str, Any]] = []

    def fetch_compliance_data(self) -> List[Dict[str, Any]]:
        """
        Fetch GuardDuty compliance data.

        Returns the raw GuardDuty data which will be used to create compliance items
        for each control that GuardDuty maps to.

        :return: List containing raw GuardDuty data
        :rtype: List[Dict[str, Any]]
        """
        self.fetch_guardduty_data()

        # Return the raw data wrapped in a list
        # We'll create multiple compliance items from this single data set
        return [self.raw_guardduty_data] if self.raw_guardduty_data else []

    def create_compliance_item(self, raw_data: Dict[str, Any]) -> List[ComplianceItem]:
        """
        Create compliance items from GuardDuty data.

        Unlike other integrations that map 1:1 from raw data to compliance items,
        GuardDuty creates multiple compliance items (one per control) from the same data set.

        :param Dict[str, Any] raw_data: Raw GuardDuty data
        :return: List of compliance items for each control
        :rtype: List[ComplianceItem]
        """
        compliance_items = []

        # Get all controls that GuardDuty maps to
        control_results = self.control_mapper.assess_guardduty_compliance(raw_data)

        # Create a compliance item for each control
        for control_id in control_results:
            compliance_item = GuardDutyComplianceItem(
                control_id=control_id, guardduty_data=raw_data, control_mapper=self.control_mapper
            )
            compliance_items.append(compliance_item)

        return compliance_items

    def process_compliance_data(self) -> None:
        """
        Override process_compliance_data to handle GuardDuty's unique pattern.

        GuardDuty creates multiple compliance items from a single data fetch,
        so we need to handle this differently than the base implementation.
        """
        logger.info("Processing GuardDuty compliance data...")

        self._reset_compliance_state()
        # GuardDuty doesn't need control filtering since it maps to specific controls
        # allowed_controls = self._build_allowed_controls_set()
        raw_compliance_data = self.fetch_compliance_data()

        # Process the raw data - GuardDuty returns a list with one item
        for raw_item in raw_compliance_data:
            try:
                # Create multiple compliance items from the single raw data
                compliance_items = self.create_compliance_item(raw_item)

                # Process each compliance item
                for compliance_item in compliance_items:
                    control_id = getattr(compliance_item, "control_id", "")
                    resource_id = getattr(compliance_item, "resource_id", "")

                    if not control_id or not resource_id:
                        continue

                    # Add to collections
                    self.all_compliance_items.append(compliance_item)

                    # Build asset mapping
                    self.asset_compliance_map[compliance_item.resource_id].append(compliance_item)

                    # Categorize by result
                    if compliance_item.compliance_result in self.FAIL_STATUSES:
                        self.failed_compliance_items.append(compliance_item)
                        self.failing_controls[control_id.lower()] = compliance_item
                    else:
                        self.passing_controls[control_id.lower()] = compliance_item

            except Exception as e:
                logger.error(f"Error processing GuardDuty compliance data: {e}")
                continue

        logger.info(
            f"Processed {len(self.all_compliance_items)} compliance items: "
            f"{len(self.passing_controls)} passing controls, "
            f"{len(self.failing_controls)} failing controls"
        )

    def _is_cache_valid(self) -> bool:
        if not os.path.exists(GUARDDUTY_CACHE_FILE):
            return False
        file_age = time.time() - os.path.getmtime(GUARDDUTY_CACHE_FILE)
        is_valid = file_age < CACHE_TTL_SECONDS
        if is_valid:
            logger.info(f"Using cached GuardDuty data (age: {file_age / 3600:.1f} hours)")
        return is_valid

    def _load_cached_data(self) -> Dict[str, Any]:
        try:
            with open(GUARDDUTY_CACHE_FILE, encoding="utf-8") as file:
                data = json.load(file)

                # Validate cache format - must be a dict
                if not isinstance(data, dict):
                    logger.warning("Invalid cache format detected (not a dict). Invalidating cache.")
                    return {}

                return data
        except (json.JSONDecodeError, IOError) as e:
            logger.warning(f"Error reading cache: {e}")
            return {}

    def _save_to_cache(self, guardduty_data: Dict[str, Any]) -> None:
        try:
            os.makedirs(os.path.dirname(GUARDDUTY_CACHE_FILE), exist_ok=True)
            with open(GUARDDUTY_CACHE_FILE, "w", encoding="utf-8") as file:
                json.dump(guardduty_data, file, indent=2, default=str)
            logger.info(f"Cached GuardDuty data to {GUARDDUTY_CACHE_FILE}")
        except IOError as e:
            logger.warning(f"Error writing cache: {e}")

    def _fetch_fresh_guardduty_data(self) -> Dict[str, Any]:
        logger.info("Fetching GuardDuty data from AWS...")

        from regscale.integrations.commercial.aws.inventory.resources.guardduty import GuardDutyCollector

        collector = GuardDutyCollector(
            session=self.session, region=self.region, account_id=self.account_id, tags=self.tags
        )

        guardduty_data = collector.collect()
        logger.info(
            f"Fetched {len(guardduty_data.get('Detectors', []))} detector(s), "
            f"{len(guardduty_data.get('Findings', []))} finding(s)"
        )

        return guardduty_data

    def fetch_guardduty_data(self) -> Dict[str, Any]:
        if not self.force_refresh and self._is_cache_valid():
            cached_data = self._load_cached_data()
            if cached_data:
                self.raw_guardduty_data = cached_data
                return cached_data

        if self.force_refresh:
            logger.info("Force refresh requested, fetching fresh GuardDuty data...")

        try:
            guardduty_data = self._fetch_fresh_guardduty_data()
            self.raw_guardduty_data = guardduty_data
            self._save_to_cache(guardduty_data)
            return guardduty_data
        except ClientError as e:
            logger.error(f"Error fetching GuardDuty data: {e}")
            return {}

    def _classify_findings(self) -> None:
        """Classify findings into those with CVEs (vulnerabilities) and those without (issues)."""
        findings = self.raw_guardduty_data.get("Findings", [])

        for finding in findings:
            if self.control_mapper.has_cve_reference(finding):
                self.findings_with_cves.append(finding)
            else:
                self.findings_without_cves.append(finding)

        logger.info(
            f"Classified findings: {len(self.findings_with_cves)} with CVEs (vulnerabilities), "
            f"{len(self.findings_without_cves)} without CVEs (issues)"
        )

    def _extract_resource_identifier(self, finding: Dict[str, Any]) -> str:
        """
        Extract resource identifier from GuardDuty finding.

        :param Dict[str, Any] finding: GuardDuty finding
        :return: Resource identifier (instance ID, ARN, or account ID)
        :rtype: str
        """
        resource = finding.get("Resource", {})

        # Try to get EC2 instance ID first
        instance_id = self._extract_ec2_instance_id(resource)
        if instance_id:
            return instance_id

        # Try to get resource ARN based on type
        resource_arn = self._extract_resource_arn(resource)
        if resource_arn:
            return resource_arn

        # Fallback to account ID if no specific resource found
        account_id = finding.get("AccountId", "")
        return account_id if account_id else ""

    def _extract_ec2_instance_id(self, resource: Dict[str, Any]) -> str:
        """Extract EC2 instance ID from resource details."""
        instance_details = resource.get("InstanceDetails", {})
        if instance_details:
            instance_id = instance_details.get("InstanceId")
            if instance_id:
                return instance_id
        return ""

    def _extract_resource_arn(self, resource: Dict[str, Any]) -> str:
        """Extract ARN based on resource type."""
        resource_type = resource.get("ResourceType", "")

        resource_extractors = {
            "S3Bucket": lambda: self._extract_s3_arn(resource),
            "EKSCluster": lambda: resource.get("EksClusterDetails", {}).get("Arn", ""),
            "ECSCluster": lambda: resource.get("EcsClusterDetails", {}).get("Arn", ""),
            "Lambda": lambda: resource.get("LambdaDetails", {}).get("FunctionArn", ""),
            "RDSDBInstance": lambda: resource.get("RdsDbInstanceDetails", {}).get("DbInstanceArn", ""),
        }

        extractor = resource_extractors.get(resource_type)
        if extractor:
            return extractor() or ""
        return ""

    def _extract_s3_arn(self, resource: Dict[str, Any]) -> str:
        """Extract S3 bucket ARN from resource details."""
        s3_details = resource.get("S3BucketDetails", [])
        if s3_details:
            return s3_details[0].get("Arn", "")
        return ""

    def _parse_guardduty_finding_as_issue(self, finding: Dict[str, Any]) -> IntegrationFinding:
        """Parse GuardDuty finding as RegScale Issue."""
        finding_id = finding.get("Id", "")
        finding_type = finding.get("Type", "")
        title = finding.get("Title", "")
        severity = self.control_mapper._get_severity_level(finding.get("Severity", 0))

        # Extract resource identifier
        asset_identifier = self._extract_resource_identifier(finding)

        # Create URL-safe external_id by replacing colons with dashes
        # GuardDuty IDs are like "41:UnauthorizedAccess:EC2/SSHBruteForce"
        external_id = finding_id.replace(":", "-")

        # Build detailed description with original finding ID
        detailed_description = self._build_finding_description(finding)

        # Map severity to RegScale
        severity_map = {"LOW": "Low", "MEDIUM": "Moderate", "HIGH": "High", "CRITICAL": "Critical"}
        regscale_severity = severity_map.get(severity, "Moderate")

        # Create IntegrationFinding
        integration_finding = IntegrationFinding(
            asset_identifier=asset_identifier,
            control_labels=[],
            category="Security",
            external_id=external_id,
            title=f"{finding_type}: {title}",
            description=detailed_description,
            severity=regscale_severity,
            status="Open",
            plugin_id=finding_type,
            plugin_name=f"AWS GuardDuty - {finding_type}",
            comments=f"GuardDuty Finding ID: {finding_id}\nRegion: {finding.get('Region', self.region)}",
        )

        return integration_finding

    def _parse_guardduty_finding_as_vulnerability(self, finding: Dict[str, Any]) -> IntegrationFinding:
        """Parse GuardDuty finding with CVE as RegScale Vulnerability."""
        finding_id = finding.get("Id", "")
        finding_type = finding.get("Type", "")
        title = finding.get("Title", "")
        severity = self.control_mapper._get_severity_level(finding.get("Severity", 0))

        # Extract CVEs
        cves = self.control_mapper.extract_cves_from_finding(finding)
        primary_cve = cves[0] if cves else None

        # Extract resource identifier
        asset_identifier = self._extract_resource_identifier(finding)

        # Create URL-safe external_id by replacing colons with dashes
        external_id = finding_id.replace(":", "-")

        # Build detailed description
        detailed_description = self._build_finding_description(finding)

        # Map severity
        severity_map = {"LOW": "Low", "MEDIUM": "Moderate", "HIGH": "High", "CRITICAL": "Critical"}
        regscale_severity = severity_map.get(severity, "Moderate")

        integration_finding = IntegrationFinding(
            asset_identifier=asset_identifier,
            control_labels=[],
            category="Vulnerability",
            external_id=external_id,
            title=f"{finding_type}: {title}",
            description=detailed_description,
            severity=regscale_severity,
            status="Open",
            vulnerability_number=primary_cve,
            plugin_id=finding_type,
            plugin_name=f"AWS GuardDuty - {finding_type}",
            comments=f"GuardDuty Finding ID: {finding_id}\nCVEs: {', '.join(cves)}\nRegion: {finding.get('Region', self.region)}",
        )

        return integration_finding

    def _build_finding_description(self, finding: Dict[str, Any]) -> str:
        """Build HTML-formatted finding description."""
        # Extract finding metadata
        metadata = self._extract_finding_metadata(finding)

        # Build base description
        desc_parts = self._build_finding_header()
        desc_parts.extend(self._build_finding_details(metadata))

        # Add CVE information if present
        cve_section = self._build_cve_section(finding)
        if cve_section:
            desc_parts.extend(cve_section)

        return "\n".join(desc_parts)

    def _extract_finding_metadata(self, finding: Dict[str, Any]) -> Dict[str, str]:
        """Extract metadata from finding for description building."""
        resource = finding.get("Resource", {})
        service = finding.get("Service", {})
        action = service.get("Action", {})

        return {
            "description": finding.get("Description", ""),
            "finding_type": finding.get("Type", ""),
            "severity": self.control_mapper._get_severity_level(finding.get("Severity", 0)),
            "created_at": finding.get("CreatedAt", ""),
            "updated_at": finding.get("UpdatedAt", ""),
            "resource_type": resource.get("ResourceType", ""),
            "action_type": action.get("ActionType", "N/A"),
        }

    def _build_finding_header(self) -> List[str]:
        """Build the header section of finding description."""
        return [
            f"{HTML_H3_OPEN}GuardDuty Security Finding{HTML_H3_CLOSE}",
            HTML_P_OPEN,
        ]

    def _build_finding_details(self, metadata: Dict[str, str]) -> List[str]:
        """Build the details section of finding description."""
        details = [
            f"{HTML_STRONG_OPEN}Finding Type:{HTML_STRONG_CLOSE} {metadata['finding_type']}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Severity:{HTML_STRONG_CLOSE} {metadata['severity']}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Resource Type:{HTML_STRONG_CLOSE} {metadata['resource_type']}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Action Type:{HTML_STRONG_CLOSE} {metadata['action_type']}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Created:{HTML_STRONG_CLOSE} {metadata['created_at']}{HTML_BR}",
            f"{HTML_STRONG_OPEN}Updated:{HTML_STRONG_CLOSE} {metadata['updated_at']}",
            HTML_P_CLOSE,
            f"{HTML_H3_OPEN}Description{HTML_H3_CLOSE}",
            f"{HTML_P_OPEN}{metadata['description']}{HTML_P_CLOSE}",
        ]
        return details

    def _build_cve_section(self, finding: Dict[str, Any]) -> List[str]:
        """Build CVE references section if CVEs are present."""
        if not self.control_mapper.has_cve_reference(finding):
            return []

        cves = self.control_mapper.extract_cves_from_finding(finding)
        cve_parts = [
            f"{HTML_H3_OPEN}CVE References{HTML_H3_CLOSE}",
            HTML_UL_OPEN,
        ]

        for cve in cves:
            cve_parts.append(f"{HTML_LI_OPEN}{cve}{HTML_LI_CLOSE}")

        cve_parts.append(HTML_UL_CLOSE)
        return cve_parts

    def sync_compliance(self) -> None:
        """
        Main method to sync GuardDuty compliance data.

        This extends the base sync_compliance to:
        1. Create assessments for controls (SI-4, IR-4, IR-5, SI-3, RA-5)
        2. Update control implementation status
        3. Create issues for failed compliance
        4. Process individual findings as issues/vulnerabilities
        5. Collect evidence if requested
        """
        # Call the base class sync_compliance to handle control assessments and issues
        super().sync_compliance()

        # Additionally process individual findings as issues/vulnerabilities
        self._process_individual_findings()

        # If evidence collection is enabled, collect evidence after compliance sync
        if self.collect_evidence:
            logger.info("Evidence collection enabled, starting evidence collection...")
            self._collect_guardduty_evidence()

    def _process_individual_findings(self) -> None:
        """Process individual GuardDuty findings as issues or vulnerabilities."""
        # Classify findings
        self._classify_findings()

        # Create issues for findings without CVEs
        if self.create_issues and self.findings_without_cves:
            logger.info(f"Creating {len(self.findings_without_cves)} issues from GuardDuty findings...")
            issues = [self._parse_guardduty_finding_as_issue(f) for f in self.findings_without_cves]
            self.update_regscale_findings(issues)

        # Create vulnerabilities for findings with CVEs
        if self.create_vulnerabilities and self.findings_with_cves:
            logger.info(f"Creating {len(self.findings_with_cves)} vulnerabilities from GuardDuty CVE findings...")
            vulns = [self._parse_guardduty_finding_as_vulnerability(f) for f in self.findings_with_cves]
            self.update_regscale_findings(vulns)

    def sync_findings(self) -> None:
        """
        Legacy method for backward compatibility.
        Redirects to sync_compliance which now handles everything.
        """
        logger.info("sync_findings called - redirecting to sync_compliance for full compliance integration")
        self.sync_compliance()

    def _collect_guardduty_evidence(self) -> None:
        if not self.raw_guardduty_data:
            logger.warning("No GuardDuty data available for evidence collection")
            return

        scan_date = get_current_datetime(dt_format="%Y-%m-%d")

        if self.evidence_as_attachments:
            logger.info("Creating SSP file attachment with GuardDuty evidence...")
            self._create_ssp_attachment(scan_date)
        else:
            logger.info("Creating Evidence record with GuardDuty evidence...")
            self._create_evidence_record(scan_date)

    def _create_ssp_attachment(self, scan_date: str) -> None:
        try:
            # Check for existing evidence to avoid duplicates
            date_str = datetime.now().strftime("%Y%m%d")
            file_name_pattern = f"guardduty_evidence_{date_str}"

            if self.check_for_existing_evidence(file_name_pattern):
                logger.info(
                    "Evidence file for GuardDuty already exists for today. Skipping upload to avoid duplicates."
                )
                return

            # Add timestamp to make filename unique if run multiple times per day
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            file_name = f"guardduty_evidence_{timestamp}.jsonl.gz"

            # Build compliance assessment
            compliance_results = self.control_mapper.assess_guardduty_compliance(self.raw_guardduty_data)

            evidence_entry = {
                **self.raw_guardduty_data,
                "compliance_assessment": {
                    "control_results": compliance_results,
                    "assessed_controls": list(compliance_results.keys()),
                    "assessment_date": scan_date,
                },
                "findings_summary": {
                    "total": len(self.raw_guardduty_data.get("Findings", [])),
                    "with_cves": len(self.findings_with_cves),
                    "without_cves": len(self.findings_without_cves),
                },
            }

            jsonl_content = json.dumps(evidence_entry, default=str)

            compressed_buffer = BytesIO()
            with gzip.open(compressed_buffer, "wt", encoding="utf-8", compresslevel=9) as gz_file:
                gz_file.write(jsonl_content)

            compressed_data = compressed_buffer.getvalue()

            api = Api()
            success = File.upload_file_to_regscale(
                file_name=file_name,
                parent_id=self.plan_id,
                parent_module=self.parent_module,
                api=api,
                file_data=compressed_data,
                tags="aws,guardduty,threat-detection,automated",
            )

            if success:
                logger.info(f"Successfully uploaded GuardDuty evidence file: {file_name}")
            else:
                logger.error("Failed to upload GuardDuty evidence file")

        except Exception as e:
            logger.error(f"Error creating SSP attachment: {e}", exc_info=True)

    def _create_evidence_record(self, scan_date: str) -> None:
        try:
            title = f"AWS GuardDuty Evidence - {scan_date}"
            description = self._build_evidence_description(scan_date)
            due_date = (datetime.now() + timedelta(days=self.evidence_frequency)).isoformat()

            evidence = Evidence(
                title=title,
                description=description,
                status="Collected",
                updateFrequency=self.evidence_frequency,
                dueDate=due_date,
            )

            created_evidence = evidence.create()
            if not created_evidence or not created_evidence.id:
                logger.error("Failed to create evidence record")
                return

            logger.info(f"Created evidence record {created_evidence.id}: {title}")
            self._upload_evidence_file(created_evidence.id, scan_date)
            self._link_evidence_to_ssp(created_evidence.id)

            # Link to controls if specified
            if self.evidence_control_ids:
                self._link_evidence_to_controls(created_evidence.id, is_attachment=False)

        except Exception as e:
            logger.error(f"Error creating evidence record: {e}", exc_info=True)

    def _build_evidence_description(self, scan_date: str) -> str:
        """Build HTML-formatted evidence description."""
        # Get summary data
        summary = self._get_guardduty_summary()
        compliance_results = self.control_mapper.assess_guardduty_compliance(self.raw_guardduty_data)

        # Build description parts
        desc_parts = self._build_evidence_header(scan_date)
        desc_parts.extend(self._build_summary_section(summary))
        desc_parts.extend(self._build_compliance_section(compliance_results))

        return "\n".join(desc_parts)

    def _get_guardduty_summary(self) -> Dict[str, int]:
        """Extract summary statistics from GuardDuty data."""
        return {
            "detectors": len(self.raw_guardduty_data.get("Detectors", [])),
            "total_findings": len(self.raw_guardduty_data.get("Findings", [])),
            "cve_findings": len(self.findings_with_cves),
            "security_issues": len(self.findings_without_cves),
        }

    def _build_evidence_header(self, scan_date: str) -> List[str]:
        """Build evidence header section."""
        return [
            "<h1>AWS GuardDuty Threat Detection Evidence</h1>",
            f"{HTML_P_OPEN}{HTML_STRONG_OPEN}Assessment Date:{HTML_STRONG_CLOSE} {scan_date}{HTML_P_CLOSE}",
        ]

    def _build_summary_section(self, summary: Dict[str, int]) -> List[str]:
        """Build GuardDuty summary section."""
        return [
            f"{HTML_H2_OPEN}GuardDuty Summary{HTML_H2_CLOSE}",
            HTML_UL_OPEN,
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}Detectors:{HTML_STRONG_CLOSE} {summary['detectors']}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}Total Findings:{HTML_STRONG_CLOSE} {summary['total_findings']}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}CVE-Related Findings:{HTML_STRONG_CLOSE} {summary['cve_findings']}{HTML_LI_CLOSE}",
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}Security Issues:{HTML_STRONG_CLOSE} {summary['security_issues']}{HTML_LI_CLOSE}",
            HTML_UL_CLOSE,
        ]

    def _build_compliance_section(self, compliance_results: Dict[str, str]) -> List[str]:
        """Build control compliance results section."""
        section_parts = [
            f"{HTML_H2_OPEN}Control Compliance Results{HTML_H2_CLOSE}",
            HTML_UL_OPEN,
        ]

        for control_id, result in compliance_results.items():
            control_item = self._format_control_result(control_id, result)
            section_parts.append(control_item)

        section_parts.append(HTML_UL_CLOSE)
        return section_parts

    def _format_control_result(self, control_id: str, result: str) -> str:
        """Format a single control result for display."""
        control_desc = self.control_mapper.get_control_description(control_id)
        result_color = "#d32f2f" if result == "FAIL" else "#2e7d32"
        return (
            f"{HTML_LI_OPEN}{HTML_STRONG_OPEN}{control_id}:{HTML_STRONG_CLOSE} "
            f"<span style='color: {result_color};'>{result}</span> - {control_desc}{HTML_LI_CLOSE}"
        )

    def _upload_evidence_file(self, evidence_id: int, scan_date: str) -> None:
        try:
            compliance_results = self.control_mapper.assess_guardduty_compliance(self.raw_guardduty_data)
            evidence_entry = {
                **self.raw_guardduty_data,
                "compliance_assessment": {
                    "control_results": compliance_results,
                    "assessed_controls": list(compliance_results.keys()),
                    "assessment_date": scan_date,
                },
            }
            jsonl_content = json.dumps(evidence_entry, default=str)

            compressed_buffer = BytesIO()
            with gzip.open(compressed_buffer, "wt", encoding="utf-8", compresslevel=9) as gz_file:
                gz_file.write(jsonl_content)

            compressed_data = compressed_buffer.getvalue()
            file_name = f"guardduty_evidence_{scan_date}.jsonl.gz"

            api = Api()
            success = File.upload_file_to_regscale(
                file_name=file_name,
                parent_id=evidence_id,
                parent_module="evidence",
                api=api,
                file_data=compressed_data,
                tags="aws,guardduty,threat-detection",
            )

            if success:
                logger.info(f"Uploaded GuardDuty evidence file to Evidence {evidence_id}")
            else:
                logger.warning(f"Failed to upload evidence file to Evidence {evidence_id}")

        except Exception as e:
            logger.error(f"Error uploading evidence file: {e}", exc_info=True)

    def _link_evidence_to_ssp(self, evidence_id: int) -> None:
        try:
            mapping = EvidenceMapping(evidenceID=evidence_id, mappedID=self.plan_id, mappingType=self.parent_module)
            mapping.create()
            logger.info(f"Linked evidence {evidence_id} to SSP {self.plan_id}")
        except Exception as ex:
            logger.warning(f"Failed to link evidence to SSP: {ex}")

    def _link_evidence_to_controls(self, evidence_id: int, is_attachment: bool = False) -> None:
        """
        Link evidence to specified control IDs.

        :param int evidence_id: Evidence or attachment ID
        :param bool is_attachment: True if linking attachment, False for evidence record
        """
        try:
            for control_id in self.evidence_control_ids:
                if is_attachment:
                    self.api.link_ssp_attachment_to_control(self.plan_id, evidence_id, control_id)
                else:
                    self.api.link_evidence_to_control(evidence_id, control_id)
                logger.info(f"Linked evidence {evidence_id} to control {control_id}")
        except Exception as e:
            logger.error(f"Failed to link evidence to controls: {e}", exc_info=True)

    def _map_resource_type_to_asset_type(self, compliance_item: ComplianceItem) -> str:
        """
        Map GuardDuty service to RegScale asset type.

        :param ComplianceItem compliance_item: Compliance item
        :return: Asset type string
        :rtype: str
        """
        return "AWS GuardDuty Service"

    def fetch_findings(self, *args, **kwargs):
        """
        Fetch findings from GuardDuty (implements ScannerIntegration abstract method).

        This method is not used in the current implementation as GuardDuty findings
        are fetched and processed directly in sync_compliance().

        :return: Empty iterator
        :rtype: Iterator
        """
        return iter([])

    def fetch_assets(self, *args, **kwargs):
        """
        Fetch assets from GuardDuty (implements ScannerIntegration abstract method).

        GuardDuty creates a single asset representing the GuardDuty service itself.

        :return: Iterator of assets
        :rtype: Iterator
        """
        # GuardDuty represents a service-level asset, not individual resources
        # We create one asset per account/region combination
        if self.all_compliance_items:
            # Use the first compliance item to get account info
            first_item = self.all_compliance_items[0]
            asset = self.create_asset_from_compliance_item(first_item)
            if asset:
                yield asset
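For orientation, below is a minimal, hypothetical sketch of how this new integration could be driven directly from Python, based only on the GuardDutyEvidenceConfig fields and AWSGuardDutyEvidenceIntegration methods shown in the hunk above. The CLI wiring added in regscale/integrations/commercial/aws/cli.py is not reproduced here, and the plan_id and profile values are placeholders.

from regscale.integrations.commercial.aws.guardduty_evidence import (
    AWSGuardDutyEvidenceIntegration,
    GuardDutyEvidenceConfig,
)

# Placeholder values; plan_id must be an existing RegScale security plan ID.
config = GuardDutyEvidenceConfig(
    plan_id=123,
    region="us-east-1",
    framework="NIST800-53R5",
    collect_evidence=True,          # also upload evidence after the compliance sync
    evidence_as_attachments=True,   # attach a .jsonl.gz file to the SSP instead of an Evidence record
    profile="default",              # used when no explicit AWS keys are supplied
)

integration = AWSGuardDutyEvidenceIntegration(config)
# Runs control assessments, creates issues/vulnerabilities from findings,
# and (because collect_evidence=True) uploads the GuardDuty evidence file.
integration.sync_compliance()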