regscale-cli 6.25.1.0__py3-none-any.whl → 6.27.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of regscale-cli might be problematic.
- regscale/_version.py +1 -1
- regscale/airflow/hierarchy.py +2 -2
- regscale/core/app/application.py +19 -4
- regscale/core/app/internal/evidence.py +419 -2
- regscale/core/app/internal/login.py +0 -1
- regscale/core/app/utils/catalog_utils/common.py +1 -1
- regscale/dev/code_gen.py +24 -20
- regscale/integrations/commercial/jira.py +367 -126
- regscale/integrations/commercial/qualys/__init__.py +7 -8
- regscale/integrations/commercial/qualys/scanner.py +8 -3
- regscale/integrations/commercial/sicura/api.py +14 -13
- regscale/integrations/commercial/sicura/commands.py +8 -2
- regscale/integrations/commercial/sicura/scanner.py +49 -39
- regscale/integrations/commercial/stigv2/ckl_parser.py +5 -5
- regscale/integrations/commercial/synqly/assets.py +17 -0
- regscale/integrations/commercial/synqly/vulnerabilities.py +45 -28
- regscale/integrations/commercial/tenablev2/cis_parsers.py +453 -0
- regscale/integrations/commercial/tenablev2/cis_scanner.py +447 -0
- regscale/integrations/commercial/tenablev2/commands.py +142 -1
- regscale/integrations/commercial/tenablev2/scanner.py +0 -1
- regscale/integrations/commercial/tenablev2/stig_parsers.py +113 -57
- regscale/integrations/commercial/wizv2/WizDataMixin.py +1 -1
- regscale/integrations/commercial/wizv2/click.py +64 -79
- regscale/integrations/commercial/wizv2/compliance/__init__.py +15 -0
- regscale/integrations/commercial/wizv2/{policy_compliance_helpers.py → compliance/helpers.py} +78 -60
- regscale/integrations/commercial/wizv2/compliance_report.py +161 -165
- regscale/integrations/commercial/wizv2/core/__init__.py +133 -0
- regscale/integrations/commercial/wizv2/{async_client.py → core/client.py} +3 -3
- regscale/integrations/commercial/wizv2/{constants.py → core/constants.py} +1 -17
- regscale/integrations/commercial/wizv2/core/file_operations.py +237 -0
- regscale/integrations/commercial/wizv2/fetchers/__init__.py +11 -0
- regscale/integrations/commercial/wizv2/{data_fetcher.py → fetchers/policy_assessment.py} +5 -9
- regscale/integrations/commercial/wizv2/issue.py +1 -1
- regscale/integrations/commercial/wizv2/models/__init__.py +0 -0
- regscale/integrations/commercial/wizv2/parsers/__init__.py +34 -0
- regscale/integrations/commercial/wizv2/{parsers.py → parsers/main.py} +1 -1
- regscale/integrations/commercial/wizv2/processors/__init__.py +11 -0
- regscale/integrations/commercial/wizv2/{finding_processor.py → processors/finding.py} +1 -1
- regscale/integrations/commercial/wizv2/reports.py +1 -1
- regscale/integrations/commercial/wizv2/sbom.py +1 -1
- regscale/integrations/commercial/wizv2/scanner.py +39 -99
- regscale/integrations/commercial/wizv2/utils/__init__.py +48 -0
- regscale/integrations/commercial/wizv2/{utils.py → utils/main.py} +116 -61
- regscale/integrations/commercial/wizv2/variables.py +89 -3
- regscale/integrations/compliance_integration.py +60 -41
- regscale/integrations/control_matcher.py +377 -0
- regscale/integrations/due_date_handler.py +14 -8
- regscale/integrations/milestone_manager.py +291 -0
- regscale/integrations/public/__init__.py +1 -0
- regscale/integrations/public/cci_importer.py +37 -38
- regscale/integrations/public/fedramp/click.py +60 -2
- regscale/integrations/public/fedramp/docx_parser.py +10 -1
- regscale/integrations/public/fedramp/fedramp_cis_crm.py +393 -340
- regscale/integrations/public/fedramp/fedramp_five.py +1 -1
- regscale/integrations/public/fedramp/poam_export_v5.py +888 -0
- regscale/integrations/scanner_integration.py +277 -153
- regscale/models/integration_models/cisa_kev_data.json +282 -9
- regscale/models/integration_models/nexpose.py +36 -10
- regscale/models/integration_models/qualys.py +3 -4
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/integration_models/synqly_models/connectors/vulnerabilities.py +24 -7
- regscale/models/integration_models/synqly_models/synqly_model.py +8 -1
- regscale/models/locking.py +12 -8
- regscale/models/platform.py +1 -2
- regscale/models/regscale_models/control_implementation.py +47 -22
- regscale/models/regscale_models/issue.py +256 -95
- regscale/models/regscale_models/milestone.py +1 -1
- regscale/models/regscale_models/regscale_model.py +6 -1
- regscale/templates/__init__.py +0 -0
- {regscale_cli-6.25.1.0.dist-info → regscale_cli-6.27.0.0.dist-info}/METADATA +1 -17
- {regscale_cli-6.25.1.0.dist-info → regscale_cli-6.27.0.0.dist-info}/RECORD +145 -65
- tests/regscale/integrations/commercial/__init__.py +0 -0
- tests/regscale/integrations/commercial/conftest.py +28 -0
- tests/regscale/integrations/commercial/microsoft_defender/__init__.py +1 -0
- tests/regscale/integrations/commercial/microsoft_defender/test_defender.py +1517 -0
- tests/regscale/integrations/commercial/microsoft_defender/test_defender_api.py +1748 -0
- tests/regscale/integrations/commercial/microsoft_defender/test_defender_constants.py +327 -0
- tests/regscale/integrations/commercial/microsoft_defender/test_defender_scanner.py +487 -0
- tests/regscale/integrations/commercial/test_aws.py +3731 -0
- tests/regscale/integrations/commercial/test_burp.py +48 -0
- tests/regscale/integrations/commercial/test_crowdstrike.py +49 -0
- tests/regscale/integrations/commercial/test_dependabot.py +341 -0
- tests/regscale/integrations/commercial/test_gcp.py +1543 -0
- tests/regscale/integrations/commercial/test_gitlab.py +549 -0
- tests/regscale/integrations/commercial/test_ip_mac_address_length.py +84 -0
- tests/regscale/integrations/commercial/test_jira.py +2204 -0
- tests/regscale/integrations/commercial/test_npm_audit.py +42 -0
- tests/regscale/integrations/commercial/test_okta.py +1228 -0
- tests/regscale/integrations/commercial/test_sarif_converter.py +251 -0
- tests/regscale/integrations/commercial/test_sicura.py +350 -0
- tests/regscale/integrations/commercial/test_snow.py +423 -0
- tests/regscale/integrations/commercial/test_sonarcloud.py +394 -0
- tests/regscale/integrations/commercial/test_sqlserver.py +186 -0
- tests/regscale/integrations/commercial/test_stig.py +33 -0
- tests/regscale/integrations/commercial/test_stig_mapper.py +153 -0
- tests/regscale/integrations/commercial/test_stigv2.py +406 -0
- tests/regscale/integrations/commercial/test_wiz.py +1365 -0
- tests/regscale/integrations/commercial/test_wiz_inventory.py +256 -0
- tests/regscale/integrations/commercial/wizv2/__init__.py +339 -0
- tests/regscale/integrations/commercial/wizv2/compliance/__init__.py +1 -0
- tests/regscale/integrations/commercial/wizv2/compliance/test_helpers.py +903 -0
- tests/regscale/integrations/commercial/wizv2/core/__init__.py +1 -0
- tests/regscale/integrations/commercial/wizv2/core/test_auth.py +701 -0
- tests/regscale/integrations/commercial/wizv2/core/test_client.py +1037 -0
- tests/regscale/integrations/commercial/wizv2/core/test_file_operations.py +989 -0
- tests/regscale/integrations/commercial/wizv2/fetchers/__init__.py +1 -0
- tests/regscale/integrations/commercial/wizv2/fetchers/test_policy_assessment.py +805 -0
- tests/regscale/integrations/commercial/wizv2/parsers/__init__.py +1 -0
- tests/regscale/integrations/commercial/wizv2/parsers/test_main.py +1153 -0
- tests/regscale/integrations/commercial/wizv2/processors/__init__.py +1 -0
- tests/regscale/integrations/commercial/wizv2/processors/test_finding.py +671 -0
- tests/regscale/integrations/commercial/wizv2/test_WizDataMixin.py +537 -0
- tests/regscale/integrations/commercial/wizv2/test_click_comprehensive.py +851 -0
- tests/regscale/integrations/commercial/wizv2/test_compliance_report_comprehensive.py +910 -0
- tests/regscale/integrations/commercial/wizv2/test_compliance_report_normalization.py +138 -0
- tests/regscale/integrations/commercial/wizv2/test_file_cleanup.py +283 -0
- tests/regscale/integrations/commercial/wizv2/test_file_operations.py +260 -0
- tests/regscale/integrations/commercial/wizv2/test_issue.py +343 -0
- tests/regscale/integrations/commercial/wizv2/test_issue_comprehensive.py +1203 -0
- tests/regscale/integrations/commercial/wizv2/test_reports.py +497 -0
- tests/regscale/integrations/commercial/wizv2/test_sbom.py +643 -0
- tests/regscale/integrations/commercial/wizv2/test_scanner_comprehensive.py +805 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_click_client_id.py +165 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_compliance_report.py +1394 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_compliance_unit.py +341 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_control_normalization.py +138 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_findings_comprehensive.py +364 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_inventory_comprehensive.py +644 -0
- tests/regscale/integrations/commercial/wizv2/test_wiz_status_mapping.py +149 -0
- tests/regscale/integrations/commercial/wizv2/test_wizv2.py +1132 -0
- tests/regscale/integrations/commercial/wizv2/test_wizv2_utils.py +519 -0
- tests/regscale/integrations/commercial/wizv2/utils/__init__.py +1 -0
- tests/regscale/integrations/commercial/wizv2/utils/test_main.py +1523 -0
- tests/regscale/integrations/public/fedramp/__init__.py +1 -0
- tests/regscale/integrations/public/fedramp/test_poam_export_v5.py +1293 -0
- tests/regscale/integrations/public/test_fedramp.py +301 -0
- tests/regscale/integrations/test_control_matcher.py +1397 -0
- tests/regscale/integrations/test_control_matching.py +155 -0
- tests/regscale/integrations/test_milestone_manager.py +408 -0
- tests/regscale/models/test_issue.py +378 -1
- regscale/integrations/commercial/wizv2/policy_compliance.py +0 -3543
- /regscale/integrations/commercial/wizv2/{wiz_auth.py → core/auth.py} +0 -0
- {regscale_cli-6.25.1.0.dist-info → regscale_cli-6.27.0.0.dist-info}/LICENSE +0 -0
- {regscale_cli-6.25.1.0.dist-info → regscale_cli-6.27.0.0.dist-info}/WHEEL +0 -0
- {regscale_cli-6.25.1.0.dist-info → regscale_cli-6.27.0.0.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.25.1.0.dist-info → regscale_cli-6.27.0.0.dist-info}/top_level.txt +0 -0
@@ -1,3543 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
# -*- coding: utf-8 -*-
|
|
3
|
-
"""Wiz Policy Compliance Integration for RegScale CLI."""
|
|
4
|
-
|
|
5
|
-
import json
|
|
6
|
-
import logging
|
|
7
|
-
import os
|
|
8
|
-
import re
|
|
9
|
-
from datetime import datetime
|
|
10
|
-
from typing import Dict, List, Optional, Iterator, Any
|
|
11
|
-
|
|
12
|
-
from regscale.core.app.application import Application
|
|
13
|
-
from regscale.core.app.utils.app_utils import error_and_exit, check_license, get_current_datetime
|
|
14
|
-
from regscale.integrations.commercial.wizv2.async_client import run_async_queries
|
|
15
|
-
from regscale.integrations.commercial.wizv2.constants import (
|
|
16
|
-
WizVulnerabilityType,
|
|
17
|
-
WIZ_POLICY_QUERY,
|
|
18
|
-
WIZ_FRAMEWORK_QUERY,
|
|
19
|
-
FRAMEWORK_MAPPINGS,
|
|
20
|
-
FRAMEWORK_SHORTCUTS,
|
|
21
|
-
FRAMEWORK_CATEGORIES,
|
|
22
|
-
)
|
|
23
|
-
from regscale.integrations.commercial.wizv2.data_fetcher import PolicyAssessmentFetcher
|
|
24
|
-
from regscale.integrations.commercial.wizv2.finding_processor import (
|
|
25
|
-
FindingConsolidator,
|
|
26
|
-
FindingToIssueProcessor,
|
|
27
|
-
)
|
|
28
|
-
from regscale.integrations.commercial.wizv2.policy_compliance_helpers import (
|
|
29
|
-
ControlImplementationCache,
|
|
30
|
-
AssetConsolidator,
|
|
31
|
-
IssueFieldSetter,
|
|
32
|
-
ControlAssessmentProcessor,
|
|
33
|
-
)
|
|
34
|
-
from regscale.integrations.commercial.wizv2.wiz_auth import wiz_authenticate
|
|
35
|
-
from regscale.integrations.compliance_integration import ComplianceIntegration, ComplianceItem
|
|
36
|
-
from regscale.integrations.scanner_integration import (
|
|
37
|
-
ScannerIntegrationType,
|
|
38
|
-
IntegrationAsset,
|
|
39
|
-
IntegrationFinding,
|
|
40
|
-
)
|
|
41
|
-
from regscale.integrations.variables import ScannerVariables
|
|
42
|
-
from regscale.models import regscale_models
|
|
43
|
-
|
|
44
|
-
logger = logging.getLogger("regscale")
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
# Constants for file operations
|
|
48
|
-
JSON_FILE_EXT = ".json"
|
|
49
|
-
JSONL_FILE_EXT = ".jsonl"
|
|
50
|
-
MAX_DISPLAY_ASSETS = 10 # Maximum number of asset names to display in descriptions
|
|
51
|
-
CACHE_CLEANUP_KEEP_COUNT = 5 # Number of recent cache files to keep during cleanup
|
|
52
|
-
WIZ_URL = "https://api.wiz.io/graphql"
|
|
53
|
-
|
|
54
|
-
# Safer, linear-time regex for control-id normalization.
|
|
55
|
-
# Examples supported: 'AC-4', 'AC-4(2)', 'AC-4 (2)', 'AC-4-2', 'AC-4 2'
|
|
56
|
-
# This avoids ambiguous nested optional whitespace with alternation that can
|
|
57
|
-
# trigger excessive backtracking. Each branch starts with a distinct token
|
|
58
|
-
# ('(', '-' or whitespace), so the engine proceeds deterministically.
|
|
59
|
-
SAFE_CONTROL_ID_RE = re.compile( # NOSONAR
|
|
60
|
-
r"^([A-Za-z]{2}-\d+)(?:\s*\(\s*(\d+)\s*\)|-\s*(\d+)|\s+(\d+))?$", # NOSONAR
|
|
61
|
-
re.IGNORECASE, # NOSONAR
|
|
62
|
-
) # NOSONAR
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
class WizComplianceItem(ComplianceItem):
|
|
66
|
-
"""Wiz implementation of ComplianceItem."""
|
|
67
|
-
|
|
68
|
-
def __init__(
|
|
69
|
-
self,
|
|
70
|
-
raw_data: Dict[str, Any],
|
|
71
|
-
integration: Optional["WizPolicyComplianceIntegration"] = None,
|
|
72
|
-
specific_control_id: Optional[str] = None,
|
|
73
|
-
):
|
|
74
|
-
"""
|
|
75
|
-
Initialize WizComplianceItem from raw GraphQL response.
|
|
76
|
-
|
|
77
|
-
:param Dict[str, Any] raw_data: Raw policy assessment data from Wiz
|
|
78
|
-
:param Optional['WizPolicyComplianceIntegration'] integration: Integration instance for framework mapping
|
|
79
|
-
:param Optional[str] specific_control_id: Specific control ID to use (for multi-control policies)
|
|
80
|
-
"""
|
|
81
|
-
self.id = raw_data.get("id", "")
|
|
82
|
-
self.result = raw_data.get("result", "")
|
|
83
|
-
self.policy = raw_data.get("policy", {})
|
|
84
|
-
self.resource = raw_data.get("resource", {})
|
|
85
|
-
self.output = raw_data.get("output", {})
|
|
86
|
-
self._integration = integration
|
|
87
|
-
self._specific_control_id = specific_control_id
|
|
88
|
-
|
|
89
|
-
def _get_filtered_subcategories(self) -> List[Dict[str, Any]]:
|
|
90
|
-
"""
|
|
91
|
-
Return only subcategories that belong to the selected framework.
|
|
92
|
-
|
|
93
|
-
If no integration or framework filter is available, return all.
|
|
94
|
-
|
|
95
|
-
:return: List of filtered security subcategories
|
|
96
|
-
:rtype: List[Dict[str, Any]]
|
|
97
|
-
"""
|
|
98
|
-
subcategories = self.policy.get("securitySubCategories", []) if self.policy else []
|
|
99
|
-
if not subcategories or not self._integration or not getattr(self._integration, "framework_id", None):
|
|
100
|
-
return subcategories
|
|
101
|
-
|
|
102
|
-
target_framework_id = self._integration.framework_id
|
|
103
|
-
filtered = [
|
|
104
|
-
sc for sc in subcategories if sc.get("category", {}).get("framework", {}).get("id") == target_framework_id
|
|
105
|
-
]
|
|
106
|
-
# Return filtered results - if empty, the control_id will be empty (framework filtering working as intended)
|
|
107
|
-
return filtered
|
|
108
|
-
|
|
109
|
-
@property
|
|
110
|
-
def resource_id(self) -> str:
|
|
111
|
-
"""Unique identifier for the resource being assessed."""
|
|
112
|
-
return self.resource.get("id", "")
|
|
113
|
-
|
|
114
|
-
@property
|
|
115
|
-
def resource_name(self) -> str:
|
|
116
|
-
"""Human-readable name of the resource."""
|
|
117
|
-
return self.resource.get("name", "")
|
|
118
|
-
|
|
119
|
-
@property
|
|
120
|
-
def provider_unique_id(self) -> str:
|
|
121
|
-
"""Provider unique ID (e.g., ARN for AWS resources) for meaningful asset identification."""
|
|
122
|
-
return self.resource.get("providerUniqueId", "")
|
|
123
|
-
|
|
124
|
-
@property
|
|
125
|
-
def control_id(self) -> str:
|
|
126
|
-
"""Control identifier (e.g., AC-3, SI-2)."""
|
|
127
|
-
# If a specific control ID was provided (for multi-control policies), use it
|
|
128
|
-
if self._specific_control_id:
|
|
129
|
-
return self._specific_control_id
|
|
130
|
-
|
|
131
|
-
if not self.policy:
|
|
132
|
-
return ""
|
|
133
|
-
|
|
134
|
-
subcategories = self._get_filtered_subcategories()
|
|
135
|
-
if subcategories:
|
|
136
|
-
return subcategories[0].get("externalId", "").strip()
|
|
137
|
-
return ""
|
|
138
|
-
|
|
139
|
-
@property
|
|
140
|
-
def compliance_result(self) -> str:
|
|
141
|
-
"""Result of compliance check (PASS, FAIL, etc)."""
|
|
142
|
-
return self.result
|
|
143
|
-
|
|
144
|
-
@property
|
|
145
|
-
def severity(self) -> Optional[str]:
|
|
146
|
-
"""Severity level of the compliance violation (if failed)."""
|
|
147
|
-
return self.policy.get("severity")
|
|
148
|
-
|
|
149
|
-
@property
|
|
150
|
-
def description(self) -> str:
|
|
151
|
-
"""Description of the compliance check."""
|
|
152
|
-
desc = self.policy.get("description") or self.policy.get("ruleDescription", "")
|
|
153
|
-
if not desc:
|
|
154
|
-
desc = f"Compliance check for {self.policy.get('name', 'unknown policy')}"
|
|
155
|
-
return desc
|
|
156
|
-
|
|
157
|
-
@property
|
|
158
|
-
def framework(self) -> str:
|
|
159
|
-
"""Compliance framework (e.g., NIST800-53R5, CSF)."""
|
|
160
|
-
if not self.policy:
|
|
161
|
-
return ""
|
|
162
|
-
|
|
163
|
-
subcategories = self._get_filtered_subcategories()
|
|
164
|
-
if subcategories:
|
|
165
|
-
category = subcategories[0].get("category", {})
|
|
166
|
-
framework = category.get("framework", {})
|
|
167
|
-
framework_id = framework.get("id", "")
|
|
168
|
-
|
|
169
|
-
# Prefer integration mapping using the actual framework id from the item
|
|
170
|
-
if self._integration and framework_id:
|
|
171
|
-
return self._integration.get_framework_name(framework_id)
|
|
172
|
-
|
|
173
|
-
return framework.get("name", "")
|
|
174
|
-
return ""
|
|
175
|
-
|
|
176
|
-
@property
|
|
177
|
-
def framework_id(self) -> Optional[str]:
|
|
178
|
-
"""Extract framework ID."""
|
|
179
|
-
if not self.policy:
|
|
180
|
-
return None
|
|
181
|
-
|
|
182
|
-
subcategories = self._get_filtered_subcategories()
|
|
183
|
-
if subcategories:
|
|
184
|
-
category = subcategories[0].get("category", {})
|
|
185
|
-
framework = category.get("framework", {})
|
|
186
|
-
return framework.get("id")
|
|
187
|
-
return None
|
|
188
|
-
|
|
189
|
-
@property
|
|
190
|
-
def is_pass(self) -> bool:
|
|
191
|
-
"""Check if assessment result is PASS."""
|
|
192
|
-
return self.result == "PASS"
|
|
193
|
-
|
|
194
|
-
@property
|
|
195
|
-
def is_fail(self) -> bool:
|
|
196
|
-
"""Check if assessment result is FAIL."""
|
|
197
|
-
return self.result == "FAIL"
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
class WizPolicyComplianceIntegration(ComplianceIntegration):
|
|
201
|
-
"""
|
|
202
|
-
Wiz Policy Compliance Integration for syncing policy assessments from Wiz to RegScale.
|
|
203
|
-
|
|
204
|
-
This integration fetches policy assessment data from Wiz, processes the results,
|
|
205
|
-
and creates control assessments in RegScale based on compliance status.
|
|
206
|
-
"""
|
|
207
|
-
|
|
208
|
-
title = "Wiz Policy Compliance Integration"
|
|
209
|
-
type = ScannerIntegrationType.CONTROL_TEST
|
|
210
|
-
# Use wizId field for asset identification (matches other Wiz integrations)
|
|
211
|
-
asset_identifier_field = "wizId"
|
|
212
|
-
issue_identifier_field = "wizId"
|
|
213
|
-
|
|
214
|
-
# Do not create assets - they come from separate inventory import
|
|
215
|
-
options_map_assets_to_components: bool = False
|
|
216
|
-
# Do not create vulnerabilities from compliance policy results
|
|
217
|
-
create_vulnerabilities: bool = False
|
|
218
|
-
# Do not create scan history - this is compliance report ingest, not a vulnerability scan
|
|
219
|
-
enable_scan_history: bool = False
|
|
220
|
-
|
|
221
|
-
# Control whether JSONL control-centric export is written alongside JSON
|
|
222
|
-
write_jsonl_output: bool = False
|
|
223
|
-
|
|
224
|
-
def __init__(
|
|
225
|
-
self,
|
|
226
|
-
plan_id: int,
|
|
227
|
-
wiz_project_id: str,
|
|
228
|
-
client_id: str,
|
|
229
|
-
client_secret: str,
|
|
230
|
-
framework_id: str = "wf-id-4", # Default to NIST SP 800-53 Revision 5
|
|
231
|
-
catalog_id: Optional[int] = None,
|
|
232
|
-
tenant_id: int = 1,
|
|
233
|
-
create_issues: bool = True,
|
|
234
|
-
update_control_status: bool = True,
|
|
235
|
-
create_poams: bool = False,
|
|
236
|
-
regscale_module: Optional[str] = "securityplans",
|
|
237
|
-
**kwargs,
|
|
238
|
-
):
|
|
239
|
-
"""
|
|
240
|
-
Initialize the Wiz Policy Compliance Integration.
|
|
241
|
-
|
|
242
|
-
:param int plan_id: RegScale Security Plan ID
|
|
243
|
-
:param str wiz_project_id: Wiz Project ID to query
|
|
244
|
-
:param str client_id: Wiz API client ID
|
|
245
|
-
:param str client_secret: Wiz API client secret
|
|
246
|
-
:param str framework_id: Wiz framework ID to filter by (default: wf-id-4)
|
|
247
|
-
:param Optional[int] catalog_id: RegScale catalog ID
|
|
248
|
-
:param int tenant_id: RegScale tenant ID
|
|
249
|
-
:param bool create_issues: Whether to create issues for failed compliance
|
|
250
|
-
:param bool update_control_status: Whether to update control implementation status
|
|
251
|
-
:param bool create_poams: Whether to mark issues as POAMs
|
|
252
|
-
:param Optional[str] regscale_module: RegScale module string (overrides default parent_module)
|
|
253
|
-
"""
|
|
254
|
-
super().__init__(
|
|
255
|
-
plan_id=plan_id,
|
|
256
|
-
parent_module=regscale_module,
|
|
257
|
-
catalog_id=catalog_id,
|
|
258
|
-
framework=self._map_framework_id_to_name(framework_id),
|
|
259
|
-
create_issues=create_issues,
|
|
260
|
-
update_control_status=update_control_status,
|
|
261
|
-
create_poams=create_poams,
|
|
262
|
-
tenant_id=tenant_id,
|
|
263
|
-
**kwargs,
|
|
264
|
-
)
|
|
265
|
-
|
|
266
|
-
# Override parent_module if regscale_module is provided
|
|
267
|
-
if regscale_module:
|
|
268
|
-
self.parent_module = regscale_module
|
|
269
|
-
|
|
270
|
-
self.wiz_project_id = wiz_project_id
|
|
271
|
-
self.client_id = client_id
|
|
272
|
-
self.client_secret = client_secret
|
|
273
|
-
self.framework_id = framework_id
|
|
274
|
-
self.wiz_endpoint = ""
|
|
275
|
-
self.access_token = ""
|
|
276
|
-
self.framework_mapping: Dict[str, str] = {}
|
|
277
|
-
self.framework_cache_file = os.path.join("artifacts", "wiz", "framework_mapping.json")
|
|
278
|
-
self.raw_policy_assessments: List[Dict[str, Any]] = []
|
|
279
|
-
|
|
280
|
-
# Caching configuration for policy assessments
|
|
281
|
-
# Default: disabled for tests; CLI enables via --cache-duration
|
|
282
|
-
self.cache_duration_minutes: int = int(kwargs.get("cache_duration_minutes", 0))
|
|
283
|
-
self.force_refresh: bool = bool(kwargs.get("force_refresh", False))
|
|
284
|
-
self.policy_cache_dir: str = os.path.join("artifacts", "wiz")
|
|
285
|
-
self.policy_cache_file: str = os.path.join(
|
|
286
|
-
self.policy_cache_dir, f"policy_assessments_{wiz_project_id}_{framework_id}.json"
|
|
287
|
-
)
|
|
288
|
-
|
|
289
|
-
# Initialize helper classes for cleaner code organization
|
|
290
|
-
self._control_cache = ControlImplementationCache()
|
|
291
|
-
self._asset_consolidator = AssetConsolidator()
|
|
292
|
-
self._issue_field_setter = IssueFieldSetter(self._control_cache, plan_id, regscale_module or "securityplans")
|
|
293
|
-
self._finding_consolidator = FindingConsolidator(self)
|
|
294
|
-
self._finding_processor = FindingToIssueProcessor(self)
|
|
295
|
-
self._assessment_processor = ControlAssessmentProcessor(
|
|
296
|
-
plan_id,
|
|
297
|
-
regscale_module or "securityplans",
|
|
298
|
-
self.scan_date,
|
|
299
|
-
self.title,
|
|
300
|
-
self._map_framework_id_to_name(framework_id),
|
|
301
|
-
)
|
|
302
|
-
|
|
303
|
-
# Configure strict control failure threshold for Wiz project-scoped assessments
|
|
304
|
-
# Since Wiz filters to project resources, use 0% failure tolerance
|
|
305
|
-
self.control_failure_threshold = 0.0
|
|
306
|
-
|
|
307
|
-
def fetch_compliance_data(self) -> List[Any]:
|
|
308
|
-
"""
|
|
309
|
-
Fetch compliance data from Wiz GraphQL API and filter to framework-specific
|
|
310
|
-
items for existing assets only.
|
|
311
|
-
|
|
312
|
-
:return: List of filtered raw compliance data
|
|
313
|
-
:rtype: List[Any]
|
|
314
|
-
"""
|
|
315
|
-
# Authenticate if not already done
|
|
316
|
-
if not self.access_token:
|
|
317
|
-
self.authenticate_wiz()
|
|
318
|
-
|
|
319
|
-
# Load existing assets early for filtering
|
|
320
|
-
self._load_regscale_assets()
|
|
321
|
-
|
|
322
|
-
# Use the data fetcher for cleaner code
|
|
323
|
-
fetcher = PolicyAssessmentFetcher(
|
|
324
|
-
wiz_endpoint=self.wiz_endpoint or WIZ_URL,
|
|
325
|
-
access_token=self.access_token,
|
|
326
|
-
wiz_project_id=self.wiz_project_id,
|
|
327
|
-
framework_id=self.framework_id,
|
|
328
|
-
cache_duration_minutes=self.cache_duration_minutes,
|
|
329
|
-
)
|
|
330
|
-
|
|
331
|
-
all_policy_assessments = fetcher.fetch_policy_assessments()
|
|
332
|
-
|
|
333
|
-
if not all_policy_assessments:
|
|
334
|
-
logger.info("No policy assessments fetched from Wiz")
|
|
335
|
-
self.raw_policy_assessments = []
|
|
336
|
-
return []
|
|
337
|
-
|
|
338
|
-
# Filter to only items with existing assets in RegScale
|
|
339
|
-
filtered_assessments = self._filter_assessments_to_existing_assets(all_policy_assessments)
|
|
340
|
-
|
|
341
|
-
self.raw_policy_assessments = filtered_assessments
|
|
342
|
-
return filtered_assessments
|
|
343
|
-
|
|
344
|
-
def _filter_assessments_to_existing_assets(self, assessments: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
345
|
-
"""
|
|
346
|
-
Filter assessments to include items with control IDs and existing assets.
|
|
347
|
-
|
|
348
|
-
For compliance reporting, PASS controls are always included even without assets
|
|
349
|
-
to ensure complete compliance documentation.
|
|
350
|
-
|
|
351
|
-
:param assessments: List of raw assessments from Wiz
|
|
352
|
-
:return: Filtered list of assessments
|
|
353
|
-
"""
|
|
354
|
-
assets_exist = getattr(self, "_regscale_assets_by_wiz_id", {})
|
|
355
|
-
filtered_assessments = []
|
|
356
|
-
skipped_no_control = 0
|
|
357
|
-
skipped_no_asset = 0
|
|
358
|
-
|
|
359
|
-
for assessment in assessments:
|
|
360
|
-
# Convert to compliance item to check framework and asset existence
|
|
361
|
-
temp_item = WizComplianceItem(assessment, self)
|
|
362
|
-
|
|
363
|
-
# Skip if no control ID (not in selected framework)
|
|
364
|
-
if not temp_item.control_id:
|
|
365
|
-
skipped_no_control += 1
|
|
366
|
-
continue
|
|
367
|
-
|
|
368
|
-
# For PASS controls, allow through even without existing assets for compliance documentation
|
|
369
|
-
is_pass = temp_item.compliance_result in self.PASS_STATUSES
|
|
370
|
-
|
|
371
|
-
# Skip if asset doesn't exist in RegScale UNLESS it's a PASS control
|
|
372
|
-
if temp_item.resource_id not in assets_exist:
|
|
373
|
-
if not is_pass:
|
|
374
|
-
skipped_no_asset += 1
|
|
375
|
-
continue
|
|
376
|
-
# PASS control without asset - allow through for compliance documentation
|
|
377
|
-
|
|
378
|
-
filtered_assessments.append(assessment)
|
|
379
|
-
logger.debug(f"Skipped {skipped_no_control} assessments with no control ID for framework.")
|
|
380
|
-
logger.debug(
|
|
381
|
-
f"Skipped {skipped_no_asset} assessments with no existing asset in RegScale (PASS controls allowed)."
|
|
382
|
-
)
|
|
383
|
-
return filtered_assessments
|
|
384
|
-
|
|
385
|
-
def create_compliance_item(self, raw_data: Any) -> ComplianceItem:
|
|
386
|
-
"""
|
|
387
|
-
Create a ComplianceItem from raw compliance data.
|
|
388
|
-
|
|
389
|
-
Note: This creates a single item for the first control ID only.
|
|
390
|
-
Use create_all_compliance_items() to get all control mappings.
|
|
391
|
-
|
|
392
|
-
:param Any raw_data: Raw compliance data from Wiz
|
|
393
|
-
:return: ComplianceItem instance
|
|
394
|
-
:rtype: ComplianceItem
|
|
395
|
-
"""
|
|
396
|
-
return WizComplianceItem(raw_data, self)
|
|
397
|
-
|
|
398
|
-
def create_all_compliance_items(self, raw_data: Any) -> List[ComplianceItem]:
|
|
399
|
-
"""
|
|
400
|
-
Create all ComplianceItems from raw compliance data.
|
|
401
|
-
|
|
402
|
-
This handles Wiz policies that map to multiple controls by creating
|
|
403
|
-
a separate ComplianceItem for each control ID.
|
|
404
|
-
|
|
405
|
-
:param Any raw_data: Raw compliance data from Wiz
|
|
406
|
-
:return: List of ComplianceItem instances (one per control)
|
|
407
|
-
:rtype: List[ComplianceItem]
|
|
408
|
-
"""
|
|
409
|
-
# First get all control IDs this policy maps to
|
|
410
|
-
temp_item = WizComplianceItem(raw_data, self)
|
|
411
|
-
all_control_ids = self._get_all_control_ids_for_compliance_item(temp_item)
|
|
412
|
-
|
|
413
|
-
if not all_control_ids:
|
|
414
|
-
# No control IDs found, return single item with default behavior
|
|
415
|
-
return [temp_item]
|
|
416
|
-
|
|
417
|
-
# Create one compliance item per control ID
|
|
418
|
-
compliance_items = []
|
|
419
|
-
for control_id in all_control_ids:
|
|
420
|
-
compliance_items.append(WizComplianceItem(raw_data, self, specific_control_id=control_id))
|
|
421
|
-
|
|
422
|
-
return compliance_items
|
|
423
|
-
|
|
424
|
-
def process_compliance_data(self) -> None:
|
|
425
|
-
"""
|
|
426
|
-
Override base class to handle multi-control Wiz policies.
|
|
427
|
-
|
|
428
|
-
Creates separate compliance items for each control ID that a policy maps to.
|
|
429
|
-
"""
|
|
430
|
-
logger.info("Processing compliance data with multi-control support...")
|
|
431
|
-
|
|
432
|
-
# Reset state to avoid double counting on repeated calls
|
|
433
|
-
self._reset_compliance_state()
|
|
434
|
-
|
|
435
|
-
# Build allowed control IDs from plan/catalog controls to restrict scope
|
|
436
|
-
allowed_controls_normalized = self._build_allowed_controls_set()
|
|
437
|
-
|
|
438
|
-
# Fetch and process raw compliance data
|
|
439
|
-
raw_compliance_data = self.fetch_compliance_data()
|
|
440
|
-
total_policies_processed, total_compliance_items_created = self._process_raw_compliance_data(
|
|
441
|
-
raw_compliance_data, allowed_controls_normalized
|
|
442
|
-
)
|
|
443
|
-
|
|
444
|
-
# Perform control-level categorization based on aggregated results
|
|
445
|
-
self._categorize_controls_by_aggregation()
|
|
446
|
-
|
|
447
|
-
self._log_processing_summary(total_policies_processed, total_compliance_items_created)
|
|
448
|
-
|
|
449
|
-
def _reset_compliance_state(self) -> None:
|
|
450
|
-
"""Reset state to avoid double counting on repeated calls."""
|
|
451
|
-
self.all_compliance_items = []
|
|
452
|
-
self.failed_compliance_items = []
|
|
453
|
-
self.passing_controls = {}
|
|
454
|
-
self.failing_controls = {}
|
|
455
|
-
self.asset_compliance_map.clear()
|
|
456
|
-
|
|
457
|
-
def _build_allowed_controls_set(self) -> set[str]:
|
|
458
|
-
"""Build allowed control IDs from plan/catalog controls to restrict scope."""
|
|
459
|
-
allowed_controls_normalized: set[str] = set()
|
|
460
|
-
try:
|
|
461
|
-
controls = self._get_controls()
|
|
462
|
-
for ctl in controls:
|
|
463
|
-
cid = (ctl.get("controlId") or "").strip()
|
|
464
|
-
if not cid:
|
|
465
|
-
continue
|
|
466
|
-
base, sub = self._normalize_control_id(cid)
|
|
467
|
-
normalized = f"{base}({sub})" if sub else base
|
|
468
|
-
allowed_controls_normalized.add(normalized)
|
|
469
|
-
except Exception:
|
|
470
|
-
# If controls cannot be loaded, proceed without additional filtering
|
|
471
|
-
allowed_controls_normalized = set()
|
|
472
|
-
return allowed_controls_normalized
|
|
473
|
-
|
|
474
|
-
def _process_raw_compliance_data(
|
|
475
|
-
self, raw_compliance_data: List[Any], allowed_controls_normalized: set[str]
|
|
476
|
-
) -> tuple[int, int]:
|
|
477
|
-
"""Process raw compliance data and return counts."""
|
|
478
|
-
total_policies_processed = 0
|
|
479
|
-
total_compliance_items_created = 0
|
|
480
|
-
|
|
481
|
-
for raw_item in raw_compliance_data:
|
|
482
|
-
try:
|
|
483
|
-
total_policies_processed += 1
|
|
484
|
-
compliance_items_for_policy = self.create_all_compliance_items(raw_item)
|
|
485
|
-
|
|
486
|
-
items_created_for_policy = self._process_compliance_items_for_policy(
|
|
487
|
-
compliance_items_for_policy, allowed_controls_normalized
|
|
488
|
-
)
|
|
489
|
-
total_compliance_items_created += items_created_for_policy
|
|
490
|
-
|
|
491
|
-
except Exception as e:
|
|
492
|
-
logger.error(f"Error processing compliance item: {e}")
|
|
493
|
-
continue
|
|
494
|
-
|
|
495
|
-
return total_policies_processed, total_compliance_items_created
|
|
496
|
-
|
|
497
|
-
def _process_compliance_items_for_policy(
|
|
498
|
-
self, compliance_items_for_policy: List[Any], allowed_controls_normalized: set[str]
|
|
499
|
-
) -> int:
|
|
500
|
-
"""Process compliance items for a single policy and return count of items created."""
|
|
501
|
-
items_created = 0
|
|
502
|
-
|
|
503
|
-
for compliance_item in compliance_items_for_policy:
|
|
504
|
-
if not self._is_valid_compliance_item(compliance_item):
|
|
505
|
-
continue
|
|
506
|
-
|
|
507
|
-
if not self._is_control_in_allowed_set(compliance_item, allowed_controls_normalized):
|
|
508
|
-
continue
|
|
509
|
-
|
|
510
|
-
self._add_compliance_item_to_collections(compliance_item)
|
|
511
|
-
items_created += 1
|
|
512
|
-
|
|
513
|
-
return items_created
|
|
514
|
-
|
|
515
|
-
def _is_valid_compliance_item(self, compliance_item: Any) -> bool:
|
|
516
|
-
"""Check if compliance item has required control_id and resource_id."""
|
|
517
|
-
return getattr(compliance_item, "control_id", "") and getattr(compliance_item, "resource_id", "")
|
|
518
|
-
|
|
519
|
-
def _is_control_in_allowed_set(self, compliance_item: Any, allowed_controls_normalized: set[str]) -> bool:
|
|
520
|
-
"""Check if compliance item's control is in allowed set."""
|
|
521
|
-
if not allowed_controls_normalized:
|
|
522
|
-
return True
|
|
523
|
-
|
|
524
|
-
base, sub = self._normalize_control_id(getattr(compliance_item, "control_id", ""))
|
|
525
|
-
norm_item = f"{base}({sub})" if sub else base
|
|
526
|
-
return norm_item in allowed_controls_normalized
|
|
527
|
-
|
|
528
|
-
def _add_compliance_item_to_collections(self, compliance_item: Any) -> None:
|
|
529
|
-
"""Add compliance item to appropriate collections."""
|
|
530
|
-
self.all_compliance_items.append(compliance_item)
|
|
531
|
-
self.asset_compliance_map[compliance_item.resource_id].append(compliance_item)
|
|
532
|
-
|
|
533
|
-
if compliance_item.compliance_result in self.FAIL_STATUSES:
|
|
534
|
-
self.failed_compliance_items.append(compliance_item)
|
|
535
|
-
|
|
536
|
-
def _log_processing_summary(self, total_policies_processed: int, total_compliance_items_created: int) -> None:
|
|
537
|
-
"""Log processing summary information."""
|
|
538
|
-
logger.info(
|
|
539
|
-
f"Processed {total_policies_processed} Wiz policies into {total_compliance_items_created} compliance items"
|
|
540
|
-
)
|
|
541
|
-
logger.debug(
|
|
542
|
-
f"Compliance breakdown: {len(self.all_compliance_items) - len(self.failed_compliance_items)} passing items, "
|
|
543
|
-
f"{len(self.failed_compliance_items)} failing items"
|
|
544
|
-
)
|
|
545
|
-
logger.info(
|
|
546
|
-
f"Control categorization: {len(self.passing_controls)} passing controls, {len(self.failing_controls)} failing controls"
|
|
547
|
-
)
|
|
548
|
-
|
|
549
|
-
def _map_resource_type_to_asset_type(self, compliance_item: ComplianceItem) -> str:
|
|
550
|
-
"""
|
|
551
|
-
Map Wiz resource type to RegScale asset type.
|
|
552
|
-
|
|
553
|
-
:param ComplianceItem compliance_item: Compliance item
|
|
554
|
-
:return: Asset type string
|
|
555
|
-
:rtype: str
|
|
556
|
-
"""
|
|
557
|
-
if isinstance(compliance_item, WizComplianceItem):
|
|
558
|
-
resource_type = compliance_item.resource.get("type", "").upper()
|
|
559
|
-
|
|
560
|
-
# Minimal mapping expected by tests; default to generic type name
|
|
561
|
-
name_mapping = {
|
|
562
|
-
"VIRTUAL_MACHINE": "Virtual Machine",
|
|
563
|
-
"CONTAINER": "Container",
|
|
564
|
-
"DATABASE": "Database",
|
|
565
|
-
"BUCKET": "Storage",
|
|
566
|
-
}
|
|
567
|
-
if resource_type in name_mapping:
|
|
568
|
-
return name_mapping[resource_type]
|
|
569
|
-
|
|
570
|
-
return "Cloud Resource"
|
|
571
|
-
|
|
572
|
-
def _get_component_name_from_source_type(self, compliance_item: WizComplianceItem) -> str:
|
|
573
|
-
"""
|
|
574
|
-
Build a component name from the original Wiz resource type (source type).
|
|
575
|
-
|
|
576
|
-
Example: "STORAGE_ACCOUNT" -> "Storage Account"
|
|
577
|
-
|
|
578
|
-
:param WizComplianceItem compliance_item: Compliance item containing resource information
|
|
579
|
-
:return: Human-readable component name derived from resource type
|
|
580
|
-
:rtype: str
|
|
581
|
-
"""
|
|
582
|
-
raw_type = (compliance_item.resource or {}).get("type", "Unknown Resource")
|
|
583
|
-
return raw_type.replace("_", " ").title()
|
|
584
|
-
|
|
585
|
-
def fetch_assets(self, *args, **kwargs) -> Iterator[IntegrationAsset]:
|
|
586
|
-
"""
|
|
587
|
-
No assets are created in policy compliance integration.
|
|
588
|
-
Assets come from separate Wiz inventory import.
|
|
589
|
-
"""
|
|
590
|
-
return iter([])
|
|
591
|
-
|
|
592
|
-
def fetch_findings(self, *args, **kwargs) -> Iterator[IntegrationFinding]:
|
|
593
|
-
"""
|
|
594
|
-
Create consolidated findings grouped by control, with all affected resources under each control.
|
|
595
|
-
|
|
596
|
-
This approach groups by control first, then collects all resources that fail that control.
|
|
597
|
-
This results in one finding per control with multiple resources, making consolidation much easier.
|
|
598
|
-
"""
|
|
599
|
-
if not self.failed_compliance_items:
|
|
600
|
-
return
|
|
601
|
-
|
|
602
|
-
# Use the finding consolidator for cleaner code
|
|
603
|
-
yield from self._finding_consolidator.create_consolidated_findings(self.failed_compliance_items)
|
|
604
|
-
|
|
605
|
-
def _get_all_control_ids_for_compliance_item(self, compliance_item: WizComplianceItem) -> List[str]:
|
|
606
|
-
"""
|
|
607
|
-
Get ALL control IDs that a compliance item maps to.
|
|
608
|
-
|
|
609
|
-
Wiz policies can map to multiple controls (e.g., one policy failure might affect
|
|
610
|
-
AC-4(2), AC-4(4), and SC-28(1) controls). This method returns all of them.
|
|
611
|
-
|
|
612
|
-
:param WizComplianceItem compliance_item: Compliance item to extract control IDs from
|
|
613
|
-
:return: List of control IDs this policy maps to
|
|
614
|
-
:rtype: List[str]
|
|
615
|
-
"""
|
|
616
|
-
if not compliance_item.policy:
|
|
617
|
-
return []
|
|
618
|
-
|
|
619
|
-
subcategories = compliance_item._get_filtered_subcategories()
|
|
620
|
-
if not subcategories:
|
|
621
|
-
return []
|
|
622
|
-
|
|
623
|
-
# Extract control IDs and deduplicate in one pass
|
|
624
|
-
unique_control_ids = []
|
|
625
|
-
seen = set()
|
|
626
|
-
|
|
627
|
-
for subcat in subcategories:
|
|
628
|
-
external_id = subcat.get("externalId", "").strip()
|
|
629
|
-
if external_id and external_id not in seen:
|
|
630
|
-
seen.add(external_id)
|
|
631
|
-
unique_control_ids.append(external_id)
|
|
632
|
-
|
|
633
|
-
return unique_control_ids
|
|
634
|
-
|
|
635
|
-
def _group_compliance_items_by_control(self) -> Dict[str, Dict[str, WizComplianceItem]]:
|
|
636
|
-
"""
|
|
637
|
-
Group failed compliance items by control ID.
|
|
638
|
-
|
|
639
|
-
:return: Dictionary mapping control IDs to resource dictionaries
|
|
640
|
-
:rtype: Dict[str, Dict[str, WizComplianceItem]]
|
|
641
|
-
"""
|
|
642
|
-
control_to_resources = {} # {control_id: {resource_id: compliance_item}}
|
|
643
|
-
|
|
644
|
-
for compliance_item in self.failed_compliance_items:
|
|
645
|
-
if not isinstance(compliance_item, WizComplianceItem):
|
|
646
|
-
continue
|
|
647
|
-
|
|
648
|
-
asset_id = (compliance_item.resource_id or "").lower()
|
|
649
|
-
if not asset_id:
|
|
650
|
-
continue
|
|
651
|
-
|
|
652
|
-
# Get ALL control IDs that this policy assessment maps to
|
|
653
|
-
all_control_ids = self._get_all_control_ids_for_compliance_item(compliance_item)
|
|
654
|
-
if not all_control_ids:
|
|
655
|
-
continue
|
|
656
|
-
|
|
657
|
-
# Add this resource to each control it fails
|
|
658
|
-
for control_id in all_control_ids:
|
|
659
|
-
control = control_id.upper()
|
|
660
|
-
|
|
661
|
-
if control not in control_to_resources:
|
|
662
|
-
control_to_resources[control] = {}
|
|
663
|
-
|
|
664
|
-
# Use the first compliance item we find for this resource-control pair
|
|
665
|
-
# (there might be duplicates from multiple policy assessments)
|
|
666
|
-
if asset_id not in control_to_resources[control]:
|
|
667
|
-
control_to_resources[control][asset_id] = compliance_item
|
|
668
|
-
|
|
669
|
-
return control_to_resources
|
|
670
|
-
|
|
671
|
-
def _create_consolidated_findings(
|
|
672
|
-
self, control_to_resources: Dict[str, Dict[str, WizComplianceItem]]
|
|
673
|
-
) -> Iterator[IntegrationFinding]:
|
|
674
|
-
"""
|
|
675
|
-
Create consolidated findings from grouped control-resource mappings.
|
|
676
|
-
|
|
677
|
-
:param Dict[str, Dict[str, WizComplianceItem]] control_to_resources: Control groupings
|
|
678
|
-
:yield: Consolidated findings
|
|
679
|
-
:rtype: Iterator[IntegrationFinding]
|
|
680
|
-
"""
|
|
681
|
-
for control_id, resources in control_to_resources.items():
|
|
682
|
-
# Use the first compliance item as the base for this control's finding
|
|
683
|
-
base_compliance_item = next(iter(resources.values()))
|
|
684
|
-
|
|
685
|
-
# Create a consolidated finding for this control
|
|
686
|
-
finding = self._create_consolidated_finding_for_control(
|
|
687
|
-
control_id=control_id, compliance_item=base_compliance_item, affected_resources=list(resources.keys())
|
|
688
|
-
)
|
|
689
|
-
|
|
690
|
-
if finding:
|
|
691
|
-
yield finding
|
|
692
|
-
|
|
693
|
-
def _create_consolidated_finding_for_control(
|
|
694
|
-
self, control_id: str, compliance_item: WizComplianceItem, affected_resources: List[str]
|
|
695
|
-
) -> Optional[IntegrationFinding]:
|
|
696
|
-
"""
|
|
697
|
-
Create a consolidated finding for a control with all affected resources.
|
|
698
|
-
|
|
699
|
-
:param str control_id: The control ID (e.g., 'AC-4(2)')
|
|
700
|
-
:param WizComplianceItem compliance_item: Base compliance item for this control
|
|
701
|
-
:param List[str] affected_resources: List of Wiz resource IDs that fail this control
|
|
702
|
-
:return: Consolidated finding with all affected resources
|
|
703
|
-
:rtype: Optional[IntegrationFinding]
|
|
704
|
-
"""
|
|
705
|
-
# Filter to only resources that exist as assets in RegScale
|
|
706
|
-
asset_mappings = self._build_asset_mappings(affected_resources)
|
|
707
|
-
|
|
708
|
-
if not asset_mappings:
|
|
709
|
-
return None
|
|
710
|
-
|
|
711
|
-
# Create the base finding using the control-specific approach
|
|
712
|
-
finding = self._create_finding_for_specific_control(compliance_item, control_id)
|
|
713
|
-
if not finding:
|
|
714
|
-
return None
|
|
715
|
-
|
|
716
|
-
# Update the asset identifier and description with consolidated info
|
|
717
|
-
self._update_finding_with_consolidated_assets(finding, asset_mappings)
|
|
718
|
-
return finding
|
|
719
|
-
|
|
720
|
-
def _build_asset_mappings(self, resource_ids: List[str]) -> Dict[str, Dict[str, str]]:
|
|
721
|
-
"""
|
|
722
|
-
Build asset mappings for resources that exist in RegScale.
|
|
723
|
-
|
|
724
|
-
:param List[str] resource_ids: List of Wiz resource IDs
|
|
725
|
-
:return: Mapping of resource IDs to asset information
|
|
726
|
-
:rtype: Dict[str, Dict[str, str]]
|
|
727
|
-
"""
|
|
728
|
-
asset_mappings = {}
|
|
729
|
-
|
|
730
|
-
for resource_id in resource_ids:
|
|
731
|
-
if self._asset_exists_in_regscale(resource_id):
|
|
732
|
-
asset = self.get_asset_by_identifier(resource_id)
|
|
733
|
-
if asset and asset.name:
|
|
734
|
-
asset_mappings[resource_id] = {"name": asset.name, "wiz_id": resource_id}
|
|
735
|
-
else:
|
|
736
|
-
# Fallback to resource ID if asset name not found
|
|
737
|
-
asset_mappings[resource_id] = {"name": resource_id, "wiz_id": resource_id}
|
|
738
|
-
|
|
739
|
-
return asset_mappings
|
|
740
|
-
|
|
741
|
-
def _update_finding_with_consolidated_assets(
|
|
742
|
-
self, finding: IntegrationFinding, asset_mappings: Dict[str, Dict[str, str]]
|
|
743
|
-
) -> None:
|
|
744
|
-
"""
|
|
745
|
-
Update a finding with consolidated asset information.
|
|
746
|
-
|
|
747
|
-
:param IntegrationFinding finding: Finding to update
|
|
748
|
-
:param Dict[str, Dict[str, str]] asset_mappings: Asset mapping information
|
|
749
|
-
:return: None
|
|
750
|
-
:rtype: None
|
|
751
|
-
"""
|
|
752
|
-
# Update the asset identifier to include all asset names (clean format for POAMs)
|
|
753
|
-
consolidated_asset_identifier = self._create_consolidated_asset_identifier(asset_mappings)
|
|
754
|
-
finding.asset_identifier = consolidated_asset_identifier
|
|
755
|
-
|
|
756
|
-
# Update finding description to indicate multiple resources
|
|
757
|
-
asset_names = [info["name"] for info in asset_mappings.values()]
|
|
758
|
-
if len(asset_names) > 1:
|
|
759
|
-
finding.description = f"{finding.description}\n\nThis control failure affects {len(asset_names)} assets: {', '.join(asset_names[:MAX_DISPLAY_ASSETS])}"
|
|
760
|
-
if len(asset_names) > MAX_DISPLAY_ASSETS:
|
|
761
|
-
finding.description += f" (and {len(asset_names) - MAX_DISPLAY_ASSETS} more)"
|
|
762
|
-
|
|
763
|
-
def _create_finding_for_specific_control(
|
|
764
|
-
self, compliance_item: WizComplianceItem, control_id: str
|
|
765
|
-
) -> Optional[IntegrationFinding]:
|
|
766
|
-
"""
|
|
767
|
-
Create a finding for a specific control ID from a compliance item.
|
|
768
|
-
|
|
769
|
-
This is similar to create_finding_from_compliance_item but ensures the finding
|
|
770
|
-
uses the specific control ID rather than just the first one.
|
|
771
|
-
|
|
772
|
-
:param WizComplianceItem compliance_item: Source compliance item
|
|
773
|
-
:param str control_id: Specific control ID to create finding for
|
|
774
|
-
:return: Integration finding for this specific control
|
|
775
|
-
:rtype: Optional[IntegrationFinding]
|
|
776
|
-
"""
|
|
777
|
-
try:
|
|
778
|
-
control_labels = [control_id] if control_id else []
|
|
779
|
-
severity = self._map_severity(compliance_item.severity)
|
|
780
|
-
policy_name = self._get_policy_name(compliance_item)
|
|
781
|
-
title = f"{policy_name} ({control_id})" if control_id else policy_name
|
|
782
|
-
description = self._compose_description(policy_name, compliance_item)
|
|
783
|
-
|
|
784
|
-
finding = self._build_finding(
|
|
785
|
-
control_labels=control_labels,
|
|
786
|
-
title=title,
|
|
787
|
-
description=description,
|
|
788
|
-
severity=severity,
|
|
789
|
-
compliance_item=compliance_item,
|
|
790
|
-
)
|
|
791
|
-
|
|
792
|
-
# Set the specific control ID for this finding
|
|
793
|
-
finding.rule_id = control_id
|
|
794
|
-
finding.affected_controls = self._normalize_control_id_string(control_id)
|
|
795
|
-
|
|
796
|
-
# Ensure unique external_id for each control to prevent unwanted updates
|
|
797
|
-
finding.external_id = f"wiz-policy-control-{control_id.upper()}-{self.framework_id}"
|
|
798
|
-
|
|
799
|
-
self._set_assessment_id_if_available(finding, compliance_item)
|
|
800
|
-
return finding
|
|
801
|
-
|
|
802
|
-
except Exception as e:
|
|
803
|
-
logger.error(f"Error creating finding for control {control_id}: {e}")
|
|
804
|
-
return None
|
|
805
|
-
|
|
806
|
-
def _asset_exists_in_regscale(self, resource_id: str) -> bool:
|
|
807
|
-
"""
|
|
808
|
-
Check if an asset with the given Wiz resource ID exists in RegScale.
|
|
809
|
-
|
|
810
|
-
:param str resource_id: Wiz resource ID to check (stored in RegScale asset wizId field)
|
|
811
|
-
:return: True if asset exists, False otherwise
|
|
812
|
-
:rtype: bool
|
|
813
|
-
"""
|
|
814
|
-
if not resource_id:
|
|
815
|
-
return False
|
|
816
|
-
|
|
817
|
-
try:
|
|
818
|
-
# Check if we have a cached lookup of existing assets
|
|
819
|
-
if not hasattr(self, "_regscale_assets_by_wiz_id"):
|
|
820
|
-
self._load_regscale_assets()
|
|
821
|
-
|
|
822
|
-
return resource_id in self._regscale_assets_by_wiz_id
|
|
823
|
-
except Exception:
|
|
824
|
-
return False
|
|
825
|
-
|
|
826
|
-
def _load_regscale_assets(self) -> None:
|
|
827
|
-
"""
|
|
828
|
-
Load all existing assets from RegScale into a Wiz ID-based lookup cache.
|
|
829
|
-
Wiz resource IDs are stored in the RegScale asset wizId field.
|
|
830
|
-
"""
|
|
831
|
-
try:
|
|
832
|
-
logger.info("Loading existing assets from RegScale for asset existence checks...")
|
|
833
|
-
# Get all assets for the current plan
|
|
834
|
-
existing_assets = regscale_models.Asset.get_all_by_parent(
|
|
835
|
-
parent_id=self.plan_id,
|
|
836
|
-
parent_module=self.parent_module,
|
|
837
|
-
)
|
|
838
|
-
|
|
839
|
-
# Create Wiz ID-based lookup cache (Wiz resource ID -> RegScale asset)
|
|
840
|
-
self._regscale_assets_by_wiz_id = {asset.wizId: asset for asset in existing_assets if asset.wizId}
|
|
841
|
-
logger.info(f"Loaded {len(self._regscale_assets_by_wiz_id)} existing assets for lookup")
|
|
842
|
-
|
|
843
|
-
except Exception as e:
|
|
844
|
-
logger.error(f"Error loading RegScale assets: {e}")
|
|
845
|
-
# Initialize empty cache to avoid repeated failures
|
|
846
|
-
self._regscale_assets_by_wiz_id = {}
|
|
847
|
-
|
|
848
|
-
def _map_framework_id_to_name(self, framework_id: str) -> str:
|
|
849
|
-
"""
|
|
850
|
-
Map framework ID to framework name.
|
|
851
|
-
|
|
852
|
-
:param str framework_id: Framework ID to map
|
|
853
|
-
:return: Human-readable framework name
|
|
854
|
-
:rtype: str
|
|
855
|
-
"""
|
|
856
|
-
# Default mappings - will be enhanced with cached data
|
|
857
|
-
default_mappings = {
|
|
858
|
-
"wf-id-4": "NIST800-53R5",
|
|
859
|
-
"wf-id-48": "NIST800-53R4",
|
|
860
|
-
"wf-id-5": "FedRAMP",
|
|
861
|
-
}
|
|
862
|
-
|
|
863
|
-
return default_mappings.get(framework_id, framework_id)
|
|
864
|
-
|
|
865
|
-
def create_finding_from_compliance_item(self, compliance_item: ComplianceItem) -> Optional[IntegrationFinding]:
|
|
866
|
-
"""
|
|
867
|
-
Create an IntegrationFinding from a failed compliance item with proper asset/issue matching.
|
|
868
|
-
|
|
869
|
-
:param ComplianceItem compliance_item: The compliance item
|
|
870
|
-
:return: IntegrationFinding or None
|
|
871
|
-
:rtype: Optional[IntegrationFinding]
|
|
872
|
-
"""
|
|
873
|
-
if not isinstance(compliance_item, WizComplianceItem):
|
|
874
|
-
return super().create_finding_from_compliance_item(compliance_item)
|
|
875
|
-
|
|
876
|
-
try:
|
|
877
|
-
control_labels = self._get_control_labels(compliance_item)
|
|
878
|
-
severity = self._map_severity(compliance_item.severity)
|
|
879
|
-
policy_name = self._get_policy_name(compliance_item)
|
|
880
|
-
title = self._compose_title(policy_name, compliance_item)
|
|
881
|
-
description = self._compose_description(policy_name, compliance_item)
|
|
882
|
-
finding = self._build_finding(
|
|
883
|
-
control_labels=control_labels,
|
|
884
|
-
title=title,
|
|
885
|
-
description=description,
|
|
886
|
-
severity=severity,
|
|
887
|
-
compliance_item=compliance_item,
|
|
888
|
-
)
|
|
889
|
-
self._set_affected_controls(finding, compliance_item)
|
|
890
|
-
self._set_assessment_id_if_available(finding, compliance_item)
|
|
891
|
-
return finding
|
|
892
|
-
except Exception as e:
|
|
893
|
-
logger.error(f"Error creating finding from Wiz compliance item: {e}")
|
|
894
|
-
return None
|
|
895
|
-
|
|
896
|
-
# ---------- Private helpers (low-complexity building blocks) ----------
|
|
897
|
-
|
|
898
|
-
@staticmethod
|
|
899
|
-
def _get_control_labels(item: WizComplianceItem) -> List[str]:
|
|
900
|
-
"""
|
|
901
|
-
Extract control labels from a Wiz compliance item.
|
|
902
|
-
|
|
903
|
-
:param WizComplianceItem item: Compliance item to extract labels from
|
|
904
|
-
:return: List of control labels
|
|
905
|
-
:rtype: List[str]
|
|
906
|
-
"""
|
|
907
|
-
return [item.control_id] if item.control_id else []
|
|
908
|
-
|
|
909
|
-
@staticmethod
|
|
910
|
-
def _get_policy_name(item: WizComplianceItem) -> str:
|
|
911
|
-
"""
|
|
912
|
-
Extract policy name from a Wiz compliance item.
|
|
913
|
-
|
|
914
|
-
:param WizComplianceItem item: Compliance item to extract policy name from
|
|
915
|
-
:return: Policy name or 'Unknown Policy' if not found
|
|
916
|
-
:rtype: str
|
|
917
|
-
"""
|
|
918
|
-
return (item.policy.get("name") or "Unknown Policy").strip()
|
|
919
|
-
|
|
920
|
-
@staticmethod
|
|
921
|
-
def _compose_title(policy_name: str, item: WizComplianceItem) -> str:
|
|
922
|
-
"""
|
|
923
|
-
Compose a finding title from policy name and control information.
|
|
924
|
-
|
|
925
|
-
:param str policy_name: Name of the policy
|
|
926
|
-
:param WizComplianceItem item: Compliance item with control information
|
|
927
|
-
:return: Formatted title for the finding
|
|
928
|
-
:rtype: str
|
|
929
|
-
"""
|
|
930
|
-
return f"{policy_name} ({item.control_id})" if item.control_id else policy_name
|
|
931
|
-
|
|
932
|
-
def _compose_description(self, policy_name: str, item: WizComplianceItem) -> str:
|
|
933
|
-
"""
|
|
934
|
-
Compose a detailed description for a compliance finding.
|
|
935
|
-
|
|
936
|
-
:param str policy_name: Name of the policy that failed
|
|
937
|
-
:param WizComplianceItem item: Compliance item with resource and policy details
|
|
938
|
-
:return: Formatted markdown description
|
|
939
|
-
:rtype: str
|
|
940
|
-
"""
|
|
941
|
-
parts: List[str] = [
|
|
942
|
-
f"Policy compliance failure detected by Wiz for resource '{item.resource_name}'.",
|
|
943
|
-
"",
|
|
944
|
-
f"**Policy:** {policy_name}",
|
|
945
|
-
f"**Resource:** {item.resource_name} ({item.resource.get('type', 'Unknown')})",
|
|
946
|
-
f"**Control:** {item.control_id}",
|
|
947
|
-
f"**Framework:** {item.framework}",
|
|
948
|
-
f"**Result:** {item.result}",
|
|
949
|
-
]
|
|
950
|
-
|
|
951
|
-
# Policy/Remediation details
|
|
952
|
-
policy_desc = item.policy.get("description") or item.policy.get("ruleDescription")
|
|
953
|
-
if policy_desc:
|
|
954
|
-
parts.extend(["", "**Policy Description:**", policy_desc])
|
|
955
|
-
|
|
956
|
-
remediation = item.policy.get("remediationInstructions")
|
|
957
|
-
if remediation:
|
|
958
|
-
parts.extend(["", "**Remediation Instructions:**", remediation])
|
|
959
|
-
|
|
960
|
-
# Location details
|
|
961
|
-
if item.resource.get("region"):
|
|
962
|
-
parts.append(f"**Region:** {item.resource['region']}")
|
|
963
|
-
if item.resource.get("subscription"):
|
|
964
|
-
sub = item.resource["subscription"]
|
|
965
|
-
parts.append(
|
|
966
|
-
f"**Cloud Provider:** {sub.get('cloudProvider', 'Unknown')} "
|
|
967
|
-
f"(Subscription: {sub.get('name', 'Unknown')})"
|
|
968
|
-
)
|
|
969
|
-
|
|
970
|
-
return "\n".join(parts)
|
|
971
|
-
|
|
972
|
-
    def _build_finding(
        self,
        *,
        control_labels: List[str],
        title: str,
        description: str,
        severity: regscale_models.IssueSeverity,
        compliance_item: WizComplianceItem,
    ) -> IntegrationFinding:
        """
        Build an IntegrationFinding from compliance item components.

        :param List[str] control_labels: List of control labels
        :param str title: Finding title
        :param str description: Finding description
        :param regscale_models.IssueSeverity severity: Finding severity
        :param WizComplianceItem compliance_item: Source compliance item
        :return: Constructed integration finding
        :rtype: IntegrationFinding
        """
        stable_rule = compliance_item.control_id or ""
        return IntegrationFinding(
            control_labels=control_labels,
            title=f"Policy Compliance Failure: {title}" if compliance_item.is_fail else title,
            category="Policy Compliance",
            plugin_name=f"{self.title}",
            severity=severity,
            description=description,
            status=regscale_models.IssueStatus.Open,
            priority=self._map_severity_to_priority(severity),
            plugin_id=f"policy-control:{self.framework_id}:{stable_rule}",
            external_id=(
                f"wiz-policy-{compliance_item.id}" if compliance_item.id else f"wiz-policy-control-{stable_rule}"
            ),
            identification="Security Control Assessment",
            first_seen=self.scan_date,
            last_seen=self.scan_date,
            scan_date=self.scan_date,
            asset_identifier=self._get_regscale_asset_identifier(compliance_item),
            issue_asset_identifier_value=self._get_provider_unique_id_for_asset_identifier(compliance_item),
            vulnerability_type="Policy Compliance Violation",
            rule_id=compliance_item.control_id,
            baseline=compliance_item.framework,
            remediation=compliance_item.policy.get("remediationInstructions") or "",
        )

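    # Note on identifiers (example values hypothetical): plugin_id is keyed by framework
    # and control, e.g. "policy-control:wf-id-4:AC-3", while external_id prefers the Wiz
    # assessment id ("wiz-policy-<id>") and falls back to "wiz-policy-control-<control>"
    # when the item has no id, which keeps findings stable across repeated syncs.
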
    def _set_affected_controls(self, finding: IntegrationFinding, item: WizComplianceItem) -> None:
        """
        Set the affected controls field on a finding from a compliance item.

        :param IntegrationFinding finding: Finding to update
        :param WizComplianceItem item: Compliance item with control information
        :return: None
        :rtype: None
        """
        if item.control_id:
            finding.affected_controls = self._normalize_control_id_string(item.control_id)

    def _set_assessment_id_if_available(self, finding: IntegrationFinding, item: WizComplianceItem) -> None:
        """
        Set the assessment ID on a finding if available from cached mappings.

        :param IntegrationFinding finding: Finding to update with assessment ID
        :param WizComplianceItem item: Compliance item with control information
        :return: None
        :rtype: None
        """
        try:
            ctrl_norm = self._normalize_control_id_string(item.control_id)
            if ctrl_norm and hasattr(self, "_impl_id_by_control"):
                impl_id = self._impl_id_by_control.get(ctrl_norm)
                if impl_id and hasattr(self, "_assessment_by_impl_today"):
                    assess = self._assessment_by_impl_today.get(impl_id)
                    if assess:
                        finding.assessment_id = assess.id
        except Exception:
            pass

    def create_asset_from_compliance_item(self, compliance_item: ComplianceItem) -> Optional[IntegrationAsset]:
        """
        Create an IntegrationAsset from a Wiz compliance item with enhanced metadata.

        :param ComplianceItem compliance_item: The compliance item
        :return: IntegrationAsset or None
        :rtype: Optional[IntegrationAsset]
        """
        if not isinstance(compliance_item, WizComplianceItem):
            return super().create_asset_from_compliance_item(compliance_item)

        try:
            resource = compliance_item.resource
            asset_type = self._map_resource_type_to_asset_type(compliance_item)

            # Build asset description with cloud metadata
            description_parts = [
                "Cloud resource from Wiz compliance scan",
                f"Type: {resource.get('type', 'Unknown')}",
            ]

            if resource.get("region"):
                description_parts.append(f"Region: {resource['region']}")

            if resource.get("subscription"):
                sub = resource["subscription"]
                description_parts.append(
                    f"Cloud Provider: {sub.get('cloudProvider', 'Unknown')} "
                    f"(Subscription: {sub.get('name', 'Unknown')})"
                )

            # Add tags if available
            tags = resource.get("tags", [])
            if tags:
                tag_strings = [f"{tag.get('key')}:{tag.get('value')}" for tag in tags if tag.get("key")]
                if tag_strings:
                    description_parts.append(f"Tags: {', '.join(tag_strings)}")

            # Get user ID directly from application config
            app = Application()
            config = app.config
            user_id = config.get("userId")

            asset = IntegrationAsset(
                name=compliance_item.resource_name,
                identifier=compliance_item.resource_name,  # Use name only without UUID
                external_id=compliance_item.resource_id,
                other_tracking_number=compliance_item.resource_id,  # For deduplication
                asset_type=asset_type,
                asset_category=regscale_models.AssetCategory.Hardware,
                description="\n".join(description_parts),
                parent_id=self.plan_id,
                parent_module=self.parent_module,
                status=regscale_models.AssetStatus.Active,
                date_last_updated=self.scan_date,
                notes=self._create_asset_notes(compliance_item),
                # Set asset owner ID from config
                asset_owner_id=user_id,
                # Enable component mapping flow downstream
                component_names=[],
            )

            return asset

        except Exception as e:
            logger.error(f"Error creating asset from Wiz compliance item: {e}")
            return None

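    # Sketch of the resulting asset (illustrative): a Wiz cloud resource becomes an
    # IntegrationAsset whose identifier is the resource name, whose external_id and
    # other_tracking_number carry the Wiz resource id for deduplication, and whose
    # description lists type, region, subscription, and any "key:value" tags.
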
    def create_scan_history(self):  # type: ignore[override]
        """No scan history created for compliance report ingest."""
        return None

    def _create_asset_notes(self, compliance_item: WizComplianceItem) -> str:
        """
        Create detailed notes for asset with compliance context.

        :param WizComplianceItem compliance_item: Compliance item with resource details
        :return: Formatted asset notes in markdown
        :rtype: str
        """
        resource = compliance_item.resource
        notes_parts = [
            "# Wiz Asset Details",
            f"**Resource ID:** {compliance_item.resource_id}",
            f"**Resource Type:** {resource.get('type', 'Unknown')}",
        ]

        # Add subscription details
        if resource.get("subscription"):
            sub = resource["subscription"]
            notes_parts.extend(
                [
                    "",
                    "## Cloud Provider Details",
                    f"**Provider:** {sub.get('cloudProvider', 'Unknown')}",
                    f"**Subscription Name:** {sub.get('name', 'Unknown')}",
                    f"**Subscription ID:** {sub.get('externalId', 'Unknown')}",
                ]
            )

        # Add compliance summary
        total_items = len(self.asset_compliance_map.get(compliance_item.resource_id, []))
        failed_items = len(
            [
                item
                for item in self.asset_compliance_map.get(compliance_item.resource_id, [])
                if item.compliance_result in self.FAIL_STATUSES
            ]
        )

        if total_items > 0:
            notes_parts.extend(
                [
                    "",
                    "## Compliance Summary",
                    f"**Total Assessments:** {total_items}",
                    f"**Failed Assessments:** {failed_items}",
                    f"**Compliance Rate:** {((total_items - failed_items) / total_items * 100):.1f}%",
                ]
            )

        return "\n".join(notes_parts)

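    # Worked example for the compliance summary (hypothetical counts): with 8 assessments
    # recorded for a resource, 2 of which fall in FAIL_STATUSES, the notes report
    # Total Assessments: 8, Failed Assessments: 2, Compliance Rate: (8 - 2) / 8 * 100 = 75.0%.
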
    def authenticate_wiz(self) -> str:
        """
        Authenticate with Wiz and return access token.

        :return: Wiz access token
        :rtype: str
        """
        logger.info("Authenticating with Wiz...")
        try:
            token = wiz_authenticate(client_id=self.client_id, client_secret=self.client_secret)
            if not token:
                error_and_exit("Failed to authenticate with Wiz")

            # Get Wiz endpoint from config
            app = check_license()
            config = app.config
            self.wiz_endpoint = config.get("wizUrl", "")
            if not self.wiz_endpoint:
                error_and_exit("No Wiz URL found in configuration")

            self.access_token = token
            logger.info("Successfully authenticated with Wiz")
            return token

        except Exception as e:
            logger.error(f"Wiz authentication failed: {str(e)}")
            error_and_exit(f"Wiz authentication failed: {str(e)}")

    def _fetch_policy_assessments_from_wiz(self) -> List[Dict[str, Any]]:
        """
        Fetch policy assessments from Wiz GraphQL API.

        :return: List of raw policy assessment data
        :rtype: List[Dict[str, Any]]
        """
        logger.info("Fetching policy assessments from Wiz...")

        if not self.access_token:
            self.authenticate_wiz()

        cached_nodes = self._load_assessments_from_cache()
        if cached_nodes is not None:
            logger.info("Using cached Wiz policy assessments")
            return cached_nodes

        # Try async approach first
        async_results = self._try_async_assessment_fetch()
        if async_results is not None:
            self._write_assessments_cache(async_results)
            return async_results

        # Fall back to requests-based method
        filtered_nodes = self._fetch_assessments_with_requests()
        self._write_assessments_cache(filtered_nodes)
        return filtered_nodes

    def _try_async_assessment_fetch(self) -> Optional[List[Dict[str, Any]]]:
        """Try to fetch assessments using async client."""
        try:
            from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

            page_size = 100
            headers = self._build_wiz_headers()

            with compliance_job_progress:
                task = compliance_job_progress.add_task(
                    f"[#f68d1f]Fetching Wiz policy assessments (async, page size: {page_size})...",
                    total=1,
                )
                results = run_async_queries(
                    endpoint=self.wiz_endpoint or WIZ_URL,
                    headers=headers,
                    query_configs=[
                        {
                            "type": WizVulnerabilityType.CONFIGURATION,
                            "query": WIZ_POLICY_QUERY,
                            "topic_key": "policyAssessments",
                            "variables": {"first": page_size},
                        }
                    ],
                    progress_tracker=compliance_job_progress,
                    max_concurrent=1,
                )
                compliance_job_progress.update(task, completed=1, advance=1)

            if results and len(results) == 1 and not results[0][2]:
                nodes = results[0][1] or []
                return self._filter_nodes_to_framework(nodes)
        except Exception:
            pass
        return None

    def _fetch_assessments_with_requests(self) -> List[Dict[str, Any]]:
        """Fetch assessments using requests-based method with filter variants."""
        headers = self._build_wiz_headers()
        session = self._prepare_wiz_requests_session()
        page_size = 100
        base_variables = {"first": page_size}

        filter_variants = [
            {"project": [self.wiz_project_id]},
            {"projectId": [self.wiz_project_id]},
            {"projects": [self.wiz_project_id]},
            {},  # Empty filterBy
            None,  # Omit filterBy entirely
        ]

        return self._fetch_assessments_with_variants(
            session=session,
            headers=headers,
            base_variables=base_variables,
            page_size=page_size,
            filter_variants=filter_variants,
        )

    def _build_wiz_headers(self) -> Dict[str, str]:
        """
        Build HTTP headers for Wiz GraphQL API requests.

        :return: Dictionary of HTTP headers including authorization
        :rtype: Dict[str, str]
        """
        return {
            "Authorization": f"Bearer {self.access_token}",
            "Content-Type": "application/json",
        }

    def _prepare_wiz_requests_session(self):
        """
        Prepare a requests session with retry logic for Wiz API calls.

        :return: Configured requests session with retry adapter
        :rtype: requests.Session
        """
        import requests
        from requests.adapters import HTTPAdapter
        from urllib3.util.retry import Retry

        session = requests.Session()
        retry = Retry(
            total=5,
            connect=5,
            read=5,
            backoff_factor=0.5,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["POST"],
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount("https://", adapter)
        session.mount("http://", adapter)  # NOSONAR
        return session

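    # Behavior sketch for the retry session (approximate, per urllib3 Retry semantics):
    # POST calls that return 429/500/502/503/504 are retried up to 5 times, with an
    # exponential backoff derived from backoff_factor=0.5 (roughly 0.5s, 1s, 2s, ...
    # between successive retries) before the final error surfaces to the caller.
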
    def _fetch_assessments_with_variants(
        self,
        *,
        session,
        headers: Dict[str, str],
        base_variables: Dict[str, Any],
        page_size: int,
        filter_variants: List[Optional[Dict[str, Any]]],
    ) -> List[Dict[str, Any]]:
        from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

        last_error: Optional[Exception] = None

        # In unit tests, the async client is patched and we should not hit network.
        with compliance_job_progress:
            task = compliance_job_progress.add_task(
                f"[#f68d1f]Fetching Wiz policy assessments (page size: {page_size})...",
                total=None,
            )
            for fv in filter_variants:
                try:
                    # If endpoint is not set (tests), short-circuit to async path mock
                    if not self.wiz_endpoint:
                        results = run_async_queries(
                            endpoint=WIZ_URL,
                            headers=headers,
                            query_configs=[
                                {
                                    "type": WizVulnerabilityType.CONFIGURATION,
                                    "query": WIZ_POLICY_QUERY,
                                    "topic_key": "policyAssessments",
                                    "variables": {**base_variables, **({"filterBy": fv} if fv is not None else {})},
                                }
                            ],
                            progress_tracker=compliance_job_progress,
                            max_concurrent=1,
                        )
                        # Expected mocked structure: [(type, nodes, error)]
                        if results and len(results) == 1 and not results[0][2]:
                            nodes = results[0][1] or []
                            return self._filter_nodes_to_framework(nodes)

                    return self._fetch_with_filter_variant(
                        session=session,
                        headers=headers,
                        base_variables=base_variables,
                        filter_variant=fv,
                        page_size=page_size,
                        progress=compliance_job_progress,
                        task=task,
                    )
                except Exception as exc:  # noqa: BLE001 - propagate last error
                    last_error = exc

        msg = f"Failed to fetch policy assessments after trying all filter variants: {last_error}"
        logger.error(msg)
        error_and_exit(msg)

    def _variant_name(self, fv: Optional[Dict[str, Any]]) -> str:
        """
        Get a human-readable name for a filter variant.

        :param Optional[Dict[str, Any]] fv: Filter variant dictionary
        :return: Human-readable variant name
        :rtype: str
        """
        if fv is None:
            return "omitted"
        if fv == {}:
            return "empty"
        try:
            return next(iter(fv.keys()))
        except Exception:
            return "unknown"

    def _fetch_with_filter_variant(
        self,
        *,
        session,
        headers: Dict[str, str],
        base_variables: Dict[str, Any],
        filter_variant: Optional[Dict[str, Any]],
        page_size: int,
        progress,
        task,
    ) -> List[Dict[str, Any]]:
        variant_name = self._variant_name(filter_variant)
        progress.update(
            task,
            description=(f"[#f68d1f]Fetching Wiz policy assessments (limit: {page_size}, variant: {variant_name})..."),
            advance=1,
        )

        variables = base_variables.copy() if filter_variant is None else {**base_variables, "filterBy": filter_variant}

        def on_page(page_idx: int, page_count: int, total_nodes: int) -> None:
            progress.update(
                task,
                description=(
                    f"[cyan]Fetching policy assessments: page {page_idx}, "
                    f"fetched {total_nodes} nodes (last page: {page_count})"
                ),
                advance=1,
            )

        nodes = self._execute_wiz_policy_query_paginated(
            session=session, headers=headers, variables=variables, on_page=on_page
        )
        filtered_nodes = self._filter_nodes_to_framework(nodes)
        progress.update(
            task,
            description=f"[green]Completed Wiz policy assessments: {len(filtered_nodes)} nodes",
            completed=1,
            total=1,
        )
        logger.info("Successfully fetched Wiz policy assessments")

        return filtered_nodes

    def _execute_wiz_policy_query_paginated(
        self,
        *,
        session,
        headers: Dict[str, str],
        variables: Dict[str, Any],
        on_page=None,
    ) -> List[Dict[str, Any]]:
        import requests

        nodes: List[Dict[str, Any]] = []
        after_cursor: Optional[str] = variables.get("after")
        page_index = 0
        while True:
            payload_vars = variables.copy()
            payload_vars["after"] = after_cursor
            payload = {"query": WIZ_POLICY_QUERY, "variables": payload_vars}
            resp = session.post(self.wiz_endpoint, json=payload, headers=headers, timeout=300)
            if resp.status_code >= 400:
                raise requests.HTTPError(f"{resp.status_code} {resp.text[:500]}")
            data = resp.json()
            if "errors" in data:
                raise RuntimeError(str(data["errors"]))
            topic = data.get("data", {}).get("policyAssessments", {})
            page_nodes = topic.get("nodes", [])
            page_info = topic.get("pageInfo", {})
            nodes.extend(page_nodes)
            page_index += 1
            if on_page:
                try:
                    on_page(page_index, len(page_nodes), len(nodes))
                except Exception:
                    pass
            has_next = page_info.get("hasNextPage", False)
            after_cursor = page_info.get("endCursor")
            if not has_next:
                break
        return nodes

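    # Pagination contract (as used above): each GraphQL page returns
    # data.policyAssessments.nodes plus pageInfo.hasNextPage / pageInfo.endCursor; the
    # loop feeds endCursor back in as "after" and stops when hasNextPage is false, so a
    # 450-node result with "first": 100 takes five requests (illustrative numbers).
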
    def _filter_nodes_to_framework(self, nodes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        filtered_nodes: List[Dict[str, Any]] = []
        for n in nodes:
            try:
                subcats = ((n or {}).get("policy") or {}).get("securitySubCategories", [])
                # If no subcategories info is present, include the node (cannot evaluate framework)
                if not subcats:
                    filtered_nodes.append(n)
                    continue
                if any((sc.get("category", {}).get("framework", {}).get("id") == self.framework_id) for sc in subcats):
                    filtered_nodes.append(n)
            except Exception:
                filtered_nodes.append(n)
        return filtered_nodes

    def _get_assessments_cache_path(self) -> str:
        """
        Get the file path for policy assessments cache.

        :return: Full path to cache file
        :rtype: str
        """
        try:
            os.makedirs(self.policy_cache_dir, exist_ok=True)
        except Exception:
            pass
        return self.policy_cache_file

    def _load_assessments_from_cache(self) -> Optional[List[Dict[str, Any]]]:
        """
        Load policy assessments from cache file if valid and within TTL.

        :return: Cached assessment nodes or None if cache is invalid/expired
        :rtype: Optional[List[Dict[str, Any]]]
        """
        if self.force_refresh or self.cache_duration_minutes <= 0:
            return None
        try:
            path = self._get_assessments_cache_path()
            if not os.path.exists(path):
                return None
            # File age check
            max_age_seconds = max(0, int(self.cache_duration_minutes)) * 60
            age = max(0.0, (datetime.now().timestamp() - os.path.getmtime(path)))
            if age > max_age_seconds:
                return None
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            nodes = data.get("nodes") or data.get("assessments") or []
            # Defensive: ensure list
            if not isinstance(nodes, list):
                return None
            return nodes
        except Exception:
            return None

    def _write_assessments_cache(self, nodes: List[Dict[str, Any]]) -> None:
        """
        Write policy assessment nodes to cache file.

        :param List[Dict[str, Any]] nodes: Assessment nodes to cache
        :return: None
        :rtype: None
        """
        # Only write cache when enabled
        if self.cache_duration_minutes <= 0:
            return None
        try:
            path = self._get_assessments_cache_path()
            payload = {
                "timestamp": datetime.now().isoformat(),
                "wiz_project_id": self.wiz_project_id,
                "framework_id": self.framework_id,
                "nodes": nodes,
            }
            with open(path, "w", encoding="utf-8") as f:
                json.dump(payload, f, ensure_ascii=False)
        except Exception:
            # Cache write failures should not interrupt flow
            pass

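    # Cache file shape written above (field values are illustrative):
    #   {
    #     "timestamp": "2025-01-01T00:00:00",
    #     "wiz_project_id": "<project-id>",
    #     "framework_id": "wf-id-4",
    #     "nodes": [ ...raw policy assessment nodes... ]
    #   }
    # _load_assessments_from_cache() accepts either "nodes" or a legacy "assessments" key
    # and treats cache_duration_minutes as a TTL against the file's modification time.
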
    def write_policy_data_to_json(self) -> str:
        """
        Write policy assessment data to JSON and JSONL files with timestamp.

        :return: Path to the written JSON file
        :rtype: str
        """
        # Setup file paths
        artifacts_dir, timestamp, file_path, file_path_jsonl = self._setup_output_files()

        # Build compliance summary data
        catalog_controls = self._get_catalog_controls()
        control_sets = self._build_control_sets(catalog_controls)

        # Prepare export data structure
        export_data = self._build_export_data(timestamp, catalog_controls, control_sets)

        # Convert compliance items to serializable format
        self._add_policy_assessments_to_export(export_data)

        # Write files and cleanup
        return self._write_output_files(file_path, file_path_jsonl, export_data, artifacts_dir)

    def _setup_output_files(self) -> tuple[str, str, str, str]:
        """Setup output directory and file paths."""
        artifacts_dir = os.path.join("artifacts", "wiz")
        os.makedirs(artifacts_dir, exist_ok=True)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename_json = f"policy_compliance_report_{timestamp}.json"
        filename_jsonl = f"policy_compliance_report_{timestamp}.jsonl"
        file_path = os.path.join(artifacts_dir, filename_json)
        file_path_jsonl = os.path.join(artifacts_dir, filename_jsonl)

        return artifacts_dir, timestamp, file_path, file_path_jsonl

    def _get_catalog_controls(self) -> set[str]:
        """Get catalog controls from the plan/catalog."""
        catalog_controls = set()
        try:
            controls = self._get_controls()
            for ctl in controls:
                cid = (ctl.get("controlId") or "").strip()
                if cid:
                    catalog_controls.add(cid)
        except Exception:
            catalog_controls = set()
        return catalog_controls

    def _build_control_sets(self, catalog_controls: set[str]) -> Dict[str, set]:
        """Build control sets for summary calculations."""
        assessed_controls = {item.control_id for item in self.all_compliance_items if item.control_id}
        passing_control_ids = {key.upper() for key in self.passing_controls.keys()}
        failing_control_ids = {key.upper() for key in self.failing_controls.keys()}

        return {
            "assessed": assessed_controls,
            "passing": passing_control_ids,
            "failing": failing_control_ids,
            "catalog": catalog_controls,
        }

    def _build_export_data(
        self, timestamp: str, catalog_controls: set[str], control_sets: Dict[str, set]
    ) -> Dict[str, Any]:
        """Build the main export data structure."""
        assessed_controls = control_sets["assessed"]
        passing_control_ids = control_sets["passing"]
        failing_control_ids = control_sets["failing"]

        return {
            "metadata": {
                "timestamp": timestamp,
                "wiz_project_id": self.wiz_project_id,
                "framework_id": self.framework_id,
                "framework_name": self.get_framework_name(self.framework_id),
                "total_assessments": len(self.all_compliance_items),
                "pass_count": len(self.all_compliance_items) - len(self.failed_compliance_items),
                "fail_count": len(self.failed_compliance_items),
                "unique_controls": len(assessed_controls),
                "catalog_summary": self._build_catalog_summary(
                    catalog_controls, assessed_controls, passing_control_ids, failing_control_ids
                ),
            },
            "framework_mapping": self.framework_mapping,
            "control_summary": {
                "passing_controls": list(passing_control_ids),
                "failing_controls": list(failing_control_ids),
                "catalog_controls_no_wiz_data": list(catalog_controls - assessed_controls - passing_control_ids),
                "wiz_controls_outside_catalog": list(assessed_controls - catalog_controls),
            },
            "policy_assessments": [],
        }

    def _build_catalog_summary(
        self,
        catalog_controls: set[str],
        assessed_controls: set[str],
        passing_control_ids: set[str],
        failing_control_ids: set[str],
    ) -> Dict[str, int]:
        """Build catalog summary statistics."""
        return {
            "total_catalog_controls": len(catalog_controls),
            "catalog_controls_with_wiz_data": len(catalog_controls.intersection(assessed_controls)),
            "catalog_controls_passing": len(catalog_controls.intersection(passing_control_ids)),
            "catalog_controls_failing": len(catalog_controls.intersection(failing_control_ids)),
            "catalog_controls_no_data": len(catalog_controls - assessed_controls - passing_control_ids),
            "wiz_controls_outside_catalog": len(assessed_controls - catalog_controls),
        }

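    # Worked example for the catalog summary (hypothetical sets): with
    # catalog = {AC-2, AC-3, AC-4}, assessed = {AC-3, AC-4, SC-7}, passing = {AC-3},
    # failing = {AC-4}, the summary reports 3 catalog controls, 2 with Wiz data,
    # 1 passing, 1 failing, 1 with no data (AC-2), and 1 Wiz control outside the
    # catalog (SC-7).
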
    def _add_policy_assessments_to_export(self, export_data: Dict[str, Any]) -> None:
        """Add policy assessments to export data."""
        for compliance_item in self.all_compliance_items:
            if isinstance(compliance_item, WizComplianceItem):
                assessment_data = self._build_assessment_data(compliance_item)
                export_data["policy_assessments"].append(assessment_data)

    def _build_assessment_data(self, compliance_item: WizComplianceItem) -> Dict[str, Any]:
        """Build assessment data for a single compliance item."""
        filtered_policy = self._filter_policy_subcategories(compliance_item)

        return {
            "id": compliance_item.id,
            "result": compliance_item.result,
            "control_id": compliance_item.control_id,
            "framework_name": compliance_item.framework,
            "framework_id": compliance_item.framework_id,
            "policy": filtered_policy or compliance_item.policy,
            "resource": compliance_item.resource,
            "output": compliance_item.output,
        }

    def _filter_policy_subcategories(self, compliance_item: WizComplianceItem) -> Dict[str, Any]:
        """Filter policy subcategories to only the selected framework."""
        filtered_policy = dict(compliance_item.policy) if compliance_item.policy else {}
        if not filtered_policy:
            return filtered_policy

        subcats = filtered_policy.get("securitySubCategories", [])
        if not subcats:
            return filtered_policy

        target_framework_id = self.framework_id
        filtered_subcats = [
            sc for sc in subcats if sc.get("category", {}).get("framework", {}).get("id") == target_framework_id
        ]

        if filtered_subcats:
            filtered_policy["securitySubCategories"] = filtered_subcats

        return filtered_policy

    def _write_output_files(
        self, file_path: str, file_path_jsonl: str, export_data: Dict[str, Any], artifacts_dir: str
    ) -> str:
        """Write output files and perform cleanup."""
        try:
            self._write_json_file(file_path, export_data)
            self._write_jsonl_file_if_enabled(file_path_jsonl)
            self._cleanup_artifacts(artifacts_dir, keep=CACHE_CLEANUP_KEEP_COUNT)
            return file_path
        except Exception as e:
            error_and_exit(f"Failed to write policy data to JSON: {str(e)}")

    def _write_json_file(self, file_path: str, export_data: Dict[str, Any]) -> None:
        """Write JSON export data to file."""
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(export_data, f, indent=2, ensure_ascii=False)
        logger.info(f"Policy compliance data written to: {file_path}")

    def _write_jsonl_file_if_enabled(self, file_path_jsonl: str) -> None:
        """Write JSONL file if output is enabled."""
        if getattr(self, "write_jsonl_output", False):
            control_agg = self._build_control_aggregation()
            with open(file_path_jsonl, "w", encoding="utf-8") as jf:
                for control_id, ctrl in control_agg.items():
                    jf.write(json.dumps(ctrl, ensure_ascii=False) + "\n")
            logger.info(f"Policy compliance JSONL written to: {file_path_jsonl}")

    def _build_control_aggregation(self) -> Dict[str, Dict[str, Any]]:
        """
        Build an aggregated view per control_id for JSONL export.

        Creates a control-centric view with assets affected and policy checks.

        :return: Dictionary mapping control IDs to aggregated data
        :rtype: Dict[str, Dict[str, Any]]

        {
            control_id: {
                "control_id": "AC-2(1)",
                "framework_id": "wf-id-4",
                "framework_name": "NIST SP 800-53 Revision 5",
                "failed": true,
                "assets_affected": [
                    {
                        "resource_id": "...",
                        "resource_name": "...",
                        "resource_type": "...",
                        "region": "...",
                        "subscription": "...",
                        "checks": [
                            {"title": "Policy name", "result": "FAIL", "remediation": "..."}
                        ]
                    }
                ]
            }
        }
        """
        control_map: Dict[str, Dict[str, Any]] = {}

        for item in self.all_compliance_items:
            if not isinstance(item, WizComplianceItem):
                # Skip non-wiz items in this aggregation
                continue

            ctrl_id = self._normalize_control_id_string(item.control_id)
            if not ctrl_id:
                continue

            ctrl_entry = control_map.get(ctrl_id)
            if not ctrl_entry:
                ctrl_entry = {
                    "control_id": ctrl_id,
                    "framework_id": self.framework_id,
                    "framework_name": self.get_framework_name(self.framework_id),
                    "failed": False,
                    "assets_affected": [],
                }
                # Track assets in a dict for dedupe while building, convert to list at end
                ctrl_entry["_assets_idx"] = {}
                control_map[ctrl_id] = ctrl_entry

            # Determine fail/pass at control level
            if item.compliance_result in self.FAIL_STATUSES:
                ctrl_entry["failed"] = True

            # Asset bucket
            asset_id = item.resource_id
            assets_idx: Dict[str, Any] = ctrl_entry["_assets_idx"]  # type: ignore
            asset_entry = assets_idx.get(asset_id)
            if not asset_entry:
                asset_entry = {
                    "resource_id": item.resource_id,
                    "resource_name": item.resource_name,
                    "resource_type": (item.resource or {}).get("type"),
                    "region": (item.resource or {}).get("region"),
                    "subscription": ((item.resource or {}).get("subscription") or {}).get("name"),
                    "checks": [],
                }
                assets_idx[asset_id] = asset_entry

            # Append policy check info
            policy_name = (item.policy or {}).get("name") or (item.policy or {}).get("hostConfigurationRule", {}).get(
                "name"
            )
            remediation = (item.policy or {}).get("remediationInstructions")
            if policy_name:
                # Deduplicate identical checks by title within an asset
                titles = {c.get("title") for c in asset_entry["checks"]}
                if policy_name not in titles:
                    check = {
                        "title": policy_name,
                        "result": item.compliance_result,
                        "remediation": remediation,
                    }
                    asset_entry["checks"].append(check)

        # Convert asset index maps to lists for final output
        for ctrl in control_map.values():
            assets_idx = ctrl.pop("_assets_idx", {})  # type: ignore
            ctrl["assets_affected"] = list(assets_idx.values())

        return control_map

    @staticmethod
    def _normalize_control_id_string(control_id: Optional[str]) -> Optional[str]:
        """
        Normalize control id variants to a canonical form, e.g. 'AC-4(2)'.
        Accepts 'ac-4 (2)', 'AC-4-2', 'AC-4(2)'. Returns uppercase base with optional '(sub)'.
        """
        if not control_id:
            return None
        cid = control_id.strip()
        # Use precompiled safe regex to avoid catastrophic backtracking on crafted input
        m = SAFE_CONTROL_ID_RE.match(cid)
        if not m:
            return cid.upper()
        base = m.group(1).upper()
        # Subcontrol may be captured in group 2, 3, or 4 depending on the branch matched
        sub = m.group(2) or m.group(3) or m.group(4)
        return f"{base}({sub})" if sub else base

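    # Usage sketch for the normalizer (expected results, per the docstring above):
    #   _normalize_control_id_string("ac-4 (2)")  ->  "AC-4(2)"
    #   _normalize_control_id_string("AC-4-2")    ->  "AC-4(2)"
    #   _normalize_control_id_string("AC-4(2)")   ->  "AC-4(2)"
    #   _normalize_control_id_string(None)        ->  None
    # Strings that do not match SAFE_CONTROL_ID_RE are simply uppercased.
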
    @staticmethod
    def parse_control_jsonl(jsonl_path: str) -> Dict[str, Dict[str, Any]]:
        """
        Parse the aggregated control JSONL back into a dict keyed by control_id.
        """
        aggregated: Dict[str, Dict[str, Any]] = {}
        try:
            with open(jsonl_path, "r", encoding="utf-8") as jf:
                for line in jf:
                    line = line.strip()
                    if not line:
                        continue
                    obj = json.loads(line)
                    ctrl_id = obj.get("control_id")
                    if ctrl_id:
                        aggregated[ctrl_id] = obj
        except Exception as exc:
            logger.error(f"Error parsing JSONL {jsonl_path}: {exc}")
        return aggregated

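    # Illustrative JSONL line consumed by parse_control_jsonl (values hypothetical,
    # shape follows the aggregation docstring above):
    #   {"control_id": "AC-2(1)", "framework_id": "wf-id-4",
    #    "framework_name": "NIST SP 800-53 Revision 5", "failed": true,
    #    "assets_affected": [{"resource_id": "...", "resource_name": "...",
    #      "checks": [{"title": "Policy name", "result": "FAIL", "remediation": "..."}]}]}
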
    def _cleanup_artifacts(self, dir_path: str, keep: int = CACHE_CLEANUP_KEEP_COUNT) -> None:
        """
        Keep the most recent JSON and JSONL policy_compliance_report files, delete older ones.

        :param str dir_path: Directory containing artifacts to clean
        :param int keep: Number of most recent files per extension to keep
        :return: None
        :rtype: None
        """
        try:
            entries = [
                (f, os.path.join(dir_path, f))
                for f in os.listdir(dir_path)
                if f.startswith("policy_compliance_report_")
                and (f.endswith(JSON_FILE_EXT) or f.endswith(JSONL_FILE_EXT))
            ]
            # Group by extension to keep per-type
            by_ext: Dict[str, List[tuple[str, str]]] = {JSON_FILE_EXT: [], JSONL_FILE_EXT: []}
            for name, path in entries:
                ext = JSONL_FILE_EXT if name.endswith(JSONL_FILE_EXT) else JSON_FILE_EXT
                by_ext[ext].append((name, path))

            for ext, files in by_ext.items():
                files.sort(key=lambda p: os.path.getmtime(p[1]), reverse=True)
                for _, old_path in files[keep:]:
                    try:
                        os.remove(old_path)
                    except Exception:
                        # Non-fatal; continue cleanup
                        pass
        except Exception:
            pass

    def load_or_create_framework_mapping(self) -> Dict[str, str]:
        """
        Load framework mapping from cache file or create it by fetching from Wiz.

        :return: Framework ID to name mapping dictionary
        :rtype: Dict[str, str]
        """
        # Check if cache file exists
        if os.path.exists(self.framework_cache_file):
            logger.info("Loading framework mapping from cache file")
            return self._load_framework_mapping_from_cache()

        logger.info("Framework mapping cache not found, fetching from Wiz API")
        return self._fetch_and_cache_framework_mapping()

    def _load_framework_mapping_from_cache(self) -> Dict[str, str]:
        """
        Load framework mapping from existing JSON cache file.

        :return: Framework ID to name mapping
        :rtype: Dict[str, str]
        """
        try:
            with open(self.framework_cache_file, "r", encoding="utf-8") as f:
                cache_data = json.load(f)

            framework_mapping = cache_data.get("framework_mapping", {})
            cache_timestamp = cache_data.get("timestamp", "")

            logger.info(f"Loaded {len(framework_mapping)} frameworks from cache (created: {cache_timestamp})")
            self.framework_mapping = framework_mapping
            return framework_mapping

        except Exception as e:
            logger.error(f"Error loading framework mapping from cache: {str(e)}")
            logger.info("Falling back to fetching fresh framework data")
            return self._fetch_and_cache_framework_mapping()

    def _fetch_and_cache_framework_mapping(self) -> Dict[str, str]:
        """
        Fetch framework data from Wiz API and cache it to JSON file.

        :return: Framework ID to name mapping
        :rtype: Dict[str, str]
        """
        frameworks = self._fetch_security_frameworks()
        framework_mapping = self._create_framework_mapping(frameworks)
        self._write_framework_mapping_to_json(framework_mapping, frameworks)

        self.framework_mapping = framework_mapping
        return framework_mapping

    def _fetch_security_frameworks(self) -> List[Dict[str, Any]]:
        """
        Fetch security frameworks from Wiz GraphQL API.

        :return: List of framework data
        :rtype: List[Dict[str, Any]]
        """
        logger.info("Fetching security frameworks from Wiz...")

        # Authenticate if not already done
        if not self.access_token:
            self.authenticate_wiz()

        headers = {
            "Authorization": f"Bearer {self.access_token}",
            "Content-Type": "application/json",
        }

        query_config = {
            "type": WizVulnerabilityType.CONFIGURATION,  # Using existing enum type
            "query": WIZ_FRAMEWORK_QUERY,
            "topic_key": "securityFrameworks",
            "variables": {"first": 200, "filterBy": {}},  # Get all frameworks, no filtering
        }

        try:
            # Execute the query using async client with visible progress
            from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

            with compliance_job_progress:
                task = compliance_job_progress.add_task("[#f68d1f]Fetching Wiz security frameworks...", total=1)
                results = run_async_queries(
                    endpoint=self.wiz_endpoint,
                    headers=headers,
                    query_configs=[query_config],
                    progress_tracker=compliance_job_progress,
                    max_concurrent=1,
                )
                compliance_job_progress.update(task, completed=1, advance=1)

            if not results or len(results) == 0:
                logger.warning("No framework results returned from Wiz")
                return []

            _, nodes, error = results[0]

            if error:
                logger.error(f"Error fetching security frameworks: {error}")
                error_and_exit(f"Error fetching security frameworks: {error}")

            logger.info(f"Successfully fetched {len(nodes)} security frameworks")
            return nodes

        except Exception as e:
            error_and_exit(f"Failed to fetch security frameworks: {str(e)}")

    def _create_framework_mapping(self, frameworks: List[Dict[str, Any]]) -> Dict[str, str]:
        """
        Create framework ID to name mapping from framework data.

        :param List[Dict[str, Any]] frameworks: Raw framework data from Wiz API
        :return: Dictionary mapping framework IDs to human-readable names
        :rtype: Dict[str, str]
        """
        framework_mapping = {}

        for framework in frameworks:
            framework_id = framework.get("id")
            framework_name = framework.get("name")

            if framework_id and framework_name:
                framework_mapping[framework_id] = framework_name

        logger.info(f"Created mapping for {len(framework_mapping)} frameworks")
        return framework_mapping

    def _write_framework_mapping_to_json(
        self, framework_mapping: Dict[str, str], raw_frameworks: List[Dict[str, Any]]
    ) -> None:
        """
        Write framework mapping and raw data to JSON cache file.

        :param Dict[str, str] framework_mapping: Framework ID to name mapping dictionary
        :param List[Dict[str, Any]] raw_frameworks: Raw framework data from Wiz API
        :return: None
        :rtype: None
        """
        # Create artifacts/wiz directory if it doesn't exist
        artifacts_dir = os.path.dirname(self.framework_cache_file)
        os.makedirs(artifacts_dir, exist_ok=True)

        # Prepare data for JSON export
        cache_data = {
            "metadata": {
                "timestamp": datetime.now().isoformat(),
                "total_frameworks": len(framework_mapping),
                "enabled_frameworks": len([f for f in raw_frameworks if f.get("enabled", False)]),
                "builtin_frameworks": len([f for f in raw_frameworks if f.get("builtin", False)]),
                "description": "Cached Wiz security framework mappings",
            },
            "framework_mapping": framework_mapping,
            "raw_frameworks": raw_frameworks,
        }

        # Write to JSON file
        try:
            with open(self.framework_cache_file, "w", encoding="utf-8") as f:
                json.dump(cache_data, f, indent=2, ensure_ascii=False)

            logger.info(f"Framework mapping cached to: {self.framework_cache_file}")

        except Exception as e:
            logger.error(f"Failed to write framework mapping to cache: {str(e)}")
            # Don't exit here - this is not critical to the main functionality

    def get_framework_name(self, framework_id: str) -> str:
        """
        Get framework name by ID from cached mapping.

        :param str framework_id: Framework ID
        :return: Framework name or ID if not found
        :rtype: str
        """
        # Load mapping if not already loaded
        if not self.framework_mapping:
            self.load_or_create_framework_mapping()

        return self.framework_mapping.get(framework_id, framework_id)

    def sync_compliance(self) -> None:
        """
        Override base sync_compliance to ensure proper order for controlId/assessmentId assignment.

        CRITICAL: Control assessments MUST be created BEFORE issues are processed
        to ensure controlId and assessmentId can be properly set.
        """
        logger.info(f"Starting {self.title} compliance sync with proper assessment ordering...")

        try:
            scan_history = self.create_scan_history()
            self.process_compliance_data()

            # Step 1: Sync assets first
            self._sync_assets()

            # Step 2: CRITICAL - Pre-populate control implementation cache BEFORE creating assessments
            logger.info("Pre-populating control implementation cache for issue processing...")
            self._populate_control_implementation_cache()

            # Step 3: Create control assessments BEFORE issues (ensures assessmentId is available)
            logger.info("Creating control assessments BEFORE issue processing...")
            self._sync_control_assessments()

            # Step 3.5: CRITICAL - Refresh assessment cache after assessments are created
            logger.info("Refreshing assessment cache with newly created assessments...")
            self._refresh_assessment_cache_after_creation()

            # Step 4: NOW process issues with controlId and assessmentId properly set
            logger.info("Processing issues with control and assessment IDs available...")
            self._sync_issues()

            self._finalize_scan_history(scan_history)

            logger.info(f"Completed {self.title} compliance sync with proper assessment ordering")

        except Exception as e:
            error_and_exit(f"Error during compliance sync: {e}")

    def sync_policy_compliance(self, create_issues: bool = None, update_control_status: bool = None) -> None:
        """
        Main method to sync policy compliance data from Wiz.

        :param bool create_issues: Whether to create issues for failed assessments (uses instance default if None)
        :param bool update_control_status: Whether to update control implementation status (uses instance default if None)
        """
        logger.info("Starting Wiz policy compliance sync...")

        try:
            # Use instance defaults if not specified
            if create_issues is None:
                create_issues = self.create_issues
            if update_control_status is None:
                update_control_status = self.update_control_status

            # Step 1: Authenticate with Wiz
            self.authenticate_wiz()

            # Step 2: Load or create framework mapping cache
            self.load_or_create_framework_mapping()

            # Persist flags on the instance for downstream logic
            if create_issues is not None:
                self.create_issues = create_issues
            if update_control_status is not None:
                self.update_control_status = update_control_status

            # Step 3: Sync using the overridden method (which ensures proper ordering)
            logger.info(
                f"Sync parameters: create_issues={self.create_issues}, update_control_status={self.update_control_status}"
            )

            self.sync_compliance()

            # Step 4: Write data to JSON file for reference (post-processing)
            json_file = self.write_policy_data_to_json()
            logger.info(f"Policy compliance data saved to: {json_file}")

            logger.info("Policy compliance sync completed successfully")

        except Exception as e:
            error_and_exit(f"Policy compliance sync failed: {str(e)}")

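    # Usage sketch (assumes an already-configured instance of this scanner class; the
    # variable name is illustrative):
    #   reporter.sync_policy_compliance(create_issues=True, update_control_status=False)
    # Passing None for either flag keeps the instance defaults; the call authenticates,
    # loads or builds the framework-mapping cache, runs sync_compliance(), and writes the
    # timestamped JSON report under artifacts/wiz.
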
def sync_wiz_compliance(self) -> None:
|
|
2177
|
-
"""
|
|
2178
|
-
Convenience method for backward compatibility.
|
|
2179
|
-
|
|
2180
|
-
:return: None
|
|
2181
|
-
:rtype: None
|
|
2182
|
-
"""
|
|
2183
|
-
self.sync_policy_compliance()
|
|
2184
|
-
|
|
2185
|
-
def is_poam(self, finding: IntegrationFinding) -> bool: # type: ignore[override]
|
|
2186
|
-
"""
|
|
2187
|
-
Determine if an issue should be a POAM.
|
|
2188
|
-
|
|
2189
|
-
If the CLI flag `--create-poams/-cp` was provided (mapped to `self.create_poams`),
|
|
2190
|
-
force POAM for all created/updated issues. Otherwise, fall back to the default
|
|
2191
|
-
scanner behavior.
|
|
2192
|
-
"""
|
|
2193
|
-
try:
|
|
2194
|
-
if getattr(self, "create_poams", False):
|
|
2195
|
-
return True
|
|
2196
|
-
except Exception:
|
|
2197
|
-
pass
|
|
2198
|
-
return super().is_poam(finding)
|
|
2199
|
-
|
|
2200
|
-
def create_or_update_issue_from_finding(
|
|
2201
|
-
self,
|
|
2202
|
-
title: str,
|
|
2203
|
-
finding: IntegrationFinding,
|
|
2204
|
-
) -> regscale_models.Issue:
|
|
2205
|
-
"""
|
|
2206
|
-
Create/update the issue with ALL fields set BEFORE saving.
|
|
2207
|
-
|
|
2208
|
-
This method ensures proper data flow:
|
|
2209
|
-
1. Check for existing issues to prevent duplicates
|
|
2210
|
-
2. Pre-populate compliance fields on the finding
|
|
2211
|
-
3. Use parent class logic which saves with all fields set
|
|
2212
|
-
|
|
2213
|
-
This fixes the duplicate issue creation problem by using proper
|
|
2214
|
-
duplicate detection and avoids double-saving.
|
|
2215
|
-
"""
|
|
2216
|
-
# Load cache if not already loaded for duplicate detection
|
|
2217
|
-
self._load_existing_records_cache()
|
|
2218
|
-
|
|
2219
|
-
# CRITICAL: Pre-populate compliance fields on the finding BEFORE parent call
|
|
2220
|
-
# This ensures the parent class saves the issue with all fields already set
|
|
2221
|
-
self._populate_compliance_fields_on_finding(finding)
|
|
2222
|
-
|
|
2223
|
-
# CRITICAL FIX: If assessment_id is set, prepare the finding for assessment parenting
|
|
2224
|
-
if hasattr(finding, "assessment_id") and finding.assessment_id:
|
|
2225
|
-
assessment_id = finding.assessment_id
|
|
2226
|
-
logger.debug(f"PRE-SETTING ASSESSMENT PARENT: assessmentId={assessment_id}")
|
|
2227
|
-
|
|
2228
|
-
# Add parent override fields to the finding for the ScannerIntegration to use
|
|
2229
|
-
finding._override_parent_id = assessment_id
|
|
2230
|
-
finding._override_parent_module = "assessments"
|
|
2231
|
-
|
|
2232
|
-
logger.debug(f" ✅ Finding will use parent: assessments #{assessment_id}")
|
|
2233
|
-
|
|
2234
|
-
# Check for existing issue by external_id first
|
|
2235
|
-
external_id = finding.external_id
|
|
2236
|
-
existing_issue = self._find_existing_issue_cached(external_id)
|
|
2237
|
-
|
|
2238
|
-
if existing_issue:
|
|
2239
|
-
return self._update_existing_issue_with_compliance_fields(existing_issue, title, finding)
|
|
2240
|
-
else:
|
|
2241
|
-
# Set finding context for our override method to access
|
|
2242
|
-
self._current_finding_context = finding
|
|
2243
|
-
try:
|
|
2244
|
-
# Parent class will now create/save the issue with compliance fields already set
|
|
2245
|
-
return super().create_or_update_issue_from_finding(title, finding)
|
|
2246
|
-
finally:
|
|
2247
|
-
# Clean up context
|
|
2248
|
-
if hasattr(self, "_current_finding_context"):
|
|
2249
|
-
delattr(self, "_current_finding_context")
|
|
2250
|
-
-    def _update_existing_issue_with_compliance_fields(
-        self, existing_issue: regscale_models.Issue, title: str, finding: IntegrationFinding
-    ) -> regscale_models.Issue:
-        """
-        Update existing issue with basic fields and enhance with compliance-specific fields.
-
-        :param existing_issue: The existing issue to update
-        :param title: New issue title
-        :param finding: Finding with updated data
-        :return: Updated issue with all fields set
-        """
-
-        # Update basic fields (similar to parent class logic)
-        existing_issue.title = title
-        existing_issue.description = finding.description
-        existing_issue.severityLevel = finding.severity
-        existing_issue.status = finding.status
-        existing_issue.dateLastUpdated = self.scan_date
-
-        # Set control-related field
-        if getattr(finding, "control_labels", None):
-            existing_issue.affectedControls = ",".join(finding.control_labels)
-        elif getattr(finding, "affected_controls", None):
-            existing_issue.affectedControls = finding.affected_controls
-
-        # Enhance with compliance-specific fields
-        self._enhance_issue_with_compliance_fields(existing_issue, finding)
-
-        # CRITICAL FIX: Handle assessment parenting for existing issues too
-        if hasattr(finding, "assessment_id") and finding.assessment_id:
-            assessment_id = finding.assessment_id
-
-            # Set assessment as the parent
-            existing_issue.parentId = assessment_id
-            existing_issue.parentModule = "assessments"
-            existing_issue.assessmentId = assessment_id
-
-        existing_issue.save()
-
-        return existing_issue
-
-    def _create_or_update_issue(
-        self,
-        finding: IntegrationFinding,
-        issue_status,
-        title: str,
-        existing_issue=None,
-    ):
-        """
-        Override parent method to handle assessment parenting correctly.
-
-        CRITICAL FIX: Check if the finding has assessment parent overrides and apply them.
-        """
-        asset_identifier = self.get_consolidated_asset_identifier(finding, existing_issue)
-        issue_data = self._prepare_issue_data(finding, title)
-
-        if existing_issue:
-            logger.debug(
-                "Updating existing issue %s with assetIdentifier %s", existing_issue.id, finding.asset_identifier
-            )
-
-        issue = existing_issue or regscale_models.Issue()
-        parent_info = self._get_parent_info(finding)
-
-        self._set_basic_issue_properties(issue, finding, issue_status, issue_data, parent_info, asset_identifier)
-        self._set_compliance_properties(issue, finding)
-        self._set_additional_properties(issue, finding, issue_data)
-
-        if finding.cve:
-            issue = self.lookup_kev_and_update_issue(cve=finding.cve, issue=issue, cisa_kevs=self._kev_data)
-
-        issue = self._save_or_create_issue_record(issue, finding, existing_issue, issue_data["is_poam"])
-
-        if issue and issue.id:
-            self._handle_post_creation_tasks(issue, finding, existing_issue)
-        else:
-            logger.debug("Skipping milestone creation - issue has no ID")
-
-        return issue
-
-    def _prepare_issue_data(self, finding: IntegrationFinding, title: str) -> Dict[str, Any]:
-        """Prepare basic issue data from finding."""
-        return {
-            "issue_title": self.get_issue_title(finding) or title,
-            "description": finding.description or "",
-            "remediation_description": finding.recommendation_for_mitigation or finding.remediation or "",
-            "is_poam": self.is_poam(finding),
-        }
-
-    def _get_parent_info(self, finding: IntegrationFinding) -> Dict[str, Any]:
-        """Get parent information for the issue."""
-        if hasattr(finding, "_override_parent_id") and hasattr(finding, "_override_parent_module"):
-            parent_id = finding._override_parent_id
-            parent_module = finding._override_parent_module
-            logger.debug(f"USING OVERRIDE PARENT: {parent_module} #{parent_id}")
-        else:
-            parent_id = self.plan_id
-            parent_module = self.parent_module
-
-        return {"parent_id": parent_id, "parent_module": parent_module}
-
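As a rough illustration of the parent-override convention read by _get_parent_info in the removed code above, the sketch below uses a plain dataclass finding; the attribute names mirror the diff, while the default plan id and module value are made up.

from dataclasses import dataclass

@dataclass
class _Finding:
    _override_parent_id: int = 0
    _override_parent_module: str = ""

def pick_parent(finding: _Finding, plan_id: int = 12, plan_module: str = "securityplans") -> dict:
    # prefer the per-finding override (for example an assessment) over the plan default
    if finding._override_parent_id and finding._override_parent_module:
        return {"parent_id": finding._override_parent_id, "parent_module": finding._override_parent_module}
    return {"parent_id": plan_id, "parent_module": plan_module}

print(pick_parent(_Finding(_override_parent_id=301, _override_parent_module="assessments")))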
-    def _set_basic_issue_properties(
-        self,
-        issue: regscale_models.Issue,
-        finding: IntegrationFinding,
-        issue_status,
-        issue_data: Dict[str, Any],
-        parent_info: Dict[str, Any],
-        asset_identifier: str,
-    ) -> None:
-        """Set basic properties on the issue."""
-        issue.parentId = parent_info["parent_id"]
-        issue.parentModule = parent_info["parent_module"]
-        issue.vulnerabilityId = finding.vulnerability_id
-        issue.title = issue_data["issue_title"]
-        issue.dateCreated = finding.date_created
-        issue.status = issue_status
-        issue.dateCompleted = (
-            self.get_date_completed(finding, issue_status)
-            if issue_status == regscale_models.IssueStatus.Closed
-            else None
-        )
-        issue.severityLevel = finding.severity
-        issue.issueOwnerId = self.assessor_id
-        issue.securityPlanId = self.plan_id if not self.is_component else None
-        issue.identification = finding.identification
-        issue.dateFirstDetected = finding.first_seen
-        issue.assetIdentifier = asset_identifier
-
-        # Ensure due date is set
-        self._set_issue_due_date(issue, finding)
-
-    def _set_compliance_properties(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
-        """Set compliance-specific properties."""
-        issue.assessmentId = finding.assessment_id
-        logger.debug(f"SETTING assessmentId = {finding.assessment_id}")
-
-        control_id = self.get_control_implementation_id_for_cci(finding.cci_ref) if finding.cci_ref else None
-        issue.controlId = control_id
-
-        cci_control_ids = [control_id] if control_id is not None else []
-        if finding.affected_controls:
-            issue.affectedControls = finding.affected_controls
-        elif finding.control_labels:
-            issue.affectedControls = ", ".join(sorted({cl for cl in finding.control_labels if cl}))
-
-        issue.controlImplementationIds = list(set(finding._control_implementation_ids + cci_control_ids))
-
-    def _set_additional_properties(
-        self, issue: regscale_models.Issue, finding: IntegrationFinding, issue_data: Dict[str, Any]
-    ) -> None:
-        """Set additional issue properties."""
-        issue.description = issue_data["description"]
-        issue.sourceReport = finding.source_report or self.title
-        issue.recommendedActions = finding.recommendation_for_mitigation
-        issue.securityChecks = finding.security_check or finding.external_id
-        issue.remediationDescription = issue_data["remediation_description"]
-        issue.integrationFindingId = self.get_finding_identifier(finding)
-        issue.poamComments = finding.poam_comments
-        issue.cve = finding.cve
-        issue.isPoam = issue_data["is_poam"]
-        issue.basisForAdjustment = (
-            finding.basis_for_adjustment if finding.basis_for_adjustment else f"{self.title} import"
-        )
-        issue.pluginId = finding.plugin_id
-        issue.originalRiskRating = regscale_models.Issue.assign_risk_rating(finding.severity)
-        issue.changes = "<p>Current: {}</p><p>Planned: {}</p>".format(
-            finding.milestone_changes, finding.planned_milestone_changes
-        )
-        issue.adjustedRiskRating = finding.adjusted_risk_rating
-        issue.riskAdjustment = finding.risk_adjustment
-        issue.operationalRequirement = finding.operational_requirements
-        issue.deviationRationale = finding.deviation_rationale
-        issue.dateLastUpdated = get_current_datetime()
-        issue.affectedControls = finding.affected_controls
-
-    def _save_or_create_issue_record(
-        self, issue: regscale_models.Issue, finding: IntegrationFinding, existing_issue, is_poam: bool
-    ) -> regscale_models.Issue:
-        """Save or create the issue record."""
-        if existing_issue:
-            logger.debug(f"Saving existing issue {issue.id} with assessmentId={issue.assessmentId}")
-            issue.save(bulk=True)
-        else:
-            logger.info(f"Creating new issue with assessmentId={issue.assessmentId}")
-            issue = issue.create_or_update(
-                bulk_update=True, defaults={"otherIdentifier": self._get_other_identifier(finding, is_poam)}
-            )
-            if issue and issue.id:
-                logger.debug(f"Issue created with ID: {issue.id}")
-                self.extra_data_to_properties(finding, issue.id)
-            else:
-                logger.error(f"Issue creation failed - no ID returned for finding {finding.external_id}")
-                return None
-        return issue
-
-    def _handle_post_creation_tasks(
-        self, issue: regscale_models.Issue, finding: IntegrationFinding, existing_issue
-    ) -> None:
-        """Handle tasks after issue creation/update."""
-        if existing_issue and ScannerVariables.useMilestones:
-            self._ensure_issue_has_milestone(issue, finding)
-
-        self._handle_property_and_milestone_creation(issue, finding, existing_issue)
-
-    def _populate_compliance_fields_on_finding(self, finding: IntegrationFinding) -> None:
-        """
-        Pre-populate compliance-specific fields on the finding before issue creation.
-
-        This ensures controlId and assessmentId are set on the finding object
-        so the parent class can save the issue with all fields in one operation.
-
-        The parent class expects:
-        - finding.assessment_id -> issue.assessmentId
-        - finding.cci_ref -> calls get_control_implementation_id_for_cci() -> issue.controlId
-
-        :param finding: Finding to populate with compliance fields
-        """
-        try:
-            # Set compliance fields on the finding itself before issue creation
-            if hasattr(finding, "rule_id") and finding.rule_id:
-                control_id = self._normalize_control_id_string(finding.rule_id)
-                if control_id:
-                    # Get control implementation ID
-                    impl_id = self._issue_field_setter._get_or_find_implementation_id(control_id)
-                    if impl_id:
-                        # Store the control ID as cci_ref so parent class calls our override method
-                        finding.cci_ref = control_id
-                        # Cache the implementation ID for our override method
-                        finding._wiz_control_implementation_id = impl_id
-
-                        # Get assessment ID and set it on the finding (parent class uses this directly)
-                        assess_id = self._issue_field_setter._get_or_find_assessment_id(impl_id)
-                        if assess_id:
-                            finding.assessment_id = assess_id
-        except Exception:
-            pass
-
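A simplified, assumption-laden sketch of the pre-population flow removed above: rule_id resolves to a normalized control, then an implementation id, then an assessment id, all written back onto the finding so one save can carry every field. The dict keys and sample ids are hypothetical.

def populate(finding: dict, impl_by_control: dict, assessment_by_impl: dict) -> dict:
    control_id = (finding.get("rule_id") or "").strip().upper()
    impl_id = impl_by_control.get(control_id)
    if impl_id:
        finding["cci_ref"] = control_id
        finding["control_implementation_id"] = impl_id
        assess_id = assessment_by_impl.get(impl_id)
        if assess_id:
            finding["assessment_id"] = assess_id
    return finding

print(populate({"rule_id": "ac-2(1)"}, {"AC-2(1)": 55}, {55: 301}))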
-    def _ensure_issue_has_milestone(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
-        """
-        Ensure that an existing issue has at least one milestone.
-
-        This method checks if an existing issue has any milestones, and if not,
-        creates an initial "Issue created" milestone. This handles cases where
-        issues were created before milestone tracking was enabled, or were
-        created through other means without milestones.
-
-        :param issue: The existing issue to check for milestones
-        :param finding: The finding data
-        :return: None
-        """
-        try:
-            # Check if the issue already has milestones
-            # We need to make a direct API call because the Milestone model's endpoint configuration
-            # doesn't include the module parameter that the API expects
-            from regscale.models.regscale_models.milestone import Milestone
-
-            try:
-                existing_milestones = Milestone.get_all_by_parent(parent_id=issue.id, parent_module="issues")
-                logger.debug(f"Fetched {len(existing_milestones)} existing milestones for issue {issue.id}")
-            except Exception as api_error:
-                # If the API call fails, log it and assume no milestones exist
-                logger.debug(f"Could not fetch existing milestones for issue {issue.id}: {api_error}")
-                existing_milestones = []
-
-            if not existing_milestones:
-                # Create an initial milestone for the existing issue
-                logger.debug(f"Creating initial milestone for existing issue {issue.id} that had no milestones")
-
-                # Use the issue's dateCreated if available, otherwise use current date
-                if hasattr(issue, "dateCreated") and issue.dateCreated:
-                    # Convert to string if it's a datetime object (e.g., in tests)
-                    if hasattr(issue.dateCreated, "isoformat"):
-                        milestone_date = issue.dateCreated.isoformat()
-                    else:
-                        milestone_date = issue.dateCreated
-                else:
-                    milestone_date = get_current_datetime()
-
-                regscale_models.Milestone(
-                    title=f"Issue created by {self.title}",
-                    milestoneDate=milestone_date,
-                    responsiblePersonId=self.assessor_id,
-                    parentID=issue.id,
-                    parentModule=regscale_models.Issue.get_module_slug(),
-                ).create()
-
-                logger.debug(f"Created initial milestone for existing issue {issue.id}")
-        except Exception as e:
-            logger.warning(f"Could not check/create milestone for issue {issue.id}: {e}")
-
-    def _enhance_issue_with_compliance_fields(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
-        """
-        Enhance an issue with compliance-specific fields (controlId and assessmentId).
-
-        NOTE: This method is now primarily for the existing issue update path.
-        New issues should have fields set via _populate_compliance_fields_on_finding.
-
-        :param issue: Issue object to enhance
-        :param finding: Finding with control data
-        """
-        try:
-            # Set control implementation and assessment IDs using our field setter
-            if hasattr(finding, "rule_id") and finding.rule_id:
-                control_id = self._normalize_control_id_string(finding.rule_id)
-                if control_id:
-                    result = self._issue_field_setter.set_control_and_assessment_ids(issue, control_id)
-                    if not result.success:
-                        logger.warning(f"Failed to set compliance fields for '{control_id}': {result.error_message}")
-        except Exception:
-            pass
-
-    def get_control_implementation_id_for_cci(self, cci: Optional[str]) -> Optional[int]:
-        """
-        Override parent method to return control implementation ID for Wiz control IDs.
-
-        The parent class calls this method when finding.cci_ref is set, and uses the
-        returned value to set issue.controlId. We store our control implementation
-        ID on the finding and return it here.
-
-        :param cci: Control identifier (e.g., 'AC-2(1)') stored in finding.cci_ref
-        :return: Control implementation ID if found, None otherwise
-        """
-        # Check if this is a call with our cached implementation ID on the current finding
-        if hasattr(self, "_current_finding_context"):
-            finding = self._current_finding_context
-            if (
-                hasattr(finding, "_wiz_control_implementation_id")
-                and hasattr(finding, "cci_ref")
-                and finding.cci_ref == cci
-            ):
-                impl_id = finding._wiz_control_implementation_id
-                return impl_id
-
-        # Fallback: try to look it up directly (for edge cases)
-        if cci:
-            control_id = self._normalize_control_id_string(cci)
-            if control_id:
-                impl_id = self._issue_field_setter._get_or_find_implementation_id(control_id)
-                if impl_id:
-                    return impl_id
-
-        # Final fallback to parent class behavior
-        return super().get_control_implementation_id_for_cci(cci)
-
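The override above checks the cached finding context first, then a direct lookup, then the parent implementation. A generic sketch of that ordered-fallback pattern, with placeholder lambdas standing in for the three lookups, is shown below.

from typing import Callable, Optional

def resolve_with_fallbacks(*lookups: Callable[[], Optional[int]]) -> Optional[int]:
    # return the first lookup that yields a value; mirrors the cached-context,
    # direct-lookup, then parent-class order described in the removed docstring
    for lookup in lookups:
        value = lookup()
        if value is not None:
            return value
    return None

print(resolve_with_fallbacks(lambda: None, lambda: 55, lambda: 99))  # -> 55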
-    def _populate_control_implementation_cache(self) -> None:
-        """
-        Pre-populate the control implementation and assessment caches.
-
-        CRITICAL: This ensures controlId and assessmentId can be reliably set on issues.
-        This method loads control implementations and their associated assessments into
-        cache to enable fast lookups during issue processing.
-
-        :return: None
-        :rtype: None
-        """
-        try:
-            from regscale.models import regscale_models
-
-            logger.info("Pre-populating control implementation cache for issue processing...")
-
-            # Get all control implementations for this plan
-            implementations = regscale_models.ControlImplementation.get_all_by_parent(
-                parent_id=self.plan_id, parent_module=self.parent_module
-            )
-
-            if not implementations:
-                logger.warning("No control implementations found for this plan")
-                return
-
-            logger.info(f"Found {len(implementations)} control implementations to cache")
-
-            # Cache SecurityControl lookups to avoid repeated API calls
-            security_control_cache = {}
-            controls_mapped = 0
-            assessments_mapped = 0
-
-            for impl in implementations:
-                try:
-                    # Skip if no controlID reference
-                    if not hasattr(impl, "controlID") or not impl.controlID:
-                        continue
-
-                    # Get or cache the security control
-                    if impl.controlID not in security_control_cache:
-                        security_control = regscale_models.SecurityControl.get_object(object_id=impl.controlID)
-                        security_control_cache[impl.controlID] = security_control
-                    else:
-                        security_control = security_control_cache[impl.controlID]
-
-                    if security_control and hasattr(security_control, "controlId"):
-                        # Normalize and cache the control ID mapping
-                        normalized_id = self._normalize_control_id_string(security_control.controlId)
-                        if normalized_id:
-                            self._impl_id_by_control[normalized_id] = impl.id
-                            controls_mapped += 1
-
-                            # Also try to cache the most recent assessment
-                            try:
-                                assessments = regscale_models.Assessment.get_all_by_parent(
-                                    parent_id=impl.id, parent_module="controls"
-                                )
-                                if assessments:
-                                    # Get the most recent assessment
-                                    assessments.sort(key=lambda a: a.id if hasattr(a, "id") else 0, reverse=True)
-                                    self._assessment_by_impl_today[impl.id] = assessments[0]
-                                    assessments_mapped += 1
-                            except Exception:
-                                pass
-
-                except Exception:
-                    continue
-
-            logger.info("Control implementation cache populated:")
-            logger.info(f" - {controls_mapped} control ID mappings")
-            logger.info(f" - {assessments_mapped} assessment mappings")
-
-        except Exception as e:
-            logger.error(f"Error populating control implementation cache: {e}")
-
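For orientation, a hypothetical shape of the two caches populated above: normalized control labels map to implementation ids, and implementation ids map to their newest assessment. The values shown are invented.

impl_id_by_control = {"AC-2(1)": 55, "AC-6(9)": 71}
assessment_by_impl_today = {55: {"id": 301, "plannedStart": "2024-05-01"}}

def lookup(control: str) -> tuple:
    # resolve a control label to (implementation id, assessment id) via the caches
    impl_id = impl_id_by_control.get(control)
    assessment = assessment_by_impl_today.get(impl_id, {})
    return impl_id, assessment.get("id")

print(lookup("AC-2(1)"))  # -> (55, 301)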
-    def _refresh_assessment_cache_after_creation(self) -> None:
-        """
-        Refresh the assessment cache after control assessments have been created.
-
-        CRITICAL: This ensures that newly created assessments from the sync_control_assessments
-        step are available when processing issues. Without this, assessmentId will not be set
-        on issues because the cache only contains old assessments.
-
-        :return: None
-        :rtype: None
-        """
-        try:
-            from regscale.models import regscale_models
-            from datetime import datetime
-
-            logger.info("Refreshing assessment cache with newly created assessments...")
-
-            refreshed_count = 0
-            today = datetime.now().date()
-
-            # Only refresh assessments for implementations we know about
-            for control_id, impl_id in self._impl_id_by_control.items():
-                try:
-                    # Get all assessments for this implementation
-                    assessments = regscale_models.Assessment.get_all_by_parent(
-                        parent_id=impl_id, parent_module="controls"
-                    )
-
-                    if not assessments:
-                        continue
-
-                    # Find today's assessment (most recent created today)
-                    today_assessments = []
-                    for assessment in assessments:
-                        assessment_date = None
-                        try:
-                            # Try to get assessment date from various fields
-                            date_fields = ["actualFinish", "plannedFinish", "dateCreated"]
-                            for field in date_fields:
-                                if hasattr(assessment, field) and getattr(assessment, field):
-                                    date_value = getattr(assessment, field)
-                                    if isinstance(date_value, str):
-                                        from regscale.core.app.utils.app_utils import regscale_string_to_datetime
-
-                                        assessment_date = regscale_string_to_datetime(date_value).date()
-                                    elif hasattr(date_value, "date"):
-                                        assessment_date = date_value.date()
-                                    else:
-                                        assessment_date = date_value
-                                    break
-
-                            if assessment_date == today:
-                                today_assessments.append(assessment)
-                        except Exception:
-                            continue
-
-                    # Use most recent today's assessment, or fallback to most recent overall
-                    if today_assessments:
-                        best_assessment = max(today_assessments, key=lambda a: getattr(a, "id", 0))
-                    else:
-                        best_assessment = max(assessments, key=lambda a: getattr(a, "id", 0))
-
-                    # Update the cache
-                    self._assessment_by_impl_today[impl_id] = best_assessment
-                    refreshed_count += 1
-
-                except Exception:
-                    continue
-
-            logger.info(f"Assessment cache refreshed: {refreshed_count} assessments updated")
-
-        except Exception as e:
-            logger.error(f"Error refreshing assessment cache: {e}")
-
-    def _find_control_implementation_id(self, control_id: str) -> Optional[int]:
-        """
-        Find control implementation ID by querying the database directly.
-        OPTIMIZED: Uses controlID field directly and caches SecurityControl lookups.
-
-        :param str control_id: Normalized control ID (e.g., 'AC-2(1)')
-        :return: Control implementation ID if found
-        :rtype: Optional[int]
-        """
-        try:
-            from regscale.models import regscale_models
-
-            # First check cache
-            if hasattr(self, "_impl_id_by_control") and control_id in self._impl_id_by_control:
-                cached_id = self._impl_id_by_control[control_id]
-                return cached_id
-
-            # Get all control implementations for this plan
-            implementations = regscale_models.ControlImplementation.get_all_by_parent(
-                parent_id=self.plan_id, parent_module=self.parent_module
-            )
-
-            # Create a cache for SecurityControl lookups to avoid repeated API calls
-            security_control_cache = {}
-
-            for impl in implementations:
-                try:
-                    # Use controlID field which references the SecurityControl
-                    if not hasattr(impl, "controlID") or not impl.controlID:
-                        continue
-
-                    # Check if we've already looked up this security control
-                    if impl.controlID not in security_control_cache:
-                        security_control = regscale_models.SecurityControl.get_object(object_id=impl.controlID)
-                        security_control_cache[impl.controlID] = security_control
-                    else:
-                        security_control = security_control_cache[impl.controlID]
-
-                    if security_control and hasattr(security_control, "controlId"):
-                        impl_control_id = self._normalize_control_id_string(security_control.controlId)
-
-                        if impl_control_id == control_id:
-                            logger.info(f"Found control implementation {impl.id} for control {control_id}")
-                            # Cache it for future lookups
-                            if not hasattr(self, "_impl_id_by_control"):
-                                self._impl_id_by_control = {}
-                            self._impl_id_by_control[control_id] = impl.id
-                            return impl.id
-                except Exception:
-                    continue
-
-            logger.warning(
-                f"No control implementation found for control {control_id} among {len(implementations)} implementations"
-            )
-            return None
-        except Exception as e:
-            logger.error(f"Error finding control implementation for {control_id}: {e}")
-            return None
-
-    def _find_assessment_id_for_implementation(self, implementation_id: int) -> Optional[int]:
-        """
-        Find the most recent assessment ID for a control implementation.
-        IMPROVED: Better date handling and caching.
-
-        :param int implementation_id: Control implementation ID
-        :return: Assessment ID if found
-        :rtype: Optional[int]
-        """
-        try:
-            from regscale.models import regscale_models
-            from datetime import datetime
-            from regscale.core.app.utils.app_utils import regscale_string_to_datetime
-
-            # Check cache first
-            if hasattr(self, "_assessment_by_impl_today") and implementation_id in self._assessment_by_impl_today:
-                cached_assessment = self._assessment_by_impl_today[implementation_id]
-                if cached_assessment and hasattr(cached_assessment, "id"):
-                    logger.debug(
-                        f"Found cached assessment {cached_assessment.id} for implementation {implementation_id}"
-                    )
-                    return cached_assessment.id
-
-            # Get assessments for this control implementation
-            assessments = regscale_models.Assessment.get_all_by_parent(
-                parent_id=implementation_id, parent_module="controls"
-            )
-
-            if not assessments:
-                logger.warning(f"No assessments found for control implementation {implementation_id}")
-                return None
-
-            # Find the most recent assessment (preferably from today)
-            today = datetime.now().date()
-            today_assessments = []
-            recent_assessments = []
-
-            for assessment in assessments:
-                try:
-                    assessment_date = None
-
-                    # Try multiple date fields in order of preference
-                    date_fields = ["plannedStart", "actualFinish", "plannedFinish", "dateCreated"]
-                    for field in date_fields:
-                        if hasattr(assessment, field) and getattr(assessment, field):
-                            date_value = getattr(assessment, field)
-                            if isinstance(date_value, str):
-                                assessment_date = regscale_string_to_datetime(date_value).date()
-                            elif hasattr(date_value, "date"):
-                                assessment_date = date_value.date()
-                            else:
-                                assessment_date = date_value
-                            break
-
-                    if assessment_date:
-                        if assessment_date == today:
-                            today_assessments.append(assessment)
-                        else:
-                            recent_assessments.append((assessment, assessment_date))
-                    else:
-                        # Assessment with no parseable date
-                        recent_assessments.append((assessment, None))
-                except Exception:
-                    recent_assessments.append((assessment, None))
-
-            # Prefer today's assessments
-            if today_assessments:
-                # Sort by ID (highest/newest first) if multiple today
-                today_assessments.sort(key=lambda a: a.id if hasattr(a, "id") else 0, reverse=True)
-                assessment = today_assessments[0]
-                logger.info(f"Found today's assessment {assessment.id} for control implementation {implementation_id}")
-                # Cache it for future lookups
-                if not hasattr(self, "_assessment_by_impl_today"):
-                    self._assessment_by_impl_today = {}
-                self._assessment_by_impl_today[implementation_id] = assessment
-                return assessment.id
-
-            # Fall back to most recent assessment
-            if recent_assessments:
-                # Sort by date (newest first), handling None dates
-                recent_assessments.sort(
-                    key=lambda x: (x[1] if x[1] else datetime.min.date(), x[0].id if hasattr(x[0], "id") else 0),
-                    reverse=True,
-                )
-                assessment = recent_assessments[0][0]
-                logger.info(f"Found recent assessment {assessment.id} for control implementation {implementation_id}")
-                # Cache it even if not today's
-                if not hasattr(self, "_assessment_by_impl_today"):
-                    self._assessment_by_impl_today = {}
-                self._assessment_by_impl_today[implementation_id] = assessment
-                return assessment.id
-
-            logger.warning(f"No usable assessments found for control implementation {implementation_id}")
-            return None
-        except Exception as e:
-            logger.error(f"Error finding assessment for control implementation {implementation_id}: {e}")
-            return None
-
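The selection rule in the two cache methods above (newest assessment dated today, otherwise newest overall) can be reduced to a few lines. The sketch below uses plain dicts with made-up fields rather than RegScale models.

from datetime import date

def pick_assessment(assessments: list, today: date) -> dict:
    # prefer the newest assessment dated today, otherwise the newest overall
    todays = [a for a in assessments if a.get("date") == today]
    pool = todays or assessments
    return max(pool, key=lambda a: a.get("id", 0))

sample = [{"id": 1, "date": date(2024, 1, 2)}, {"id": 9, "date": date.today()}]
print(pick_assessment(sample, date.today()))  # -> {'id': 9, ...}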
-    def _reparent_issue_to_asset(self, issue: regscale_models.Issue) -> None:
-        """
-        Reparent issue to the control implementation instead of the security plan.
-        This ensures issues are properly associated with their control implementations.
-
-        :param regscale_models.Issue issue: Issue to reparent to control implementation
-        :param IntegrationFinding finding: Finding with control information
-        :return: None
-        :rtype: None
-        """
-        # If we have a control implementation ID, parent the issue to it
-        if issue.controlId:
-            issue.parentId = issue.controlId
-            issue.parentModule = "controls"
-        else:
-            # Fall back to security plan if no control implementation found
-            pass
-
-    def _update_scan_history(self, scan_history: regscale_models.ScanHistory) -> None:
-        """
-        No scan history updates for compliance report ingest.
-
-        :param regscale_models.ScanHistory scan_history: Scan history record (unused)
-        """
-        # No scan history for compliance report ingest
-        pass
-
-    def _process_control_assessments(self) -> None:
-        """
-        Process control assessments only for controls that have validated compliance items
-        with existing assets in RegScale. This ensures we don't create assessments for
-        controls that have no assets in our boundary.
-        """
-        logger.info("Starting control assessment processing for Wiz compliance integration")
-
-        self._load_existing_records_cache()
-
-        implementations = self._get_control_implementations()
-        if not implementations:
-            logger.warning("No control implementations found for assessment processing")
-            return
-
-        validated_controls = self._validate_controls_with_assets()
-        if not validated_controls["controls_with_assets"]:
-            logger.warning("No controls have assets in RegScale boundary - no control assessments will be created")
-            logger.info("SUMMARY: 0 control assessments created (no assets exist in RegScale)")
-            return
-
-        assessments_created = self._create_assessments_for_validated_controls(
-            validated_controls["controls_with_assets"], implementations
-        )
-        self._log_assessment_summary(assessments_created, validated_controls)
-
-    def _validate_controls_with_assets(self) -> Dict[str, Any]:
-        """Validate controls and identify those with existing assets."""
-        all_potential_controls = set(self.passing_controls.keys()) | set(self.failing_controls.keys())
-        logger.debug(
-            f"Found {len(all_potential_controls)} potential controls from compliance data: {sorted(all_potential_controls)}"
-        )
-
-        validated_controls_with_assets = {}
-        validated_passing_controls = {}
-        validated_failing_controls = {}
-
-        for control_id in all_potential_controls:
-            validation_result = self._validate_single_control(control_id)
-
-            if validation_result["should_process"]:
-                validated_controls_with_assets[control_id] = validation_result["asset_identifiers"]
-
-                if control_id in self.failing_controls:
-                    validated_failing_controls[control_id] = self.failing_controls[control_id]
-                elif control_id in self.passing_controls:
-                    validated_passing_controls[control_id] = self.passing_controls[control_id]
-
-        return {
-            "controls_with_assets": validated_controls_with_assets,
-            "passing_controls": validated_passing_controls,
-            "failing_controls": validated_failing_controls,
-        }
-
-    def _validate_single_control(self, control_id: str) -> Dict[str, Any]:
-        """Validate a single control for asset existence."""
-        is_passing_control = control_id in self.passing_controls
-
-        if is_passing_control:
-            control_items = self._get_control_compliance_items(control_id)
-        else:
-            control_items = self._get_validated_control_compliance_items(control_id)
-
-        if not control_items and is_passing_control:
-            logger.debug(f"Control {control_id} is passing - will process for compliance documentation")
-            return {"should_process": True, "asset_identifiers": []}
-
-        if not control_items:
-            return {"should_process": False, "asset_identifiers": []}
-
-        asset_identifiers = self._collect_asset_identifiers(control_items, control_id, is_passing_control)
-
-        # For passing controls, allow through even without assets
-        # For failing controls, require at least one asset
-        should_process = bool(asset_identifiers) or is_passing_control
-
-        return {"should_process": should_process, "asset_identifiers": list(asset_identifiers)}
-
-    def _collect_asset_identifiers(self, control_items: List[Any], control_id: str, is_passing_control: bool) -> set:
-        """Collect asset identifiers for control items."""
-        asset_identifiers = set()
-        assets_found = 0
-
-        for item in control_items:
-            if hasattr(item, "resource_name") and item.resource_name:
-                resource_id = getattr(item, "resource_id", "")
-                # Verify the asset actually exists in RegScale (if not a passing control)
-                if is_passing_control or self._asset_exists_in_regscale(resource_id):
-                    asset_identifiers.add(item.resource_name)
-                    assets_found += 1
-                else:
-                    logger.debug(
-                        f"Control {control_id}: Asset {resource_id} ({item.resource_name}) not found in RegScale"
-                    )
-
-        logger.debug(f"Found {assets_found} valid assets for control {control_id}")
-        return asset_identifiers
-
-    def _create_assessments_for_validated_controls(
-        self, validated_controls_with_assets: Dict[str, List[str]], implementations: List[Any]
-    ) -> int:
-        """Create assessments for validated controls."""
-        assessments_created = 0
-        processed_impl_today: set[int] = set()
-
-        for control_id in validated_controls_with_assets.keys():
-            created = self._process_single_control_assessment(
-                control_id=control_id,
-                implementations=implementations,
-                processed_impl_today=processed_impl_today,
-            )
-            assessments_created += created
-
-        return assessments_created
-
-    def _log_assessment_summary(self, assessments_created: int, validated_controls: Dict[str, Any]) -> None:
-        """Log summary of assessment creation."""
-        validated_control_ids = set(validated_controls["controls_with_assets"].keys())
-        validated_failing_controls = validated_controls["failing_controls"]
-
-        passing_assessments = len([cid for cid in validated_control_ids if cid not in validated_failing_controls])
-        failing_assessments = len([cid for cid in validated_control_ids if cid in validated_failing_controls])
-
-        if assessments_created > 0:
-            logger.info(
-                f"Created {assessments_created} control assessments: {passing_assessments} passing, {failing_assessments} failing"
-            )
-        else:
-            logger.warning(
-                f"No control assessments were actually created (0 assessments) despite finding {len(validated_controls['controls_with_assets'])} controls with assets"
-            )
-
-        logger.info(
-            f"CONTROL ASSESSMENT SUMMARY: {assessments_created} assessments created for {len(validated_controls['controls_with_assets'])} validated controls"
-        )
-
-    def _sync_assessment_cache_from_base_class(self) -> None:
-        """
-        Sync assessments from base class cache to our control cache.
-
-        This ensures that assessments created by the base class ComplianceIntegration
-        are available to our IssueFieldSetter for linking issues to assessments.
-        """
-        try:
-            # Copy assessments from base class cache to our cache
-            base_cache = getattr(self, "_assessment_by_impl_today", {})
-            synced_count = 0
-
-            for impl_id, assessment in base_cache.items():
-                self._control_cache.set_assessment(impl_id, assessment)
-                synced_count += 1
-
-            logger.info(f"Synced {synced_count} assessments from base class cache to control cache")
-
-        except Exception as e:
-            logger.warning(f"Failed to sync assessment cache: {e}")
-
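Illustrative only: the per-control verdicts produced by the validation methods above roll up into a dictionary consumed by the assessment-creation step. The verdict keys mirror the removed code; the "skipped" bucket and sample control ids are invented for the example.

def summarize(verdicts: dict) -> dict:
    return {
        "controls_with_assets": {c: v["asset_identifiers"] for c, v in verdicts.items() if v["should_process"]},
        "skipped": [c for c, v in verdicts.items() if not v["should_process"]],
    }

verdicts = {
    "ac-2(1)": {"should_process": True, "asset_identifiers": ["web-vm-01"]},
    "sc-7": {"should_process": False, "asset_identifiers": []},
}
print(summarize(verdicts))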
-    def _get_validated_control_compliance_items(self, control_id: str) -> List[ComplianceItem]:
-        """
-        Get validated compliance items for a specific control.
-        Only returns items that have existing assets in RegScale boundary.
-
-        :param str control_id: Control identifier to filter by
-        :return: List of validated compliance items for the control
-        :rtype: List[ComplianceItem]
-        """
-        validated_items: List[ComplianceItem] = []
-
-        for item in self.all_compliance_items:
-            # Check if this item matches the control
-            matches_control = False
-            if hasattr(item, "control_ids"):
-                item_control_ids = getattr(item, "control_ids", [])
-                if any(cid.lower() == control_id.lower() for cid in item_control_ids):
-                    matches_control = True
-            elif hasattr(item, "control_id") and item.control_id.lower() == control_id.lower():
-                matches_control = True
-
-            if not matches_control:
-                continue
-
-            # Additional validation: ensure the asset exists in RegScale
-            resource_id = getattr(item, "resource_id", "")
-            if resource_id and self._asset_exists_in_regscale(resource_id):
-                validated_items.append(item)
-            else:
-                logger.debug(
-                    f"Filtered out compliance item for control {control_id} - asset {resource_id} not in RegScale"
-                )
-
-        return validated_items
-
-    def _get_control_compliance_items(self, control_id: str) -> List[ComplianceItem]:
-        """
-        Get all compliance items for a specific control.
-        All items have already been filtered to framework-specific items with existing assets.
-
-        :param str control_id: Control identifier to filter by
-        :return: List of compliance items for the control
-        :rtype: List[ComplianceItem]
-        """
-        items: List[ComplianceItem] = []
-
-        for item in self.all_compliance_items:
-            # Check if this item matches the control
-            matches_control = False
-            if hasattr(item, "control_ids"):
-                item_control_ids = getattr(item, "control_ids", [])
-                if any(cid.lower() == control_id.lower() for cid in item_control_ids):
-                    matches_control = True
-            elif hasattr(item, "control_id") and item.control_id.lower() == control_id.lower():
-                matches_control = True
-
-            if matches_control:
-                items.append(item)
-
-        return items
-
-    # flake8: noqa: C901
-    def get_asset_by_identifier(self, identifier: str) -> Optional["regscale_models.Asset"]:
-        """
-        Override asset lookup for Wiz policy compliance integration.
-
-        For policy compliance, the identifier should be the Wiz resource ID.
-        We'll try multiple lookup strategies to find the corresponding RegScale asset.
-
-        :param str identifier: Asset identifier (should be Wiz resource ID)
-        :return: Asset if found, None otherwise
-        :rtype: Optional[regscale_models.Asset]
-        """
-
-        # First try the standard lookup by identifier (uses asset_map_by_identifier)
-        asset = super().get_asset_by_identifier(identifier)
-        if asset:
-            return asset
-
-        # If not found, try to find using our cached RegScale assets by Wiz ID
-        try:
-            if hasattr(self, "_regscale_assets_by_wiz_id") and self._regscale_assets_by_wiz_id:
-                # Direct lookup by Wiz ID (most common case)
-                if identifier in self._regscale_assets_by_wiz_id:
-                    regscale_asset = self._regscale_assets_by_wiz_id[identifier]
-                    return regscale_asset
-
-                # Fallback: check all assets for name/identifier matches
-                for wiz_id, regscale_asset in self._regscale_assets_by_wiz_id.items():
-                    # Check if asset name matches the identifier
-                    if regscale_asset.name == identifier:
-                        return regscale_asset
-
-                    # Also check identifier field
-                    if hasattr(regscale_asset, "identifier") and regscale_asset.identifier == identifier:
-                        return regscale_asset
-
-                    # Check other tracking number
-                    if (
-                        hasattr(regscale_asset, "otherTrackingNumber")
-                        and regscale_asset.otherTrackingNumber == identifier
-                    ):
-                        logger.debug(
-                            f"Found asset via otherTrackingNumber match: {regscale_asset.name} (Wiz ID: {wiz_id})"
-                        )
-                        return regscale_asset
-
-        except Exception:
-            pass
-
-        # Asset not found
-        return None
-
-    def _ensure_asset_for_finding(self, finding: IntegrationFinding) -> Optional["regscale_models.Asset"]:
-        """
-        Override asset creation for Wiz policy compliance integration.
-
-        We don't create assets in policy compliance integration - they come from
-        separate Wiz inventory import. If an asset isn't found, we skip the finding.
-
-        :param IntegrationFinding finding: Finding that needs an asset
-        :return: None (we don't create assets)
-        :rtype: Optional[regscale_models.Asset]
-        """
-        return None
-
-    def _process_consolidated_issues(self, findings: List[IntegrationFinding]) -> None:
-        """
-        Process pre-consolidated findings to create issues.
-
-        Since fetch_findings() now creates consolidated findings (one per control with all resources),
-        this method simply creates issues directly from each finding.
-
-        :param List[IntegrationFinding] findings: List of pre-consolidated findings to process
-        """
-        if not findings:
-            return
-
-        issues_processed = 0
-
-        for finding in findings:
-            try:
-                control_id = self._normalize_control_id_string(finding.rule_id) or finding.rule_id
-
-                # Create issue title
-                issue_title = self.get_issue_title(finding)
-
-                # Create issue directly from the consolidated finding
-                issue = self.create_or_update_issue_from_finding(title=issue_title, finding=finding)
-                if issue:
-                    issues_processed += 1
-
-                else:
-                    logger.debug(
-                        f"Failed to create issue for control {control_id} - create_or_update_issue_from_finding returned None"
-                    )
-
-            except Exception as e:
-                logger.error(f"Error processing consolidated issue for control {control_id}: {e}")
-
-        # Store the count for summary reporting
-        self._issues_processed_count = issues_processed
-
-    def _find_existing_issue_for_control(self) -> Optional["regscale_models.Issue"]:
-        """
-        Find existing issue for a specific control.
-
-        :param str control_id: Control ID to search for
-        :return: Existing issue if found
-        :rtype: Optional[regscale_models.Issue]
-        """
-        # This is a simplified check - in practice you might want to search by external_id or other fields
-        # that uniquely identify control-specific issues
-        return None # For now, always create new issues
-
-    def sync_compliance(self, *args, **kwargs) -> None:
-        """Override sync to use consolidated issue processing and add summary reporting."""
-        # Initialize issue counter
-        self._issues_created_count = 0
-
-        try:
-            # Initialize cache dictionaries if not already initialized
-            if not hasattr(self, "_impl_id_by_control"):
-                self._impl_id_by_control = {}
-            if not hasattr(self, "_assessment_by_impl_today"):
-                self._assessment_by_impl_today = {}
-
-            # Ensure existing records cache is loaded before processing
-            self._load_existing_records_cache()
-
-            # CRITICAL: Pre-populate control implementation cache before any processing
-            logger.info("Pre-populating control implementation cache for reliable issue linking...")
-            self._populate_control_implementation_cache()
-
-            # Call parent's compliance data processing (assessments, etc.) but skip issue creation
-            original_create_issues = self.create_issues
-            self.create_issues = False # Disable base class issue creation
-            super().sync_compliance() # Call the base ComplianceIntegration.sync_compliance method
-            self.create_issues = original_create_issues # Restore setting
-
-            # CRITICAL: Copy assessments from base class cache to our cache so IssueFieldSetter can find them
-            self._sync_assessment_cache_from_base_class()
-
-            # Now handle issue creation with consolidated logic
-            if self.create_issues:
-                findings = list(self.fetch_findings())
-                if findings:
-                    self._process_consolidated_issues(findings)
-
-            # Provide concise summary
-            issues_processed = getattr(self, "_issues_processed_count", 0)
-
-            if issues_processed > 0:
-                # Count actual unique issues in the database for this security plan
-                from regscale.models import regscale_models
-
-                actual_issues = len(
-                    regscale_models.Issue.get_all_by_parent(parent_id=self.plan_id, parent_module=self.parent_module)
-                )
-
-                logger.info(
-                    f"SUMMARY: Processed {issues_processed} policy violations resulting in {actual_issues} consolidated issues for failed controls for assets in RegScale"
-                )
-            else:
-                logger.info("SUMMARY: No issues processed - no failed controls with existing assets")
-
-        except Exception as e:
-            error_and_exit(f"Error during Wiz compliance sync: {e}")
-
-    def _get_regscale_asset_identifier(self, compliance_item: "WizComplianceItem") -> str:
-        """
-        Get the appropriate RegScale asset identifier for a compliance item.
-
-        For Wiz integrations, the asset_identifier_field is "wizId", so we need to return
-        the Wiz resource ID that will match what's stored in the RegScale Asset's wizId field.
-
-        :param WizComplianceItem compliance_item: Compliance item with resource information
-        :return: Wiz resource ID that matches the RegScale Asset's wizId field
-        :rtype: str
-        """
-        resource_id = getattr(compliance_item, "resource_id", "")
-        resource_name = getattr(compliance_item, "resource_name", "")
-
-        # For Wiz policy compliance, the asset identifier should be the Wiz resource ID
-        # because that's what gets stored in RegScale Asset's wizId field (asset_identifier_field = "wizId")
-        if resource_id:
-            return resource_id
-
-        # Fallback (should not normally happen since resource_id is required)
-        return resource_name or "Unknown Resource"
-
-    def _get_provider_unique_id_for_asset_identifier(self, compliance_item: "WizComplianceItem") -> str:
-        """
-        Get the provider unique ID for meaningful asset identification in eMASS exports.
-
-        This provides cloud provider-specific identifiers like ARNs, Azure resource IDs, etc.
-        instead of internal Wiz IDs for better readability in POAMs and eMASS exports.
-
-        :param WizComplianceItem compliance_item: Compliance item with resource information
-        :return: Provider unique ID or fallback to resource name/ID
-        :rtype: str
-        """
-        provider_unique_id = getattr(compliance_item, "provider_unique_id", "")
-        resource_name = getattr(compliance_item, "resource_name", "")
-        resource_id = getattr(compliance_item, "resource_id", "")
-
-        # Priority: providerUniqueId -> resource_name -> resource_id
-        if provider_unique_id:
-            return provider_unique_id
-        elif resource_name:
-            return resource_name
-        else:
-            return resource_id
-
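A compact restatement of the identifier priority used just above (provider-specific id such as an ARN first, then the resource name, then the raw resource id), sketched with a plain dict and made-up field values.

def best_identifier(item: dict) -> str:
    # provider-specific id first, then human-readable name, then raw id
    return item.get("provider_unique_id") or item.get("resource_name") or item.get("resource_id", "")

print(best_identifier({"resource_id": "wiz-123", "resource_name": "web-vm-01"}))  # -> web-vm-01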
-    def _create_consolidated_asset_identifier(self, asset_mappings: Dict[str, Dict[str, str]]) -> str:
-        """
-        Create a consolidated asset identifier with only asset names (one per line).
-
-        Format: "Asset Name 1\nAsset Name 2\nAsset Name 3"
-        This format provides clean, human-readable asset names for POAMs and issues
-        without cluttering them with Wiz resource IDs.
-
-        :param Dict[str, Dict[str, str]] asset_mappings: Map of Wiz resource IDs to asset info
-        :return: Consolidated identifier string with asset names only
-        :rtype: str
-        """
-        if not asset_mappings:
-            return ""
-
-        # Create entries that show only asset names (one per line)
-        identifier_parts = []
-        # Sort by asset name for consistent ordering
-        sorted_mappings = sorted(asset_mappings.items(), key=lambda x: x[1]["name"])
-        for wiz_id, asset_info in sorted_mappings:
-            asset_name = asset_info["name"]
-            wiz_resource_id = asset_info["wiz_id"]
-
-            # Format: Just the asset name (no Wiz resource ID for cleaner POAMs)
-            if asset_name != wiz_resource_id:
-                # Asset was successfully mapped, show only the name
-                identifier_part = asset_name
-            else:
-                # Asset lookup failed, use the Wiz resource ID as fallback
-                identifier_part = wiz_resource_id
-
-            identifier_parts.append(identifier_part)
-
-        # Join with newlines for multi-asset issues
-        consolidated_identifier = "\n".join(identifier_parts)
-        logger.debug(
-            f"Created consolidated asset identifier with {len(identifier_parts)} assets: {consolidated_identifier}"
-        )
-        return consolidated_identifier
-
-    def _categorize_controls_by_aggregation(self) -> None:
-        """
-        Override the base method to handle multiple control IDs per compliance item.
-        Wiz policies can map to multiple NIST controls (e.g., AC-2(4), AC-6(9)) in securitySubCategories.
-        This method ensures all controls from a policy assessment are properly categorized.
-        """
-        from collections import defaultdict, Counter
-
-        # Group all compliance items by control ID - handle multiple controls per item
-        control_items = defaultdict(list)
-
-        for item in self.all_compliance_items:
-            # Get all control IDs that this compliance item maps to
-            all_control_ids = self._get_all_control_ids_for_compliance_item(item)
-
-            # Add this item to each control it maps to
-            for control_id in all_control_ids:
-                control_key = control_id.lower()
-                control_items[control_key].append(item)
-
-        # Analyze each control's results
-        for control_key, items in control_items.items():
-            results = [item.compliance_result for item in items]
-            result_counts = Counter(results)
-
-            fail_count = sum(result_counts.get(status, 0) for status in self.FAIL_STATUSES)
-            pass_count = sum(result_counts.get(status, 0) for status in self.PASS_STATUSES)
-
-            # Determine control status - strict compliance: ALL assessments must pass
-            if fail_count == 0 and pass_count > 0:
-                # All results are passing - control passes
-                self.passing_controls[control_key] = items[0] # Use first item as representative
-                logger.debug(f"Control {control_key} marked as PASSING: {pass_count}P/{fail_count}F")
-
-            elif fail_count > 0:
-                # Any failures present - control fails (strict compliance)
-                self.failing_controls[control_key] = next(
-                    item for item in items if item.compliance_result in self.FAIL_STATUSES
-                )
-                logger.debug(
-                    f"Control {control_key} marked as FAILING: {pass_count}P/{fail_count}F (any failure = control fails)"
-                )
-            else:
-                # No pass or fail results - skip this control
-                logger.debug(f"Control {control_key} skipped: no valid pass/fail results")
-
-        logger.info(
-            f"Control categorization complete: {len(self.passing_controls)} passing, {len(self.failing_controls)} failing"
-        )
-
-
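The strict aggregation rule removed above (a control passes only when every result passes; any failure fails it) can be shown in isolation. The status strings below are placeholders, not the package's actual FAIL_STATUSES / PASS_STATUSES values.

from collections import Counter

FAIL, PASS = {"FAIL", "ERROR"}, {"PASS"}

def control_status(results: list) -> str:
    counts = Counter(results)
    fails = sum(counts[s] for s in FAIL)
    passes = sum(counts[s] for s in PASS)
    if fails == 0 and passes > 0:
        return "passing"
    return "failing" if fails > 0 else "skipped"

print(control_status(["PASS", "PASS", "FAIL"]))  # -> failing (any failure fails the control)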
-def resolve_framework_id(framework_input: str) -> str:
-    """
-    Resolve framework input to actual Wiz framework ID.
-
-    Supports:
-    - Direct framework IDs (wf-id-4)
-    - Shorthand names (nist, aws, soc2)
-    - Partial matches (case insensitive)
-
-    :param str framework_input: User input for framework
-    :return: Resolved framework ID
-    :rtype: str
-    :raises ValueError: If framework cannot be resolved
-    """
-    if not framework_input or not framework_input.strip():
-        error_and_exit("Framework input cannot be empty. Use --list-frameworks to see available options.")
-
-    framework_input = framework_input.lower().strip()
-
-    # Direct framework ID
-    if framework_input.startswith("wf-id-"):
-        if framework_input in FRAMEWORK_MAPPINGS:
-            return framework_input
-        else:
-            error_and_exit(f"Unknown framework ID: {framework_input}")
-
-    # Shorthand lookup
-    if framework_input in FRAMEWORK_SHORTCUTS:
-        return FRAMEWORK_SHORTCUTS[framework_input]
-
-    # Partial name matching
-    for shorthand, framework_id in FRAMEWORK_SHORTCUTS.items():
-        if framework_input in shorthand:
-            return framework_id
-
-    # Search in full framework names (case insensitive)
-    for framework_id, framework_name in FRAMEWORK_MAPPINGS.items():
-        if framework_input in framework_name.lower():
-            return framework_id
-
-    error_and_exit(f"Could not resolve framework: '{framework_input}'. Use --list-frameworks to see available options.")
-
-
|
|
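To make the resolution order concrete, here is a small usage sketch. The mapping contents are example values only; the real FRAMEWORK_SHORTCUTS and FRAMEWORK_MAPPINGS ship with the wizv2 constants:

# Example values only; the real mappings are defined alongside these helpers.
FRAMEWORK_SHORTCUTS = {"nist": "wf-id-4", "soc2": "wf-id-197"}
FRAMEWORK_MAPPINGS = {"wf-id-4": "NIST 800-53 Rev 5", "wf-id-197": "SOC 2"}

resolve_framework_id("wf-id-4")      # direct ID, validated against FRAMEWORK_MAPPINGS
resolve_framework_id("soc2")         # shortcut, looked up in FRAMEWORK_SHORTCUTS
resolve_framework_id("nist 800-53")  # partial, substring match on the full framework name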
-def list_available_frameworks() -> str:
-    """
-    Generate a formatted list of available frameworks.
-
-    :return: Formatted framework list
-    :rtype: str
-    """
-    output = []
-    output.append("🔒 Available Wiz Compliance Frameworks")
-    output.append("=" * 50)
-
-    # Show shorthand mappings first
-    output.append("\nQuick Shortcuts:")
-    output.append("-" * 20)
-    shortcut_items = sorted(FRAMEWORK_SHORTCUTS.items())
-    for shorthand, framework_id in shortcut_items[:10]:  # Show first 10
-        framework_name = FRAMEWORK_MAPPINGS.get(framework_id, "Unknown")
-        output.append(f" {shorthand:<15} → {framework_name}")
-
-    if len(shortcut_items) > 10:
-        output.append(f" ... and {len(shortcut_items) - 10} more shortcuts")
-
-    # Show frameworks by category
-    output.append("\n📚 All Frameworks by Category:")
-    output.append("-" * 35)
-
-    for category, framework_ids in FRAMEWORK_CATEGORIES.items():
-        output.append(f"\n🏷️ {category}:")
-        for framework_id in framework_ids:
-            if framework_id in FRAMEWORK_MAPPINGS:
-                framework_name = FRAMEWORK_MAPPINGS[framework_id]
-                output.append(f" {framework_id:<12} → {framework_name}")
-
-    # Usage examples
-    output.append("\n💡 Usage Examples:")
-    output.append("-" * 18)
-    output.append(" # Using shortcuts:")
-    output.append(" regscale wiz sync-policy-compliance -f nist")
-    output.append(" regscale wiz sync-policy-compliance -f aws")
-    output.append(" regscale wiz sync-policy-compliance -f soc2")
-    output.append("")
-    output.append(" # Using full framework IDs:")
-    output.append(" regscale wiz sync-policy-compliance -f wf-id-4")
-    output.append(" regscale wiz sync-policy-compliance -f wf-id-197")
-    output.append("")
-    output.append(" # Using partial names (case insensitive):")
-    output.append(" regscale wiz sync-policy-compliance -f 'nist 800-53'")
-    output.append(" regscale wiz sync-policy-compliance -f kubernetes")
-
-    return "\n".join(output)
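For a quick look at what this helper renders, it can be called directly; the import path below follows the wizv2 package layout in this diff's file list and should be treated as an assumption:

# Print the formatted framework catalog. The module path is assumed from the
# wizv2 package layout shown above, not confirmed by this diff.
from regscale.integrations.commercial.wizv2.compliance.helpers import list_available_frameworks

print(list_available_frameworks())
# The same listing backs the --list-frameworks option referenced in the
# resolve_framework_id() error messages above.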