regscale-cli 6.20.10.0__py3-none-any.whl → 6.21.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of regscale-cli might be problematic; see the registry's advisory page for more details.
- regscale/_version.py +1 -1
- regscale/core/app/application.py +12 -5
- regscale/core/app/internal/set_permissions.py +58 -27
- regscale/integrations/commercial/__init__.py +1 -2
- regscale/integrations/commercial/amazon/common.py +79 -2
- regscale/integrations/commercial/aws/cli.py +183 -9
- regscale/integrations/commercial/aws/scanner.py +544 -9
- regscale/integrations/commercial/cpe.py +18 -1
- regscale/integrations/commercial/nessus/scanner.py +2 -0
- regscale/integrations/commercial/sonarcloud.py +35 -36
- regscale/integrations/commercial/synqly/ticketing.py +51 -0
- regscale/integrations/commercial/tenablev2/jsonl_scanner.py +2 -1
- regscale/integrations/commercial/wizv2/async_client.py +10 -3
- regscale/integrations/commercial/wizv2/click.py +102 -26
- regscale/integrations/commercial/wizv2/constants.py +249 -1
- regscale/integrations/commercial/wizv2/issue.py +2 -2
- regscale/integrations/commercial/wizv2/parsers.py +3 -2
- regscale/integrations/commercial/wizv2/policy_compliance.py +1858 -0
- regscale/integrations/commercial/wizv2/scanner.py +15 -21
- regscale/integrations/commercial/wizv2/utils.py +258 -85
- regscale/integrations/commercial/wizv2/variables.py +4 -3
- regscale/integrations/compliance_integration.py +1455 -0
- regscale/integrations/integration_override.py +15 -6
- regscale/integrations/public/fedramp/fedramp_five.py +1 -1
- regscale/integrations/public/fedramp/markdown_parser.py +7 -1
- regscale/integrations/scanner_integration.py +193 -37
- regscale/models/app_models/__init__.py +1 -0
- regscale/models/integration_models/amazon_models/inspector_scan.py +32 -57
- regscale/models/integration_models/aqua.py +92 -78
- regscale/models/integration_models/cisa_kev_data.json +117 -5
- regscale/models/integration_models/defenderimport.py +64 -59
- regscale/models/integration_models/ecr_models/ecr.py +100 -147
- regscale/models/integration_models/flat_file_importer/__init__.py +52 -38
- regscale/models/integration_models/ibm.py +29 -47
- regscale/models/integration_models/nexpose.py +156 -68
- regscale/models/integration_models/prisma.py +46 -66
- regscale/models/integration_models/qualys.py +99 -93
- regscale/models/integration_models/snyk.py +229 -158
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/integration_models/veracode.py +15 -20
- regscale/{integrations/commercial/wizv2/models.py → models/integration_models/wizv2.py} +4 -12
- regscale/models/integration_models/xray.py +276 -82
- regscale/models/regscale_models/control_implementation.py +14 -12
- regscale/models/regscale_models/file.py +4 -0
- regscale/models/regscale_models/issue.py +123 -0
- regscale/models/regscale_models/milestone.py +1 -1
- regscale/models/regscale_models/rbac.py +22 -0
- regscale/models/regscale_models/regscale_model.py +4 -2
- regscale/models/regscale_models/security_plan.py +1 -1
- regscale/utils/graphql_client.py +3 -1
- {regscale_cli-6.20.10.0.dist-info → regscale_cli-6.21.1.0.dist-info}/METADATA +9 -9
- {regscale_cli-6.20.10.0.dist-info → regscale_cli-6.21.1.0.dist-info}/RECORD +64 -60
- tests/fixtures/test_fixture.py +58 -2
- tests/regscale/core/test_app.py +5 -3
- tests/regscale/core/test_version_regscale.py +5 -3
- tests/regscale/integrations/test_integration_mapping.py +522 -40
- tests/regscale/integrations/test_issue_due_date.py +1 -1
- tests/regscale/integrations/test_update_finding_dates.py +336 -0
- tests/regscale/integrations/test_wiz_policy_compliance_affected_controls.py +154 -0
- tests/regscale/models/test_asset.py +406 -50
- {regscale_cli-6.20.10.0.dist-info → regscale_cli-6.21.1.0.dist-info}/LICENSE +0 -0
- {regscale_cli-6.20.10.0.dist-info → regscale_cli-6.21.1.0.dist-info}/WHEEL +0 -0
- {regscale_cli-6.20.10.0.dist-info → regscale_cli-6.21.1.0.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.20.10.0.dist-info → regscale_cli-6.21.1.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1858 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
"""Wiz Policy Compliance Integration for RegScale CLI."""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import logging
|
|
7
|
+
import os
|
|
8
|
+
import re
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from typing import Dict, List, Optional, Iterator, Any
|
|
11
|
+
|
|
12
|
+
from regscale.core.app.utils.app_utils import error_and_exit, check_license
|
|
13
|
+
from regscale.core.app.application import Application
|
|
14
|
+
from regscale.integrations.commercial.wizv2.async_client import run_async_queries
|
|
15
|
+
from regscale.integrations.commercial.wizv2.constants import (
|
|
16
|
+
WizVulnerabilityType,
|
|
17
|
+
WIZ_POLICY_QUERY,
|
|
18
|
+
WIZ_FRAMEWORK_QUERY,
|
|
19
|
+
FRAMEWORK_MAPPINGS,
|
|
20
|
+
FRAMEWORK_SHORTCUTS,
|
|
21
|
+
FRAMEWORK_CATEGORIES,
|
|
22
|
+
)
|
|
23
|
+
from regscale.integrations.commercial.wizv2.wiz_auth import wiz_authenticate
|
|
24
|
+
from regscale.integrations.compliance_integration import ComplianceIntegration, ComplianceItem
|
|
25
|
+
from regscale.integrations.scanner_integration import (
|
|
26
|
+
ScannerIntegrationType,
|
|
27
|
+
IntegrationAsset,
|
|
28
|
+
IntegrationFinding,
|
|
29
|
+
)
|
|
30
|
+
from regscale.models import regscale_models
|
|
31
|
+
|
|
32
|
+
# Shared CLI logger; all modules in this package log under the "regscale" name.
logger = logging.getLogger("regscale")


# File extensions used when exporting/caching compliance data.
JSON_FILE_EXT = ".json"
JSONL_FILE_EXT = ".jsonl"

## WIZ_POLICY_QUERY moved to constants


# Safer, linear-time regex for control-id normalization.
# Examples supported: 'AC-4', 'AC-4(2)', 'AC-4 (2)', 'AC-4-2', 'AC-4 2'
# This avoids ambiguous nested optional whitespace with alternation that can
# trigger excessive backtracking. Each branch starts with a distinct token
# ('(', '-' or whitespace), so the engine proceeds deterministically.
# Group 1 captures the family+number (e.g. 'AC-4'); groups 2-4 capture the
# enhancement number depending on which separator style was used.
SAFE_CONTROL_ID_RE = re.compile(  # NOSONAR
    r"^([A-Za-z]{2}-\d+)(?:\s*\(\s*(\d+)\s*\)|-\s*(\d+)|\s+(\d+))?$",  # NOSONAR
    re.IGNORECASE,  # NOSONAR
)  # NOSONAR
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class WizComplianceItem(ComplianceItem):
    """Wiz implementation of ComplianceItem.

    Wraps a single raw policy-assessment record from the Wiz GraphQL API and
    exposes the normalized accessors the compliance base class expects.
    """

    def __init__(self, raw_data: Dict[str, Any], integration: Optional["WizPolicyComplianceIntegration"] = None):
        """
        Initialize WizComplianceItem from raw GraphQL response.

        :param Dict[str, Any] raw_data: Raw policy assessment data from Wiz
        :param Optional['WizPolicyComplianceIntegration'] integration: Integration instance for framework mapping
        """
        self.id = raw_data.get("id", "")
        self.result = raw_data.get("result", "")
        # BUGFIX: Wiz can return an explicit JSON null for these keys, and
        # `.get(key, {})` passes that None straight through — unguarded
        # properties such as `severity` and `description` would then raise
        # AttributeError on `.get()`. Coalesce to {} so every downstream
        # `.get()` call is safe regardless of the payload shape.
        self.policy = raw_data.get("policy") or {}
        self.resource = raw_data.get("resource") or {}
        self.output = raw_data.get("output") or {}
        self._integration = integration

    def _get_filtered_subcategories(self) -> List[Dict[str, Any]]:
        """
        Return only subcategories that belong to the selected framework.

        If no integration or framework filter is available, return all.

        :return: List of filtered security subcategories
        :rtype: List[Dict[str, Any]]
        """
        subcategories = self.policy.get("securitySubCategories", []) if self.policy else []
        if not subcategories or not self._integration or not getattr(self._integration, "framework_id", None):
            return subcategories

        target_framework_id = self._integration.framework_id
        filtered = [
            sc for sc in subcategories if sc.get("category", {}).get("framework", {}).get("id") == target_framework_id
        ]
        # Fallback to original list if filter removes everything (defensive)
        return filtered if filtered else subcategories

    @property
    def resource_id(self) -> str:
        """Unique identifier for the resource being assessed."""
        return self.resource.get("id", "")

    @property
    def resource_name(self) -> str:
        """Human-readable name of the resource."""
        return self.resource.get("name", "")

    @property
    def control_id(self) -> str:
        """Control identifier (e.g., AC-3, SI-2) from the first matching subcategory."""
        if not self.policy:
            return ""

        subcategories = self._get_filtered_subcategories()
        if subcategories:
            return subcategories[0].get("externalId", "")
        return ""

    @property
    def compliance_result(self) -> str:
        """Result of compliance check (PASS, FAIL, etc)."""
        return self.result

    @property
    def severity(self) -> Optional[str]:
        """Severity level of the compliance violation (if failed)."""
        return self.policy.get("severity")

    @property
    def description(self) -> str:
        """Description of the compliance check, with a generated fallback."""
        desc = self.policy.get("description") or self.policy.get("ruleDescription", "")
        if not desc:
            desc = f"Compliance check for {self.policy.get('name', 'unknown policy')}"
        return desc

    @property
    def framework(self) -> str:
        """Compliance framework (e.g., NIST800-53R5, CSF)."""
        if not self.policy:
            return ""

        subcategories = self._get_filtered_subcategories()
        if subcategories:
            category = subcategories[0].get("category", {})
            framework = category.get("framework", {})
            framework_id = framework.get("id", "")

            # Prefer integration mapping using the actual framework id from the item
            if self._integration and framework_id:
                return self._integration.get_framework_name(framework_id)

            return framework.get("name", "")
        return ""

    @property
    def framework_id(self) -> Optional[str]:
        """Extract framework ID from the first matching subcategory."""
        if not self.policy:
            return None

        subcategories = self._get_filtered_subcategories()
        if subcategories:
            category = subcategories[0].get("category", {})
            framework = category.get("framework", {})
            return framework.get("id")
        return None

    @property
    def is_pass(self) -> bool:
        """Check if assessment result is PASS."""
        return self.result == "PASS"

    @property
    def is_fail(self) -> bool:
        """Check if assessment result is FAIL."""
        return self.result == "FAIL"
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
class WizPolicyComplianceIntegration(ComplianceIntegration):
|
|
172
|
+
"""
|
|
173
|
+
Wiz Policy Compliance Integration for syncing policy assessments from Wiz to RegScale.
|
|
174
|
+
|
|
175
|
+
This integration fetches policy assessment data from Wiz, processes the results,
|
|
176
|
+
and creates control assessments in RegScale based on compliance status.
|
|
177
|
+
"""
|
|
178
|
+
|
|
179
|
+
# Display name used for plugin attribution on findings and logs.
title = "Wiz Policy Compliance Integration"
# Treated as a control-test (compliance) integration by the scanner framework.
type = ScannerIntegrationType.CONTROL_TEST
# Enable component creation/mapping like scanner integrations
options_map_assets_to_components: bool = True
# Do not create vulnerabilities from compliance policy results
create_vulnerabilities: bool = False
# Enable scan history; we will record issue counts
enable_scan_history: bool = True
# Control whether JSONL control-centric export is written alongside JSON
write_jsonl_output: bool = False
|
|
189
|
+
|
|
190
|
+
def __init__(
    self,
    plan_id: int,
    wiz_project_id: str,
    client_id: str,
    client_secret: str,
    framework_id: str = "wf-id-4",  # Default to NIST SP 800-53 Revision 5
    catalog_id: Optional[int] = None,
    tenant_id: int = 1,
    create_issues: bool = True,
    update_control_status: bool = True,
    create_poams: bool = False,
    **kwargs,
):
    """
    Initialize the Wiz Policy Compliance Integration.

    :param int plan_id: RegScale Security Plan ID
    :param str wiz_project_id: Wiz Project ID to query
    :param str client_id: Wiz API client ID
    :param str client_secret: Wiz API client secret
    :param str framework_id: Wiz framework ID to filter by (default: wf-id-4)
    :param Optional[int] catalog_id: RegScale catalog ID
    :param int tenant_id: RegScale tenant ID
    :param bool create_issues: Whether to create issues for failed compliance
    :param bool update_control_status: Whether to update control implementation status
    :param bool create_poams: Whether to mark issues as POAMs
    """
    # NOTE: the framework id is translated to a human-readable name *before*
    # the base class initializes, since the base class receives `framework`
    # as a constructor argument.
    super().__init__(
        plan_id=plan_id,
        catalog_id=catalog_id,
        framework=self._map_framework_id_to_name(framework_id),
        create_issues=create_issues,
        update_control_status=update_control_status,
        create_poams=create_poams,
        tenant_id=tenant_id,
        **kwargs,
    )

    # Wiz connection parameters; endpoint/token are populated at auth time.
    self.wiz_project_id = wiz_project_id
    self.client_id = client_id
    self.client_secret = client_secret
    self.framework_id = framework_id
    self.wiz_endpoint = ""
    self.access_token = ""
    # Framework id -> name mapping, optionally persisted on disk.
    self.framework_mapping: Dict[str, str] = {}
    self.framework_cache_file = os.path.join("artifacts", "wiz", "framework_mapping.json")
    # Raw GraphQL records retained after fetch_compliance_data().
    self.raw_policy_assessments: List[Dict[str, Any]] = []

    # Caching configuration for policy assessments
    # Default: disabled for tests; CLI enables via --cache-duration
    self.cache_duration_minutes: int = int(kwargs.get("cache_duration_minutes", 0))
    self.force_refresh: bool = bool(kwargs.get("force_refresh", False))
    # Cache file is keyed by project + framework so different selections
    # never read each other's cached assessments.
    self.policy_cache_dir: str = os.path.join("artifacts", "wiz")
    self.policy_cache_file: str = os.path.join(
        self.policy_cache_dir, f"policy_assessments_{wiz_project_id}_{framework_id}.json"
    )
|
|
247
|
+
|
|
248
|
+
def fetch_compliance_data(self) -> List[Any]:
    """
    Fetch compliance data from Wiz GraphQL API.

    Authenticates lazily on first use, caches the raw assessment records on
    the instance, and returns them; the base class converts each record via
    create_compliance_item().

    :return: List of raw compliance data (will be converted by base class)
    :rtype: List[Any]
    """
    if not self.access_token:
        self.authenticate_wiz()

    assessments = self._fetch_policy_assessments_from_wiz()
    self.raw_policy_assessments = assessments
    return assessments
|
|
263
|
+
|
|
264
|
+
def create_compliance_item(self, raw_data: Any) -> ComplianceItem:
    """
    Create a ComplianceItem from raw compliance data.

    :param Any raw_data: Raw compliance data from Wiz
    :return: ComplianceItem instance
    :rtype: ComplianceItem
    """
    # Pass ourselves along so the item can resolve framework names/ids.
    return WizComplianceItem(raw_data, integration=self)
|
|
273
|
+
|
|
274
|
+
def _map_resource_type_to_asset_type(self, compliance_item: ComplianceItem) -> str:
    """
    Map Wiz resource type to RegScale asset type.

    Non-Wiz items and unmapped resource types fall back to the generic
    "Cloud Resource" type.

    :param ComplianceItem compliance_item: Compliance item
    :return: Asset type string
    :rtype: str
    """
    fallback = "Cloud Resource"
    if not isinstance(compliance_item, WizComplianceItem):
        return fallback

    # Minimal mapping expected by tests; default to generic type name
    wiz_to_regscale = {
        "VIRTUAL_MACHINE": "Virtual Machine",
        "CONTAINER": "Container",
        "DATABASE": "Database",
        "BUCKET": "Storage",
    }
    resource_type = compliance_item.resource.get("type", "").upper()
    return wiz_to_regscale.get(resource_type, fallback)
|
|
296
|
+
|
|
297
|
+
def _get_component_name_from_source_type(self, compliance_item: WizComplianceItem) -> str:
    """
    Build a component name from the original Wiz resource type (source type).

    Example: "STORAGE_ACCOUNT" -> "Storage Account"

    :param WizComplianceItem compliance_item: Compliance item containing resource information
    :return: Human-readable component name derived from resource type
    :rtype: str
    """
    resource = compliance_item.resource or {}
    source_type = resource.get("type", "Unknown Resource")
    return source_type.replace("_", " ").title()
|
|
309
|
+
|
|
310
|
+
def fetch_assets(self, *args, **kwargs) -> Iterator[IntegrationAsset]:
    """
    Fetch assets grouped to components by asset types like scanner integrations,
    and upsert existing assets (no duplicates). Only assets for items already
    filtered to the selected framework are considered.

    - Deduplicate by resource_id
    - Yield assets with component_names set to their inferred group
    - Always yield unique assets for bulk upsert (create or update)
    """
    logger.info("Fetching assets from compliance items...")

    # Ensure caches are loaded for downstream lookups
    self._load_existing_records_cache()

    seen_resource_ids = set()
    for item in self.all_compliance_items:
        rid = getattr(item, "resource_id", None)
        if not rid or rid in seen_resource_ids:
            continue

        asset = self.create_asset_from_compliance_item(item)
        if not asset:
            continue

        # Derive component grouping from the source asset type (not control)
        component = self._get_component_name_from_source_type(item)
        names = getattr(asset, "component_names", None)
        if component and isinstance(names, list) and component not in names:
            names.append(component)

        seen_resource_ids.add(rid)
        yield asset
|
|
341
|
+
|
|
342
|
+
def fetch_findings(self, *args, **kwargs) -> Iterator[IntegrationFinding]:
    """
    Produce at most one finding per (asset, control) pair to avoid duplicates.

    Dedupe key: (resource_id, control_id), case-insensitive.
    """
    logger.info("Fetching findings from failed compliance items (dedup by asset-control)...")

    emitted: set[tuple[str, str]] = set()
    for item in self.failed_compliance_items:
        # Non-Wiz items bypass dedup and use the base-class conversion.
        if not isinstance(item, WizComplianceItem):
            fallback = super().create_finding_from_compliance_item(item)
            if fallback:
                yield fallback
            continue

        key = ((item.resource_id or "").lower(), (item.control_id or "").upper())
        if not key[0] or not key[1]:
            continue
        if key in emitted:
            continue
        emitted.add(key)

        finding = self.create_finding_from_compliance_item(item)
        if finding:
            yield finding
|
|
371
|
+
|
|
372
|
+
def _map_framework_id_to_name(self, framework_id: str) -> str:
    """
    Map framework ID to framework name.

    Unknown ids are returned unchanged so callers always get a usable value.

    :param str framework_id: Framework ID to map
    :return: Human-readable framework name
    :rtype: str
    """
    # Default mappings - will be enhanced with cached data
    known_frameworks = {
        "wf-id-4": "NIST800-53R5",
        "wf-id-48": "NIST800-53R4",
        "wf-id-5": "FedRAMP",
    }
    return known_frameworks.get(framework_id, framework_id)
|
|
388
|
+
|
|
389
|
+
def create_finding_from_compliance_item(self, compliance_item: ComplianceItem) -> Optional[IntegrationFinding]:
    """
    Create an IntegrationFinding from a failed compliance item with proper asset/issue matching.

    :param ComplianceItem compliance_item: The compliance item
    :return: IntegrationFinding or None
    :rtype: Optional[IntegrationFinding]
    """
    if not isinstance(compliance_item, WizComplianceItem):
        return super().create_finding_from_compliance_item(compliance_item)

    try:
        # Assemble the finding from small helpers, then enrich it.
        policy_name = self._get_policy_name(compliance_item)
        finding = self._build_finding(
            control_labels=self._get_control_labels(compliance_item),
            title=self._compose_title(policy_name, compliance_item),
            description=self._compose_description(policy_name, compliance_item),
            severity=self._map_severity(compliance_item.severity),
            compliance_item=compliance_item,
        )
        self._set_affected_controls(finding, compliance_item)
        self._set_assessment_id_if_available(finding, compliance_item)
        return finding
    except Exception as e:
        logger.error(f"Error creating finding from Wiz compliance item: {e}")
        return None
|
|
419
|
+
|
|
420
|
+
# ---------- Private helpers (low-complexity building blocks) ----------
|
|
421
|
+
|
|
422
|
+
@staticmethod
def _get_control_labels(item: WizComplianceItem) -> List[str]:
    """
    Extract control labels from a Wiz compliance item.

    :param WizComplianceItem item: Compliance item to extract labels from
    :return: List of control labels (empty when no control id is present)
    :rtype: List[str]
    """
    control = item.control_id
    if not control:
        return []
    return [control]
|
|
432
|
+
|
|
433
|
+
@staticmethod
def _get_policy_name(item: WizComplianceItem) -> str:
    """
    Extract policy name from a Wiz compliance item.

    :param WizComplianceItem item: Compliance item to extract policy name from
    :return: Policy name or 'Unknown Policy' if not found
    :rtype: str
    """
    name = item.policy.get("name")
    return (name or "Unknown Policy").strip()
|
|
443
|
+
|
|
444
|
+
@staticmethod
def _compose_title(policy_name: str, item: WizComplianceItem) -> str:
    """
    Compose a finding title from policy name and control information.

    :param str policy_name: Name of the policy
    :param WizComplianceItem item: Compliance item with control information
    :return: Formatted title for the finding
    :rtype: str
    """
    if item.control_id:
        return f"{policy_name} ({item.control_id})"
    return policy_name
|
|
455
|
+
|
|
456
|
+
def _compose_description(self, policy_name: str, item: WizComplianceItem) -> str:
    """
    Compose a detailed description for a compliance finding.

    :param str policy_name: Name of the policy that failed
    :param WizComplianceItem item: Compliance item with resource and policy details
    :return: Formatted markdown description
    :rtype: str
    """
    resource = item.resource
    lines: List[str] = [
        f"Policy compliance failure detected by Wiz for resource '{item.resource_name}'.",
        "",
        f"**Policy:** {policy_name}",
        f"**Resource:** {item.resource_name} ({resource.get('type', 'Unknown')})",
        f"**Control:** {item.control_id}",
        f"**Framework:** {item.framework}",
        f"**Result:** {item.result}",
    ]

    # Policy/Remediation details
    policy_desc = item.policy.get("description") or item.policy.get("ruleDescription")
    if policy_desc:
        lines += ["", "**Policy Description:**", policy_desc]

    remediation = item.policy.get("remediationInstructions")
    if remediation:
        lines += ["", "**Remediation Instructions:**", remediation]

    # Location details
    region = resource.get("region")
    if region:
        lines.append(f"**Region:** {region}")
    sub = resource.get("subscription")
    if sub:
        lines.append(
            f"**Cloud Provider:** {sub.get('cloudProvider', 'Unknown')} "
            f"(Subscription: {sub.get('name', 'Unknown')})"
        )

    return "\n".join(lines)
|
|
495
|
+
|
|
496
|
+
def _build_finding(
    self,
    *,
    control_labels: List[str],
    title: str,
    description: str,
    severity: regscale_models.IssueSeverity,
    compliance_item: WizComplianceItem,
) -> IntegrationFinding:
    """
    Build an IntegrationFinding from compliance item components.

    :param List[str] control_labels: List of control labels
    :param str title: Finding title
    :param str description: Finding description
    :param regscale_models.IssueSeverity severity: Finding severity
    :param WizComplianceItem compliance_item: Source compliance item
    :return: Constructed integration finding
    :rtype: IntegrationFinding
    """
    # plugin_id/external_id are keyed on the control id so re-runs match the
    # same finding; a missing control id degrades to an empty-rule key.
    stable_rule = compliance_item.control_id or ""
    return IntegrationFinding(
        control_labels=control_labels,
        # Prefix only genuine failures; other statuses keep the plain title.
        title=f"Policy Compliance Failure: {title}" if compliance_item.is_fail else title,
        category="Policy Compliance",
        plugin_name=f"{self.title}",
        severity=severity,
        description=description,
        status=regscale_models.IssueStatus.Open,
        priority=self._map_severity_to_priority(severity),
        plugin_id=f"policy-control:{self.framework_id}:{stable_rule}",
        # Use the per-assessment id when Wiz supplies one; otherwise fall back
        # to the control-scoped identifier.
        external_id=(
            f"wiz-policy-{compliance_item.id}" if compliance_item.id else f"wiz-policy-control-{stable_rule}"
        ),
        identification="Security Control Assessment",
        # All dates are pinned to this run's scan date.
        first_seen=self.scan_date,
        last_seen=self.scan_date,
        scan_date=self.scan_date,
        asset_identifier=compliance_item.resource_id,
        vulnerability_type="Policy Compliance Violation",
        rule_id=compliance_item.control_id,
        baseline=compliance_item.framework,
        remediation=compliance_item.policy.get("remediationInstructions") or "",
    )
|
|
540
|
+
|
|
541
|
+
def _set_affected_controls(self, finding: IntegrationFinding, item: WizComplianceItem) -> None:
    """
    Set the affected controls field on a finding from a compliance item.

    No-op when the item carries no control id.

    :param IntegrationFinding finding: Finding to update
    :param WizComplianceItem item: Compliance item with control information
    :return: None
    :rtype: None
    """
    control = item.control_id
    if not control:
        return
    finding.affected_controls = self._normalize_control_id_string(control)
|
|
552
|
+
|
|
553
|
+
def _set_assessment_id_if_available(self, finding: IntegrationFinding, item: WizComplianceItem) -> None:
    """
    Set the assessment ID on a finding if available from cached mappings.

    Best-effort: any failure is logged at debug level and swallowed so
    finding creation never breaks on a missing cache.

    :param IntegrationFinding finding: Finding to update with assessment ID
    :param WizComplianceItem item: Compliance item with control information
    :return: None
    :rtype: None
    """
    try:
        ctrl_norm = self._normalize_control_id_string(item.control_id)
        if not ctrl_norm or not hasattr(self, "_impl_id_by_control"):
            return
        impl_id = self._impl_id_by_control.get(ctrl_norm)
        if not impl_id or not hasattr(self, "_assessment_by_impl_today"):
            return
        assess = self._assessment_by_impl_today.get(impl_id)
        if assess:
            finding.assessment_id = assess.id
            logger.debug(f"Set finding.assessment_id = {assess.id} for control '{ctrl_norm}'")
    except Exception as e:
        logger.debug(f"Error setting finding assessment ID: {e}")
|
|
573
|
+
|
|
574
|
+
def create_asset_from_compliance_item(self, compliance_item: ComplianceItem) -> Optional[IntegrationAsset]:
    """
    Create an IntegrationAsset from a Wiz compliance item with enhanced metadata.

    Non-Wiz items are delegated to the base class. Returns None (after
    logging) if asset construction fails for any reason.

    :param ComplianceItem compliance_item: The compliance item
    :return: IntegrationAsset or None
    :rtype: Optional[IntegrationAsset]
    """
    if not isinstance(compliance_item, WizComplianceItem):
        return super().create_asset_from_compliance_item(compliance_item)

    try:
        resource = compliance_item.resource
        asset_type = self._map_resource_type_to_asset_type(compliance_item)

        # Build asset description with cloud metadata
        description_parts = [
            "Cloud resource from Wiz compliance scan",
            f"Type: {resource.get('type', 'Unknown')}",
        ]

        if resource.get("region"):
            description_parts.append(f"Region: {resource['region']}")

        if resource.get("subscription"):
            sub = resource["subscription"]
            description_parts.append(
                f"Cloud Provider: {sub.get('cloudProvider', 'Unknown')} "
                f"(Subscription: {sub.get('name', 'Unknown')})"
            )

        # Add tags if available (only tags that have a key are rendered)
        tags = resource.get("tags", [])
        if tags:
            tag_strings = [f"{tag.get('key')}:{tag.get('value')}" for tag in tags if tag.get("key")]
            if tag_strings:
                description_parts.append(f"Tags: {', '.join(tag_strings)}")

        # Get user ID directly from application config
        # NOTE(review): Application() is instantiated per asset; if config
        # loading is expensive this could be hoisted/cached — confirm.
        app = Application()
        config = app.config
        user_id = config.get("userId")

        asset = IntegrationAsset(
            name=compliance_item.resource_name,
            identifier=compliance_item.resource_id,
            external_id=compliance_item.resource_id,
            other_tracking_number=compliance_item.resource_id,  # For deduplication
            asset_type=asset_type,
            asset_category=regscale_models.AssetCategory.Hardware,
            description="\n".join(description_parts),
            parent_id=self.plan_id,
            parent_module=self.parent_module,
            status=regscale_models.AssetStatus.Active,
            date_last_updated=self.scan_date,
            notes=self._create_asset_notes(compliance_item),
            # Set asset owner ID from config
            asset_owner_id=user_id,
            # Enable component mapping flow downstream
            component_names=[],
        )

        return asset

    except Exception as e:
        logger.error(f"Error creating asset from Wiz compliance item: {e}")
        return None
|
|
641
|
+
|
|
642
|
+
def create_scan_history(self):  # type: ignore[override]
    """Create or reuse scan history using base behavior.

    Thin override kept for explicitness; it defers entirely to the parent
    integration's implementation and adds no Wiz-specific behavior.
    """
    return super().create_scan_history()
|
|
645
|
+
|
|
646
|
+
def _create_asset_notes(self, compliance_item: WizComplianceItem) -> str:
    """
    Build detailed markdown notes for an asset with compliance context.

    :param WizComplianceItem compliance_item: Compliance item with resource details
    :return: Formatted asset notes in markdown
    :rtype: str
    """
    res = compliance_item.resource
    lines = [
        "# Wiz Asset Details",
        f"**Resource ID:** {compliance_item.resource_id}",
        f"**Resource Type:** {res.get('type', 'Unknown')}",
    ]

    # Cloud provider / subscription section, when present on the resource
    sub = res.get("subscription")
    if sub:
        lines += [
            "",
            "## Cloud Provider Details",
            f"**Provider:** {sub.get('cloudProvider', 'Unknown')}",
            f"**Subscription Name:** {sub.get('name', 'Unknown')}",
            f"**Subscription ID:** {sub.get('externalId', 'Unknown')}",
        ]

    # Compliance summary derived from all items mapped to this resource
    related = self.asset_compliance_map.get(compliance_item.resource_id, [])
    total_items = len(related)
    failed_items = sum(1 for entry in related if entry.compliance_result in self.FAIL_STATUSES)

    if total_items > 0:
        rate = (total_items - failed_items) / total_items * 100
        lines += [
            "",
            "## Compliance Summary",
            f"**Total Assessments:** {total_items}",
            f"**Failed Assessments:** {failed_items}",
            f"**Compliance Rate:** {rate:.1f}%",
        ]

    return "\n".join(lines)
|
|
696
|
+
|
|
697
|
+
def authenticate_wiz(self) -> str:
    """
    Authenticate with Wiz and return access token.

    Side effects: stores the token on ``self.access_token`` and the GraphQL
    endpoint (read from the application's config) on ``self.wiz_endpoint``.
    Exits the process via ``error_and_exit`` on any failure.

    :return: Wiz access token
    :rtype: str
    """
    logger.info("Authenticating with Wiz...")
    try:
        token = wiz_authenticate(client_id=self.client_id, client_secret=self.client_secret)
        if not token:
            error_and_exit("Failed to authenticate with Wiz")

        # Resolve the GraphQL endpoint from the licensed application's config
        self.wiz_endpoint = check_license().config.get("wizUrl", "")
        if not self.wiz_endpoint:
            error_and_exit("No Wiz URL found in configuration")

        self.access_token = token
        logger.info("Successfully authenticated with Wiz")
        return token

    except Exception as e:
        logger.error(f"Wiz authentication failed: {str(e)}")
        error_and_exit(f"Wiz authentication failed: {str(e)}")
|
|
724
|
+
|
|
725
|
+
def _fetch_policy_assessments_from_wiz(self) -> List[Dict[str, Any]]:
    """
    Fetch policy assessments from Wiz GraphQL API.

    Order of attempts: local cache (unless force-refreshed), then the async
    GraphQL client, then a requests-based fallback that tries several
    filter-key variants. Successful fetches are written back to the cache.

    :return: List of raw policy assessment data
    :rtype: List[Dict[str, Any]]
    """
    logger.info("Fetching policy assessments from Wiz...")

    # Authenticate if not already done
    if not self.access_token:
        self.authenticate_wiz()

    headers = self._build_wiz_headers()
    session = self._prepare_wiz_requests_session()

    # Try cache first unless forced refresh
    cached_nodes = self._load_assessments_from_cache()
    if cached_nodes is not None:
        logger.info(f"Using cached Wiz policy assessments ({len(cached_nodes)})")
        return cached_nodes

    # Only include variables supported by the query (avoid validation errors)
    page_size = 100
    logger.info(f"Using Wiz policy assessments page size (first): {page_size}")
    base_variables = {"first": page_size}

    # Try multiple filter key variants to avoid schema differences across tenants
    filter_variants = [
        {"project": [self.wiz_project_id]},
        {"projectId": [self.wiz_project_id]},
        {"projects": [self.wiz_project_id]},
        {},  # Empty filterBy
        None,  # Omit filterBy entirely
    ]

    # First, try async client (unit tests patch this path)
    try:
        from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

        with compliance_job_progress:
            task = compliance_job_progress.add_task(
                f"[#f68d1f]Fetching Wiz policy assessments (async, page size: {page_size})...",
                total=1,
            )
            results = run_async_queries(
                endpoint=self.wiz_endpoint or "https://api.wiz.io/graphql",
                headers=headers,
                query_configs=[
                    {
                        "type": WizVulnerabilityType.CONFIGURATION,
                        "query": WIZ_POLICY_QUERY,
                        "topic_key": "policyAssessments",
                        "variables": {"first": page_size},
                    }
                ],
                progress_tracker=compliance_job_progress,
                max_concurrent=1,
            )
            compliance_job_progress.update(task, completed=1, advance=1)
            # Expected result shape: [(type, nodes, error)] — only accept a
            # single, error-free result from the async path
            if results and len(results) == 1 and not results[0][2]:
                nodes = results[0][1] or []
                filtered = self._filter_nodes_to_framework(nodes)
                self._write_assessments_cache(filtered)
                return filtered
    except Exception:
        # Fall back to requests-based method below
        pass

    # Requests-based fallback: iterate filter variants until one succeeds
    filtered_nodes = self._fetch_assessments_with_variants(
        session=session,
        headers=headers,
        base_variables=base_variables,
        page_size=page_size,
        filter_variants=filter_variants,
    )
    self._write_assessments_cache(filtered_nodes)
    return filtered_nodes
|
|
803
|
+
|
|
804
|
+
def _build_wiz_headers(self) -> Dict[str, str]:
|
|
805
|
+
"""
|
|
806
|
+
Build HTTP headers for Wiz GraphQL API requests.
|
|
807
|
+
|
|
808
|
+
:return: Dictionary of HTTP headers including authorization
|
|
809
|
+
:rtype: Dict[str, str]
|
|
810
|
+
"""
|
|
811
|
+
return {
|
|
812
|
+
"Authorization": f"Bearer {self.access_token}",
|
|
813
|
+
"Content-Type": "application/json",
|
|
814
|
+
}
|
|
815
|
+
|
|
816
|
+
def _prepare_wiz_requests_session(self):
    """
    Prepare a requests session with retry logic for Wiz API calls.

    :return: Configured requests session with retry adapter
    :rtype: requests.Session
    """
    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry

    # Retry transient throttle/server failures; GraphQL calls are POST-only
    retry_policy = Retry(
        total=5,
        connect=5,
        read=5,
        backoff_factor=0.5,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["POST"],
    )
    http_session = requests.Session()
    for scheme in ("https://", "http://"):
        http_session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return http_session
|
|
840
|
+
|
|
841
|
+
def _fetch_assessments_with_variants(
    self,
    *,
    session,
    headers: Dict[str, str],
    base_variables: Dict[str, Any],
    page_size: int,
    filter_variants: List[Optional[Dict[str, Any]]],
) -> List[Dict[str, Any]]:
    """
    Try each filterBy variant in turn until one yields policy assessments.

    Returns on the first variant that succeeds; if every variant raises,
    exits the process via ``error_and_exit`` reporting the last error.

    :param session: Prepared requests session (retry-enabled)
    :param Dict[str, str] headers: HTTP headers including Wiz authorization
    :param Dict[str, Any] base_variables: Query variables shared by all variants
    :param int page_size: Page size ("first") used for progress display
    :param List[Optional[Dict[str, Any]]] filter_variants: filterBy candidates;
        ``{}`` means an empty filterBy, ``None`` means omit filterBy entirely
    :return: Policy assessment nodes filtered to the selected framework
    :rtype: List[Dict[str, Any]]
    """
    from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

    last_error: Optional[Exception] = None

    # In unit tests, the async client is patched and we should not hit network.

    with compliance_job_progress:
        task = compliance_job_progress.add_task(
            f"[#f68d1f]Fetching Wiz policy assessments (page size: {page_size})...",
            total=None,
        )
        for fv in filter_variants:
            try:
                # If endpoint is not set (tests), short-circuit to async path mock
                if not self.wiz_endpoint:
                    results = run_async_queries(
                        endpoint="https://api.wiz.io/graphql",
                        headers=headers,
                        query_configs=[
                            {
                                "type": WizVulnerabilityType.CONFIGURATION,
                                "query": WIZ_POLICY_QUERY,
                                "topic_key": "policyAssessments",
                                "variables": {**base_variables, **({"filterBy": fv} if fv is not None else {})},
                            }
                        ],
                        progress_tracker=compliance_job_progress,
                        max_concurrent=1,
                    )
                    # Expected mocked structure: [(type, nodes, error)]
                    if results and len(results) == 1 and not results[0][2]:
                        nodes = results[0][1] or []
                        return self._filter_nodes_to_framework(nodes)

                # Real network path: paginated requests with this variant
                return self._fetch_with_filter_variant(
                    session=session,
                    headers=headers,
                    base_variables=base_variables,
                    filter_variant=fv,
                    page_size=page_size,
                    progress=compliance_job_progress,
                    task=task,
                )
            except Exception as exc:  # noqa: BLE001 - propagate last error
                last_error = exc
                logger.debug(f"Filter variant {fv} failed: {exc}")

    msg = f"Failed to fetch policy assessments after trying all filter variants: {last_error}"
    logger.error(msg)
    error_and_exit(msg)
|
|
900
|
+
|
|
901
|
+
def _variant_name(self, fv: Optional[Dict[str, Any]]) -> str:
|
|
902
|
+
"""
|
|
903
|
+
Get a human-readable name for a filter variant.
|
|
904
|
+
|
|
905
|
+
:param Optional[Dict[str, Any]] fv: Filter variant dictionary
|
|
906
|
+
:return: Human-readable variant name
|
|
907
|
+
:rtype: str
|
|
908
|
+
"""
|
|
909
|
+
if fv is None:
|
|
910
|
+
return "omitted"
|
|
911
|
+
if fv == {}:
|
|
912
|
+
return "empty"
|
|
913
|
+
try:
|
|
914
|
+
return next(iter(fv.keys()))
|
|
915
|
+
except Exception:
|
|
916
|
+
return "unknown"
|
|
917
|
+
|
|
918
|
+
def _fetch_with_filter_variant(
    self,
    *,
    session,
    headers: Dict[str, str],
    base_variables: Dict[str, Any],
    filter_variant: Optional[Dict[str, Any]],
    page_size: int,
    progress,
    task,
) -> List[Dict[str, Any]]:
    """
    Run the paginated policy-assessment query for one filterBy variant.

    :param session: Prepared requests session
    :param Dict[str, str] headers: HTTP headers including Wiz authorization
    :param Dict[str, Any] base_variables: Query variables shared by all variants
    :param Optional[Dict[str, Any]] filter_variant: filterBy dict, or None to omit
    :param int page_size: Page size used for progress display only
    :param progress: Rich progress tracker
    :param task: Progress task id to update
    :return: Nodes filtered to the selected framework
    :rtype: List[Dict[str, Any]]
    """
    variant_name = self._variant_name(filter_variant)
    progress.update(
        task,
        description=(
            f"[#f68d1f]Fetching Wiz policy assessments (limit: {page_size}, " f"variant: {variant_name})..."
        ),
        advance=1,
    )

    # None means "omit filterBy entirely"; otherwise merge it into the variables
    variables = base_variables.copy() if filter_variant is None else {**base_variables, "filterBy": filter_variant}

    # NOTE(review): the paginator invokes this as on_page(page_index,
    # len(page_nodes), len(all_nodes)) — so `page_count` is the size of the
    # last page and `total_nodes` is the cumulative count.
    def on_page(page_idx: int, page_count: int, total_nodes: int) -> None:
        progress.update(
            task,
            description=(
                f"[cyan]Fetching policy assessments: page {page_idx}, "
                f"fetched {total_nodes} nodes (last page: {page_count})"
            ),
            advance=1,
        )

    nodes = self._execute_wiz_policy_query_paginated(
        session=session, headers=headers, variables=variables, on_page=on_page
    )
    filtered_nodes = self._filter_nodes_to_framework(nodes)
    progress.update(
        task,
        description=f"[green]✓ Completed Wiz policy assessments: {len(filtered_nodes)} nodes",
        completed=1,
        total=1,
    )
    logger.info(f"Successfully fetched {len(filtered_nodes)} policy assessments")
    return filtered_nodes
|
|
962
|
+
|
|
963
|
+
def _execute_wiz_policy_query_paginated(
    self,
    *,
    session,
    headers: Dict[str, str],
    variables: Dict[str, Any],
    on_page=None,
) -> List[Dict[str, Any]]:
    """
    Execute the policy-assessment GraphQL query, following cursor pagination.

    Pages are fetched until ``pageInfo.hasNextPage`` is false; the ``after``
    cursor is advanced with ``pageInfo.endCursor`` each iteration.

    :param session: Prepared requests session
    :param Dict[str, str] headers: HTTP headers including Wiz authorization
    :param Dict[str, Any] variables: Query variables; an existing "after" key seeds the cursor
    :param on_page: Optional callback invoked per page as
        on_page(page_index, page_node_count, cumulative_node_count); its
        exceptions are swallowed so progress reporting cannot break fetching
    :return: All nodes accumulated across pages
    :rtype: List[Dict[str, Any]]
    :raises requests.HTTPError: on HTTP status >= 400
    :raises RuntimeError: when the GraphQL response contains "errors"
    """
    import requests

    nodes: List[Dict[str, Any]] = []
    after_cursor: Optional[str] = variables.get("after")
    page_index = 0
    while True:
        # Copy so the caller's variables dict is never mutated
        payload_vars = variables.copy()
        payload_vars["after"] = after_cursor
        payload = {"query": WIZ_POLICY_QUERY, "variables": payload_vars}
        resp = session.post(self.wiz_endpoint, json=payload, headers=headers, timeout=300)
        if resp.status_code >= 400:
            # Truncate body to keep the error message bounded
            raise requests.HTTPError(f"{resp.status_code} {resp.text[:500]}")
        data = resp.json()
        if "errors" in data:
            raise RuntimeError(str(data["errors"]))
        topic = data.get("data", {}).get("policyAssessments", {})
        page_nodes = topic.get("nodes", [])
        page_info = topic.get("pageInfo", {})
        nodes.extend(page_nodes)
        page_index += 1
        if on_page:
            try:
                on_page(page_index, len(page_nodes), len(nodes))
            except Exception:
                # Progress callback failures must not abort the fetch
                pass
        has_next = page_info.get("hasNextPage", False)
        after_cursor = page_info.get("endCursor")
        if not has_next:
            break
    return nodes
|
|
1001
|
+
|
|
1002
|
+
def _filter_nodes_to_framework(self, nodes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
|
1003
|
+
filtered_nodes: List[Dict[str, Any]] = []
|
|
1004
|
+
for n in nodes:
|
|
1005
|
+
try:
|
|
1006
|
+
subcats = ((n or {}).get("policy") or {}).get("securitySubCategories", [])
|
|
1007
|
+
# If no subcategories info is present, include the node (cannot evaluate framework)
|
|
1008
|
+
if not subcats:
|
|
1009
|
+
filtered_nodes.append(n)
|
|
1010
|
+
continue
|
|
1011
|
+
if any((sc.get("category", {}).get("framework", {}).get("id") == self.framework_id) for sc in subcats):
|
|
1012
|
+
filtered_nodes.append(n)
|
|
1013
|
+
except Exception:
|
|
1014
|
+
filtered_nodes.append(n)
|
|
1015
|
+
return filtered_nodes
|
|
1016
|
+
|
|
1017
|
+
def _get_assessments_cache_path(self) -> str:
|
|
1018
|
+
"""
|
|
1019
|
+
Get the file path for policy assessments cache.
|
|
1020
|
+
|
|
1021
|
+
:return: Full path to cache file
|
|
1022
|
+
:rtype: str
|
|
1023
|
+
"""
|
|
1024
|
+
try:
|
|
1025
|
+
os.makedirs(self.policy_cache_dir, exist_ok=True)
|
|
1026
|
+
except Exception:
|
|
1027
|
+
pass
|
|
1028
|
+
return self.policy_cache_file
|
|
1029
|
+
|
|
1030
|
+
def _load_assessments_from_cache(self) -> Optional[List[Dict[str, Any]]]:
|
|
1031
|
+
"""
|
|
1032
|
+
Load policy assessments from cache file if valid and within TTL.
|
|
1033
|
+
|
|
1034
|
+
:return: Cached assessment nodes or None if cache is invalid/expired
|
|
1035
|
+
:rtype: Optional[List[Dict[str, Any]]]
|
|
1036
|
+
"""
|
|
1037
|
+
if self.force_refresh or self.cache_duration_minutes <= 0:
|
|
1038
|
+
return None
|
|
1039
|
+
try:
|
|
1040
|
+
path = self._get_assessments_cache_path()
|
|
1041
|
+
if not os.path.exists(path):
|
|
1042
|
+
return None
|
|
1043
|
+
# File age check
|
|
1044
|
+
max_age_seconds = max(0, int(self.cache_duration_minutes)) * 60
|
|
1045
|
+
age = max(0.0, (datetime.now().timestamp() - os.path.getmtime(path)))
|
|
1046
|
+
if age > max_age_seconds:
|
|
1047
|
+
return None
|
|
1048
|
+
with open(path, "r", encoding="utf-8") as f:
|
|
1049
|
+
data = json.load(f)
|
|
1050
|
+
nodes = data.get("nodes") or data.get("assessments") or []
|
|
1051
|
+
# Defensive: ensure list
|
|
1052
|
+
if not isinstance(nodes, list):
|
|
1053
|
+
return None
|
|
1054
|
+
return nodes
|
|
1055
|
+
except Exception:
|
|
1056
|
+
return None
|
|
1057
|
+
|
|
1058
|
+
def _write_assessments_cache(self, nodes: List[Dict[str, Any]]) -> None:
|
|
1059
|
+
"""
|
|
1060
|
+
Write policy assessment nodes to cache file.
|
|
1061
|
+
|
|
1062
|
+
:param List[Dict[str, Any]] nodes: Assessment nodes to cache
|
|
1063
|
+
:return: None
|
|
1064
|
+
:rtype: None
|
|
1065
|
+
"""
|
|
1066
|
+
# Only write cache when enabled
|
|
1067
|
+
if self.cache_duration_minutes <= 0:
|
|
1068
|
+
return None
|
|
1069
|
+
try:
|
|
1070
|
+
path = self._get_assessments_cache_path()
|
|
1071
|
+
payload = {
|
|
1072
|
+
"timestamp": datetime.now().isoformat(),
|
|
1073
|
+
"wiz_project_id": self.wiz_project_id,
|
|
1074
|
+
"framework_id": self.framework_id,
|
|
1075
|
+
"nodes": nodes,
|
|
1076
|
+
}
|
|
1077
|
+
with open(path, "w", encoding="utf-8") as f:
|
|
1078
|
+
json.dump(payload, f, ensure_ascii=False)
|
|
1079
|
+
except Exception:
|
|
1080
|
+
# Cache write failures should not interrupt flow
|
|
1081
|
+
pass
|
|
1082
|
+
|
|
1083
|
+
def write_policy_data_to_json(self) -> str:
    """
    Write policy assessment data to JSON and JSONL files with timestamp.

    Creates ``artifacts/wiz/policy_compliance_report_<ts>.json`` and, when
    ``self.write_jsonl_output`` is truthy, a control-aggregated ``.jsonl``
    file alongside it. Older report files are pruned afterwards (best effort).

    :return: Path to the written JSON file
    :rtype: str
    """
    # Create artifacts/wiz directory if it doesn't exist
    artifacts_dir = os.path.join("artifacts", "wiz")
    os.makedirs(artifacts_dir, exist_ok=True)

    # Generate timestamped filenames
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    file_path = os.path.join(artifacts_dir, f"policy_compliance_report_{timestamp}.json")
    file_path_jsonl = os.path.join(artifacts_dir, f"policy_compliance_report_{timestamp}.jsonl")

    # Prepare data for JSON export
    export_data = {
        "metadata": {
            "timestamp": timestamp,
            "wiz_project_id": self.wiz_project_id,
            "framework_id": self.framework_id,
            "framework_name": self.get_framework_name(self.framework_id),
            "total_assessments": len(self.all_compliance_items),
            "pass_count": len(self.all_compliance_items) - len(self.failed_compliance_items),
            "fail_count": len(self.failed_compliance_items),
            "unique_controls": len({item.control_id for item in self.all_compliance_items if item.control_id}),
        },
        "framework_mapping": self.framework_mapping,
        "policy_assessments": [],
    }

    # Convert compliance items to serializable format
    for compliance_item in self.all_compliance_items:
        if not isinstance(compliance_item, WizComplianceItem):
            continue
        # Narrow policy subcategories to the selected framework to avoid noise
        filtered_policy = self._filter_policy_to_framework(compliance_item.policy)
        export_data["policy_assessments"].append(
            {
                "id": compliance_item.id,
                "result": compliance_item.result,
                "control_id": compliance_item.control_id,
                "framework_name": compliance_item.framework,
                "framework_id": compliance_item.framework_id,
                "policy": filtered_policy or compliance_item.policy,
                "resource": compliance_item.resource,
                "output": compliance_item.output,
            }
        )

    # Write to JSON and JSONL files
    try:
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(export_data, f, indent=2, ensure_ascii=False)

        logger.info(f"Policy compliance data written to: {file_path}")
        # JSONL: aggregated by control_id (optional)
        if getattr(self, "write_jsonl_output", False):
            control_agg = self._build_control_aggregation()
            with open(file_path_jsonl, "w", encoding="utf-8") as jf:
                # Iterate values(): the control_id key was previously bound but unused
                for ctrl in control_agg.values():
                    jf.write(json.dumps(ctrl, ensure_ascii=False) + "\n")
            logger.info(f"Policy compliance JSONL written to: {file_path_jsonl}")
        # Best-effort cleanup to keep artifacts directory tidy
        self._cleanup_artifacts(artifacts_dir, keep=5)
        return file_path

    except Exception as e:
        logger.error(f"Failed to write policy data to JSON: {str(e)}")
        error_and_exit(f"Failed to write policy data to JSON: {str(e)}")

def _filter_policy_to_framework(self, policy) -> Dict[str, Any]:
    """
    Return a copy of ``policy`` whose securitySubCategories are narrowed to
    the selected framework.

    If the filter would remove every subcategory, the original list is kept
    to retain context (matching prior behavior).

    :param policy: Raw policy dict from a compliance item (may be falsy)
    :return: Shallow-copied policy dict, possibly with filtered subcategories
    :rtype: Dict[str, Any]
    """
    filtered_policy = dict(policy) if policy else {}
    subcats = filtered_policy.get("securitySubCategories", [])
    if subcats:
        matching = [
            sc for sc in subcats if sc.get("category", {}).get("framework", {}).get("id") == self.framework_id
        ]
        if matching:
            filtered_policy["securitySubCategories"] = matching
    return filtered_policy
|
|
1168
|
+
|
|
1169
|
+
def _build_control_aggregation(self) -> Dict[str, Dict[str, Any]]:
    """
    Build an aggregated view per control_id for JSONL export.

    Creates a control-centric view with assets affected and policy checks.

    :return: Dictionary mapping control IDs to aggregated data
    :rtype: Dict[str, Dict[str, Any]]

    {
        control_id: {
            "control_id": "AC-2(1)",
            "framework_id": "wf-id-4",
            "framework_name": "NIST SP 800-53 Revision 5",
            "failed": true,
            "assets_affected": [
                {
                    "resource_id": "...",
                    "resource_name": "...",
                    "resource_type": "...",
                    "region": "...",
                    "subscription": "...",
                    "checks": [
                        {"title": "Policy name", "result": "FAIL", "remediation": "..."}
                    ]
                }
            ]
        }
    }
    """
    control_map: Dict[str, Dict[str, Any]] = {}

    for item in self.all_compliance_items:
        if not isinstance(item, WizComplianceItem):
            # Skip non-wiz items in this aggregation
            continue

        # Canonicalize 'ac-4 (2)' / 'AC-4-2' / 'AC-4(2)' into one key
        ctrl_id = self._normalize_control_id_string(item.control_id)
        if not ctrl_id:
            continue

        ctrl_entry = control_map.get(ctrl_id)
        if not ctrl_entry:
            ctrl_entry = {
                "control_id": ctrl_id,
                "framework_id": self.framework_id,
                "framework_name": self.get_framework_name(self.framework_id),
                "failed": False,
                "assets_affected": [],
            }
            # Track assets in a dict for dedupe while building, convert to list at end
            ctrl_entry["_assets_idx"] = {}
            control_map[ctrl_id] = ctrl_entry

        # Determine fail/pass at control level: any failing item fails the control
        if item.compliance_result in self.FAIL_STATUSES:
            ctrl_entry["failed"] = True

        # Asset bucket (deduplicated by resource_id)
        asset_id = item.resource_id
        assets_idx: Dict[str, Any] = ctrl_entry["_assets_idx"]  # type: ignore
        asset_entry = assets_idx.get(asset_id)
        if not asset_entry:
            asset_entry = {
                "resource_id": item.resource_id,
                "resource_name": item.resource_name,
                "resource_type": (item.resource or {}).get("type"),
                "region": (item.resource or {}).get("region"),
                "subscription": ((item.resource or {}).get("subscription") or {}).get("name"),
                "checks": [],
            }
            assets_idx[asset_id] = asset_entry

        # Append policy check info; host-configuration rules carry the name
        # under policy.hostConfigurationRule.name instead of policy.name
        policy_name = (item.policy or {}).get("name") or (item.policy or {}).get("hostConfigurationRule", {}).get(
            "name"
        )
        remediation = (item.policy or {}).get("remediationInstructions")
        if policy_name:
            # Deduplicate identical checks by title within an asset
            titles = {c.get("title") for c in asset_entry["checks"]}
            if policy_name not in titles:
                check = {
                    "title": policy_name,
                    "result": item.compliance_result,
                    "remediation": remediation,
                }
                asset_entry["checks"].append(check)

    # Convert asset index maps to lists for final output
    for ctrl in control_map.values():
        assets_idx = ctrl.pop("_assets_idx", {})  # type: ignore
        ctrl["assets_affected"] = list(assets_idx.values())

    return control_map
|
|
1264
|
+
|
|
1265
|
+
@staticmethod
|
|
1266
|
+
def _normalize_control_id_string(control_id: Optional[str]) -> Optional[str]:
|
|
1267
|
+
"""
|
|
1268
|
+
Normalize control id variants to a canonical form, e.g. 'AC-4(2)'.
|
|
1269
|
+
Accepts 'ac-4 (2)', 'AC-4-2', 'AC-4(2)'. Returns uppercase base with optional '(sub)'.
|
|
1270
|
+
"""
|
|
1271
|
+
if not control_id:
|
|
1272
|
+
return None
|
|
1273
|
+
cid = control_id.strip()
|
|
1274
|
+
# Use precompiled safe regex to avoid catastrophic backtracking on crafted input
|
|
1275
|
+
m = SAFE_CONTROL_ID_RE.match(cid)
|
|
1276
|
+
if not m:
|
|
1277
|
+
return cid.upper()
|
|
1278
|
+
base = m.group(1).upper()
|
|
1279
|
+
# Subcontrol may be captured in group 2, 3, or 4 depending on the branch matched
|
|
1280
|
+
sub = m.group(2) or m.group(3) or m.group(4)
|
|
1281
|
+
return f"{base}({sub})" if sub else base
|
|
1282
|
+
|
|
1283
|
+
@staticmethod
def parse_control_jsonl(jsonl_path: str) -> Dict[str, Dict[str, Any]]:
    """
    Parse the aggregated control JSONL back into a dict keyed by control_id.
    """
    parsed: Dict[str, Dict[str, Any]] = {}
    try:
        with open(jsonl_path, "r", encoding="utf-8") as handle:
            for raw_line in handle:
                text = raw_line.strip()
                if not text:
                    # Tolerate blank lines in the file
                    continue
                record = json.loads(text)
                key = record.get("control_id")
                if key:
                    parsed[key] = record
    except Exception as exc:
        logger.error(f"Error parsing JSONL {jsonl_path}: {exc}")
    return parsed
|
|
1302
|
+
|
|
1303
|
+
def _cleanup_artifacts(self, dir_path: str, keep: int = 5) -> None:
    """
    Keep the most recent JSON and JSONL policy_compliance_report files, delete older ones.

    :param str dir_path: Directory containing artifacts to clean
    :param int keep: Number of most recent files per extension to keep
    :return: None
    :rtype: None
    """
    try:
        report_files = [
            (name, os.path.join(dir_path, name))
            for name in os.listdir(dir_path)
            if name.startswith("policy_compliance_report_")
            and (name.endswith(JSON_FILE_EXT) or name.endswith(JSONL_FILE_EXT))
        ]
        # Bucket by extension so each file type keeps its own most-recent set
        buckets: Dict[str, List[tuple[str, str]]] = {JSON_FILE_EXT: [], JSONL_FILE_EXT: []}
        for name, full_path in report_files:
            key = JSONL_FILE_EXT if name.endswith(JSONL_FILE_EXT) else JSON_FILE_EXT
            buckets[key].append((name, full_path))

        for files in buckets.values():
            # Newest first; everything past `keep` gets removed
            files.sort(key=lambda pair: os.path.getmtime(pair[1]), reverse=True)
            for _, stale_path in files[keep:]:
                try:
                    os.remove(stale_path)
                except Exception:
                    # Non-fatal; continue cleanup
                    continue
    except Exception as e:
        logger.debug(f"Artifact cleanup skipped: {e}")
|
|
1335
|
+
|
|
1336
|
+
def load_or_create_framework_mapping(self) -> Dict[str, str]:
    """
    Load framework mapping from cache file or create it by fetching from Wiz.

    :return: Framework ID to name mapping dictionary
    :rtype: Dict[str, str]
    """
    cache_present = os.path.exists(self.framework_cache_file)
    if not cache_present:
        logger.info("Framework mapping cache not found, fetching from Wiz API")
        return self._fetch_and_cache_framework_mapping()

    logger.info("Loading framework mapping from cache file")
    return self._load_framework_mapping_from_cache()
|
|
1350
|
+
|
|
1351
|
+
def _load_framework_mapping_from_cache(self) -> Dict[str, str]:
    """
    Load framework mapping from existing JSON cache file.

    Falls back to a fresh fetch from the Wiz API when the cache cannot be read.

    :return: Framework ID to name mapping
    :rtype: Dict[str, str]
    """
    try:
        with open(self.framework_cache_file, "r", encoding="utf-8") as fh:
            cache_data = json.load(fh)

        mapping = cache_data.get("framework_mapping", {})
        created_at = cache_data.get("timestamp", "")

        logger.info(f"Loaded {len(mapping)} frameworks from cache (created: {created_at})")
        self.framework_mapping = mapping
        return mapping

    except Exception as e:
        logger.error(f"Error loading framework mapping from cache: {str(e)}")
        logger.info("Falling back to fetching fresh framework data")
        return self._fetch_and_cache_framework_mapping()
|
|
1373
|
+
|
|
1374
|
+
def _fetch_and_cache_framework_mapping(self) -> Dict[str, str]:
|
|
1375
|
+
"""
|
|
1376
|
+
Fetch framework data from Wiz API and cache it to JSON file.
|
|
1377
|
+
|
|
1378
|
+
:return: Framework ID to name mapping
|
|
1379
|
+
:rtype: Dict[str, str]
|
|
1380
|
+
"""
|
|
1381
|
+
frameworks = self._fetch_security_frameworks()
|
|
1382
|
+
framework_mapping = self._create_framework_mapping(frameworks)
|
|
1383
|
+
self._write_framework_mapping_to_json(framework_mapping, frameworks)
|
|
1384
|
+
|
|
1385
|
+
self.framework_mapping = framework_mapping
|
|
1386
|
+
return framework_mapping
|
|
1387
|
+
|
|
1388
|
+
def _fetch_security_frameworks(self) -> List[Dict[str, Any]]:
    """
    Fetch security frameworks from Wiz GraphQL API.

    Uses the async query client with a visible progress bar; exits the
    process via ``error_and_exit`` when the query errors or raises.

    :return: List of framework data
    :rtype: List[Dict[str, Any]]
    """
    logger.info("Fetching security frameworks from Wiz...")

    # Authenticate if not already done
    if not self.access_token:
        self.authenticate_wiz()

    headers = {
        "Authorization": f"Bearer {self.access_token}",
        "Content-Type": "application/json",
    }

    query_config = {
        "type": WizVulnerabilityType.CONFIGURATION,  # Using existing enum type
        "query": WIZ_FRAMEWORK_QUERY,
        "topic_key": "securityFrameworks",
        "variables": {"first": 200, "filterBy": {}},  # Get all frameworks, no filtering
    }

    try:
        # Execute the query using async client with visible progress
        from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

        with compliance_job_progress:
            task = compliance_job_progress.add_task("[#f68d1f]Fetching Wiz security frameworks...", total=1)
            results = run_async_queries(
                endpoint=self.wiz_endpoint,
                headers=headers,
                query_configs=[query_config],
                progress_tracker=compliance_job_progress,
                max_concurrent=1,
            )
            compliance_job_progress.update(task, completed=1, advance=1)

            if not results or len(results) == 0:
                logger.warning("No framework results returned from Wiz")
                return []

            # Result tuple shape: (query type, nodes, error)
            _, nodes, error = results[0]

            if error:
                logger.error(f"Error fetching security frameworks: {error}")
                error_and_exit(f"Error fetching security frameworks: {error}")

            logger.info(f"Successfully fetched {len(nodes)} security frameworks")
            return nodes

    except Exception as e:
        logger.error(f"Failed to fetch security frameworks: {str(e)}")
        error_and_exit(f"Failed to fetch security frameworks: {str(e)}")
|
|
1444
|
+
|
|
1445
|
+
def _create_framework_mapping(self, frameworks: List[Dict[str, Any]]) -> Dict[str, str]:
    """
    Build an ID-to-name lookup table from raw Wiz framework records.

    Records missing either an ``id`` or a ``name`` are silently skipped.

    :param List[Dict[str, Any]] frameworks: Raw framework data from Wiz API
    :return: Dictionary mapping framework IDs to human-readable names
    :rtype: Dict[str, str]
    """
    framework_mapping = {
        fw["id"]: fw["name"] for fw in frameworks if fw.get("id") and fw.get("name")
    }
    logger.info(f"Created mapping for {len(framework_mapping)} frameworks")
    return framework_mapping
|
|
1464
|
+
|
|
1465
|
+
def _write_framework_mapping_to_json(
    self, framework_mapping: Dict[str, str], raw_frameworks: List[Dict[str, Any]]
) -> None:
    """
    Write framework mapping and raw data to the JSON cache file.

    Failure to write is logged but not fatal — the in-memory mapping is the
    source of truth for the current run.

    :param Dict[str, str] framework_mapping: Framework ID to name mapping dictionary
    :param List[Dict[str, Any]] raw_frameworks: Raw framework data from Wiz API
    :return: None
    :rtype: None
    """
    # Create artifacts/wiz directory if it doesn't exist. Guard against an
    # empty dirname (cache file in the CWD) — os.makedirs("") raises.
    artifacts_dir = os.path.dirname(self.framework_cache_file)
    if artifacts_dir:
        os.makedirs(artifacts_dir, exist_ok=True)

    # Prepare data for JSON export. Count with sum() instead of building
    # throwaway lists just to take their length.
    cache_data = {
        "metadata": {
            "timestamp": datetime.now().isoformat(),
            "total_frameworks": len(framework_mapping),
            "enabled_frameworks": sum(1 for f in raw_frameworks if f.get("enabled", False)),
            "builtin_frameworks": sum(1 for f in raw_frameworks if f.get("builtin", False)),
            "description": "Cached Wiz security framework mappings",
        },
        "framework_mapping": framework_mapping,
        "raw_frameworks": raw_frameworks,
    }

    # Write to JSON file
    try:
        with open(self.framework_cache_file, "w", encoding="utf-8") as f:
            json.dump(cache_data, f, indent=2, ensure_ascii=False)

        logger.info(f"Framework mapping cached to: {self.framework_cache_file}")

    except Exception as e:
        logger.error(f"Failed to write framework mapping to cache: {str(e)}")
        # Don't exit here - this is not critical to the main functionality
|
|
1503
|
+
|
|
1504
|
+
def get_framework_name(self, framework_id: str) -> str:
    """
    Resolve a Wiz framework ID to its human-readable name.

    Lazily loads the cached mapping on first use; when the ID is unknown,
    the raw ID itself is returned.

    :param str framework_id: Framework ID
    :return: Framework name or ID if not found
    :rtype: str
    """
    if not self.framework_mapping:
        # Populate the mapping cache on demand.
        self.load_or_create_framework_mapping()

    return self.framework_mapping.get(framework_id, framework_id)
|
|
1517
|
+
|
|
1518
|
+
def sync_policy_compliance(self, create_issues: bool = None, update_control_status: bool = None) -> None:
    """
    Main method to sync policy compliance data from Wiz.

    Resolves the two feature flags (falling back to the instance defaults
    when the caller passes None), authenticates, loads the framework cache,
    runs the base-class compliance processing/sync, and finally writes the
    collected policy data to a JSON artifact.

    :param bool create_issues: Whether to create issues for failed assessments (None = use instance default)
    :param bool update_control_status: Whether to update control implementation status (None = use instance default)
    :return: None
    :rtype: None
    """
    logger.info("Starting Wiz policy compliance sync...")

    try:
        # Resolve the flags once and persist them on the instance for
        # downstream logic. The original code resolved the defaults and then
        # re-checked `is not None` before writing back — after resolution the
        # write-back is always equivalent to this single assignment.
        self.create_issues = self.create_issues if create_issues is None else create_issues
        self.update_control_status = (
            self.update_control_status if update_control_status is None else update_control_status
        )

        # Step 1: Authenticate with Wiz
        self.authenticate_wiz()

        # Step 2: Load or create framework mapping cache
        self.load_or_create_framework_mapping()

        # Step 3: Process and sync using the base class
        self.process_compliance_data()
        self.sync_compliance()

        # Step 4: Write data to JSON file for reference (post-processing)
        json_file = self.write_policy_data_to_json()
        logger.info(f"Policy compliance data saved to: {json_file}")

        logger.info("Policy compliance sync completed successfully")

    except Exception as e:
        logger.error(f"Policy compliance sync failed: {str(e)}")
        error_and_exit(f"Policy compliance sync failed: {str(e)}")
|
|
1559
|
+
|
|
1560
|
+
def sync_wiz_compliance(self) -> None:
    """
    Backward-compatible alias that delegates to ``sync_policy_compliance``.

    :return: None
    :rtype: None
    """
    self.sync_policy_compliance()
|
|
1568
|
+
|
|
1569
|
+
def is_poam(self, finding: IntegrationFinding) -> bool:  # type: ignore[override]
    """
    Determine if an issue should be a POAM.

    If the CLI flag `--create-poams/-cp` was provided (mapped to
    `self.create_poams`), force POAM for all created/updated issues.
    Otherwise, fall back to the default scanner behavior.

    :param IntegrationFinding finding: Finding being evaluated
    :return: True when the issue must be tracked as a POAM
    :rtype: bool
    """
    # getattr with a default never raises, so the original try/except
    # wrapper around this check was dead code and has been removed.
    if getattr(self, "create_poams", False):
        return True
    return super().is_poam(finding)
|
|
1583
|
+
|
|
1584
|
+
def create_or_update_issue_from_finding(
    self,
    title: str,
    finding: IntegrationFinding,
) -> regscale_models.Issue:
    """
    Create/update the issue, then set it as a child of the asset and attach affected controls and remediation.

    - Parent the issue to the asset (parentId=asset.id, parentModule='assets')
    - Populate affectedControls with all failed control IDs for the asset
    - Ensure remediationDescription contains Wiz remediationInstructions

    :param str title: Title for the issue
    :param IntegrationFinding finding: Finding the issue is derived from
    :return: The created or updated issue
    :rtype: regscale_models.Issue
    """
    # Defer to base to handle dedupe and asset identifier consolidation (newline-delimited)
    # The base class will now automatically handle finding.assessment_id -> issue.assessmentId
    issue = super().create_or_update_issue_from_finding(title, finding)

    # Post-processing for compliance-specific fields. The order matters:
    # controls and identifiers are set before the bulk save persists them.
    try:
        self._update_issue_affected_controls(issue, finding)
        issue.assetIdentifier = self._compute_consolidated_asset_identifier(issue, finding)
        self._set_control_and_assessment_ids(issue, finding)
        if getattr(self, "create_poams", False):
            # CLI flag --create-poams forces POAM tracking on every issue.
            issue.isPoam = True
        self._reparent_issue_to_asset(issue, finding)
        issue.save(bulk=True)
    except Exception as e:
        # Best-effort enrichment: log the failure and still return the base
        # issue rather than failing the whole sync.
        logger.error(f"Error in post-issue processing: {e}")
        import traceback

        logger.debug(traceback.format_exc())

    return issue
|
|
1616
|
+
|
|
1617
|
+
# -------- Helpers to reduce complexity --------
|
|
1618
|
+
def _update_issue_affected_controls(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
    """
    Populate the issue's affectedControls field from the finding.

    Prefers the finding's pre-built ``affected_controls`` string; otherwise
    derives one by comma-joining ``control_labels``. Leaves the issue
    untouched when neither source is present.

    :param regscale_models.Issue issue: Issue to update
    :param IntegrationFinding finding: Finding with control information
    :return: None
    :rtype: None
    """
    prebuilt = getattr(finding, "affected_controls", None)
    if prebuilt:
        issue.affectedControls = prebuilt
        return
    labels = getattr(finding, "control_labels", None)
    if labels:
        issue.affectedControls = ",".join(labels)
|
|
1631
|
+
|
|
1632
|
+
def _compute_consolidated_asset_identifier(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> str:
    """
    Build the newline-delimited set of asset identifiers for an issue.

    Merges three sources, deduplicated and sorted: resource IDs of every
    failed compliance item whose (normalized) control matches the finding's
    rule, identifiers already stored on the issue, and the finding's own
    asset identifier.

    :param regscale_models.Issue issue: Issue to consolidate identifiers for
    :param IntegrationFinding finding: Finding with asset information
    :return: Newline-delimited string of asset identifiers
    :rtype: str
    """
    delimiter = "\n"
    collected: set[str] = set()

    # Gather resource IDs from every failed item mapping to the same control.
    try:
        target_control = self._normalize_control_id_string(finding.rule_id)
        for failed_item in self.failed_compliance_items:
            try:
                item_control = self._normalize_control_id_string(getattr(failed_item, "control_id", None))
                resource = getattr(failed_item, "resource_id", None)
                if target_control and resource and item_control == target_control:
                    collected.add(resource)
            except Exception:
                # Skip malformed items rather than losing the whole set.
                continue
    except Exception:
        pass

    # Fold in identifiers already on the issue, then the current finding's.
    if issue.assetIdentifier:
        collected |= {part for part in (issue.assetIdentifier or "").split(delimiter) if part}
    if finding.asset_identifier:
        collected.add(finding.asset_identifier)
    return delimiter.join(sorted(collected))
|
|
1664
|
+
|
|
1665
|
+
def _set_control_and_assessment_ids(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
    """
    Stamp the matching control-implementation ID and assessment ID onto an issue.

    Looks up the implementation ID for the finding's (normalized) rule in the
    instance-level cache, then resolves the assessment either from the finding
    itself or from today's assessment cache for that implementation.
    Best-effort: any lookup failure leaves the issue unchanged.

    :param regscale_models.Issue issue: Issue to update
    :param IntegrationFinding finding: Finding with control information
    :return: None
    :rtype: None
    """
    try:
        normalized = self._normalize_control_id_string(finding.rule_id)
        implementation_id = None
        if normalized and hasattr(self, "_impl_id_by_control"):
            implementation_id = self._impl_id_by_control.get(normalized)
        if implementation_id:
            issue.controlId = implementation_id

        assessment_id = getattr(finding, "assessment_id", None)
        if not assessment_id and implementation_id and hasattr(self, "_assessment_by_impl_today"):
            assessment = self._assessment_by_impl_today.get(implementation_id)
            assessment_id = assessment.id if assessment else None
        if assessment_id:
            issue.assessmentId = assessment_id
    except Exception:
        # Enrichment only — never fail issue creation over a lookup error.
        pass
|
|
1689
|
+
|
|
1690
|
+
def _reparent_issue_to_asset(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
    """
    Make an issue a child record of its associated asset.

    Resolves the asset by the finding's identifier (creating it when missing)
    and, when an asset with an ID is available, points the issue's parent at
    it. Any failure leaves the issue's existing parent untouched.

    :param regscale_models.Issue issue: Issue to reparent
    :param IntegrationFinding finding: Finding with asset identifier
    :return: None
    :rtype: None
    """
    try:
        asset = self.get_asset_by_identifier(finding.asset_identifier)
        if not asset:
            asset = self._ensure_asset_for_finding(finding)
        if asset and getattr(asset, "id", None):
            issue.parentId = asset.id
            issue.parentModule = "assets"
    except Exception:
        # If asset lookup fails, keep existing parent
        pass
|
|
1709
|
+
|
|
1710
|
+
def _update_scan_history(self, scan_history: regscale_models.ScanHistory) -> None:
    """
    Update scan history with severity breakdown of deduped compliance issues.

    Counts each unique (resource, normalized control) pair exactly once,
    bucketed by mapped severity (anything below Moderate counts as Low),
    then persists the totals to the scan history record.

    :param regscale_models.ScanHistory scan_history: Scan history record
    """
    try:
        from regscale.core.app.utils.app_utils import get_current_datetime

        # Dedupe on (resource, canonical control) pairs.
        counted: set[tuple[str, str]] = set()
        buckets = {"Critical": 0, "High": 0, "Moderate": 0, "Low": 0}

        for item in self.failed_compliance_items:
            try:
                resource = (getattr(item, "resource_id", "") or "").lower()
                control = self._normalize_control_id_string(getattr(item, "control_id", "")) or ""
                if not resource or not control:
                    continue
                pair = (resource, control)
                if pair in counted:
                    continue
                counted.add(pair)

                severity = self._map_severity(getattr(item, "severity", None))
                if severity == regscale_models.IssueSeverity.Critical:
                    buckets["Critical"] += 1
                elif severity == regscale_models.IssueSeverity.High:
                    buckets["High"] += 1
                elif severity == regscale_models.IssueSeverity.Moderate:
                    buckets["Moderate"] += 1
                else:
                    # Everything below Moderate is folded into Low.
                    buckets["Low"] += 1
            except Exception:
                continue

        scan_history.vCritical = buckets["Critical"]
        scan_history.vHigh = buckets["High"]
        scan_history.vMedium = buckets["Moderate"]
        scan_history.vLow = buckets["Low"]
        scan_history.vInfo = 0

        scan_history.dateLastUpdated = get_current_datetime()
        scan_history.save()
        logger.info(
            "Updated scan history %s (Critical: %s, High: %s, Medium: %s, Low: %s)",
            getattr(scan_history, "id", 0),
            buckets["Critical"],
            buckets["High"],
            buckets["Moderate"],
            buckets["Low"],
        )
    except Exception as e:
        logger.error(f"Error updating scan history: {e}")
|
|
1764
|
+
|
|
1765
|
+
|
|
1766
|
+
def resolve_framework_id(framework_input: str) -> str:
    """
    Resolve framework input to actual Wiz framework ID.

    Supports:
    - Direct framework IDs (wf-id-4)
    - Shorthand names (nist, aws, soc2)
    - Partial matches (case insensitive)

    Exits the CLI via ``error_and_exit`` when the input cannot be resolved.

    :param str framework_input: User input for framework
    :return: Resolved framework ID
    :rtype: str
    """
    if not framework_input or not framework_input.strip():
        error_and_exit("Framework input cannot be empty. Use --list-frameworks to see available options.")

    framework_input = framework_input.lower().strip()

    # A literal framework ID must exist in the known mappings.
    if framework_input.startswith("wf-id-"):
        if framework_input in FRAMEWORK_MAPPINGS:
            return framework_input
        error_and_exit(f"Unknown framework ID: {framework_input}")

    # Exact shorthand lookup.
    if framework_input in FRAMEWORK_SHORTCUTS:
        return FRAMEWORK_SHORTCUTS[framework_input]

    # Substring match against the shorthand names.
    for shorthand, framework_id in FRAMEWORK_SHORTCUTS.items():
        if framework_input in shorthand:
            return framework_id

    # Substring match against the full framework names (case insensitive).
    for framework_id, framework_name in FRAMEWORK_MAPPINGS.items():
        if framework_input in framework_name.lower():
            return framework_id

    error_and_exit(f"Could not resolve framework: '{framework_input}'. Use --list-frameworks to see available options.")
|
|
1807
|
+
|
|
1808
|
+
|
|
1809
|
+
def list_available_frameworks() -> str:
    """
    Generate a formatted list of available frameworks.

    Sections: quick shortcuts (first 10), all frameworks grouped by category,
    and CLI usage examples.

    :return: Formatted framework list
    :rtype: str
    """
    lines = ["🔒 Available Wiz Compliance Frameworks", "=" * 50]

    # Shorthand aliases first (capped at 10 to keep the header short).
    lines.append("\n📋 Quick Shortcuts:")
    lines.append("-" * 20)
    shortcut_items = sorted(FRAMEWORK_SHORTCUTS.items())
    for shorthand, framework_id in shortcut_items[:10]:
        framework_name = FRAMEWORK_MAPPINGS.get(framework_id, "Unknown")
        lines.append(f" {shorthand:<15} → {framework_name}")
    if len(shortcut_items) > 10:
        lines.append(f" ... and {len(shortcut_items) - 10} more shortcuts")

    # Full catalog grouped by category.
    lines.append("\n📚 All Frameworks by Category:")
    lines.append("-" * 35)
    for category, framework_ids in FRAMEWORK_CATEGORIES.items():
        lines.append(f"\n🏷️ {category}:")
        for framework_id in framework_ids:
            if framework_id in FRAMEWORK_MAPPINGS:
                lines.append(f" {framework_id:<12} → {FRAMEWORK_MAPPINGS[framework_id]}")

    # CLI usage examples.
    lines.extend(
        [
            "\n💡 Usage Examples:",
            "-" * 18,
            " # Using shortcuts:",
            " regscale wiz sync-policy-compliance -f nist",
            " regscale wiz sync-policy-compliance -f aws",
            " regscale wiz sync-policy-compliance -f soc2",
            "",
            " # Using full framework IDs:",
            " regscale wiz sync-policy-compliance -f wf-id-4",
            " regscale wiz sync-policy-compliance -f wf-id-197",
            "",
            " # Using partial names (case insensitive):",
            " regscale wiz sync-policy-compliance -f 'nist 800-53'",
            " regscale wiz sync-policy-compliance -f kubernetes",
        ]
    )

    return "\n".join(lines)
|