regscale-cli 6.21.1.0__py3-none-any.whl → 6.21.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of regscale-cli might be problematic.
- regscale/_version.py +1 -1
- regscale/core/app/application.py +8 -0
- regscale/integrations/commercial/__init__.py +8 -8
- regscale/integrations/commercial/import_all/import_all_cmd.py +2 -2
- regscale/integrations/commercial/microsoft_defender/__init__.py +0 -0
- regscale/integrations/commercial/{defender.py → microsoft_defender/defender.py} +38 -612
- regscale/integrations/commercial/microsoft_defender/defender_api.py +286 -0
- regscale/integrations/commercial/microsoft_defender/defender_constants.py +80 -0
- regscale/integrations/commercial/microsoft_defender/defender_scanner.py +168 -0
- regscale/integrations/commercial/qualys/__init__.py +24 -86
- regscale/integrations/commercial/qualys/containers.py +2 -0
- regscale/integrations/commercial/qualys/scanner.py +7 -2
- regscale/integrations/commercial/sonarcloud.py +110 -71
- regscale/integrations/commercial/wizv2/click.py +4 -1
- regscale/integrations/commercial/wizv2/data_fetcher.py +401 -0
- regscale/integrations/commercial/wizv2/finding_processor.py +295 -0
- regscale/integrations/commercial/wizv2/policy_compliance.py +1471 -204
- regscale/integrations/commercial/wizv2/policy_compliance_helpers.py +564 -0
- regscale/integrations/commercial/wizv2/scanner.py +4 -4
- regscale/integrations/compliance_integration.py +213 -61
- regscale/integrations/public/fedramp/fedramp_five.py +92 -7
- regscale/integrations/scanner_integration.py +27 -4
- regscale/models/__init__.py +1 -1
- regscale/models/integration_models/cisa_kev_data.json +79 -3
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/regscale_models/issue.py +29 -9
- regscale/models/regscale_models/milestone.py +15 -14
- {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.1.dist-info}/METADATA +1 -1
- {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.1.dist-info}/RECORD +33 -28
- tests/regscale/test_authorization.py +0 -65
- tests/regscale/test_init.py +0 -96
- {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.1.dist-info}/LICENSE +0 -0
- {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.1.dist-info}/WHEEL +0 -0
- {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.1.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.1.dist-info}/top_level.txt +0 -0
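
Most of the changes land in regscale/integrations/commercial/wizv2/policy_compliance.py, which now overrides sync_compliance so that control assessments are created before issues are processed, allowing controlId and assessmentId to be set on each issue. A condensed ordering sketch follows, using only the helper names that appear in the diff below; it is a summary, not a runnable excerpt of the package:

    def sync_compliance(self) -> None:
        # Ordering sketch only; method names taken from the diff below.
        scan_history = self.create_scan_history()        # no-op for compliance report ingest
        self.process_compliance_data()
        self._sync_assets()                               # step 1: assets (from the separate inventory import)
        self._populate_control_implementation_cache()     # step 2: cache control implementations
        self._sync_control_assessments()                  # step 3: assessments before issues
        self._refresh_assessment_cache_after_creation()   # step 3.5: pick up new assessment IDs
        self._sync_issues()                               # step 4: issues with controlId/assessmentId set
        self._finalize_scan_history(scan_history)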
@@ -9,8 +9,8 @@ import re
 from datetime import datetime
 from typing import Dict, List, Optional, Iterator, Any
 
-from regscale.core.app.utils.app_utils import error_and_exit, check_license
 from regscale.core.app.application import Application
+from regscale.core.app.utils.app_utils import error_and_exit, check_license, get_current_datetime
 from regscale.integrations.commercial.wizv2.async_client import run_async_queries
 from regscale.integrations.commercial.wizv2.constants import (
     WizVulnerabilityType,
@@ -20,23 +20,37 @@ from regscale.integrations.commercial.wizv2.constants import (
     FRAMEWORK_SHORTCUTS,
     FRAMEWORK_CATEGORIES,
 )
+from regscale.integrations.commercial.wizv2.data_fetcher import PolicyAssessmentFetcher
+from regscale.integrations.commercial.wizv2.finding_processor import (
+    FindingConsolidator,
+    FindingToIssueProcessor,
+)
+from regscale.integrations.commercial.wizv2.policy_compliance_helpers import (
+    ControlImplementationCache,
+    AssetConsolidator,
+    IssueFieldSetter,
+    ControlAssessmentProcessor,
+)
 from regscale.integrations.commercial.wizv2.wiz_auth import wiz_authenticate
 from regscale.integrations.compliance_integration import ComplianceIntegration, ComplianceItem
 from regscale.integrations.scanner_integration import (
     ScannerIntegrationType,
     IntegrationAsset,
     IntegrationFinding,
+    issue_due_date,
 )
+from regscale.integrations.variables import ScannerVariables
 from regscale.models import regscale_models
 
 logger = logging.getLogger("regscale")
 
 
+# Constants for file operations
 JSON_FILE_EXT = ".json"
 JSONL_FILE_EXT = ".jsonl"
-
-
-
+MAX_DISPLAY_ASSETS = 10  # Maximum number of asset names to display in descriptions
+CACHE_CLEANUP_KEEP_COUNT = 5  # Number of recent cache files to keep during cleanup
+WIZ_URL = "https://api.wiz.io/graphql"
 
 # Safer, linear-time regex for control-id normalization.
 # Examples supported: 'AC-4', 'AC-4(2)', 'AC-4 (2)', 'AC-4-2', 'AC-4 2'
@@ -83,8 +97,8 @@ class WizComplianceItem(ComplianceItem):
         filtered = [
             sc for sc in subcategories if sc.get("category", {}).get("framework", {}).get("id") == target_framework_id
         ]
-        #
-        return filtered
+        # Return filtered results - if empty, the control_id will be empty (framework filtering working as intended)
+        return filtered
 
     @property
     def resource_id(self) -> str:
@@ -178,12 +192,17 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
 
     title = "Wiz Policy Compliance Integration"
     type = ScannerIntegrationType.CONTROL_TEST
-    #
-
+    # Use wizId field for asset identification (matches other Wiz integrations)
+    asset_identifier_field = "wizId"
+    issue_identifier_field = "wizId"
+
+    # Do not create assets - they come from separate inventory import
+    options_map_assets_to_components: bool = False
     # Do not create vulnerabilities from compliance policy results
     create_vulnerabilities: bool = False
-    #
-    enable_scan_history: bool =
+    # Do not create scan history - this is compliance report ingest, not a vulnerability scan
+    enable_scan_history: bool = False
+
     # Control whether JSONL control-centric export is written alongside JSON
     write_jsonl_output: bool = False
 
@@ -199,6 +218,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
         create_issues: bool = True,
         update_control_status: bool = True,
         create_poams: bool = False,
+        regscale_module: Optional[str] = "securityplans",
         **kwargs,
     ):
         """
@@ -214,9 +234,11 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
         :param bool create_issues: Whether to create issues for failed compliance
         :param bool update_control_status: Whether to update control implementation status
         :param bool create_poams: Whether to mark issues as POAMs
+        :param Optional[str] regscale_module: RegScale module string (overrides default parent_module)
         """
         super().__init__(
             plan_id=plan_id,
+            parent_module=regscale_module,
             catalog_id=catalog_id,
             framework=self._map_framework_id_to_name(framework_id),
             create_issues=create_issues,
@@ -226,6 +248,10 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             **kwargs,
         )
 
+        # Override parent_module if regscale_module is provided
+        if regscale_module:
+            self.parent_module = regscale_module
+
         self.wiz_project_id = wiz_project_id
         self.client_id = client_id
         self.client_secret = client_secret
@@ -245,21 +271,87 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             self.policy_cache_dir, f"policy_assessments_{wiz_project_id}_{framework_id}.json"
         )
 
+        # Initialize helper classes for cleaner code organization
+        self._control_cache = ControlImplementationCache()
+        self._asset_consolidator = AssetConsolidator()
+        self._issue_field_setter = IssueFieldSetter(self._control_cache, plan_id, regscale_module or "securityplans")
+        self._finding_consolidator = FindingConsolidator(self)
+        self._finding_processor = FindingToIssueProcessor(self)
+        self._assessment_processor = ControlAssessmentProcessor(
+            plan_id,
+            regscale_module or "securityplans",
+            self.scan_date,
+            self.title,
+            self._map_framework_id_to_name(framework_id),
+        )
+
     def fetch_compliance_data(self) -> List[Any]:
         """
-        Fetch compliance data from Wiz GraphQL API
+        Fetch compliance data from Wiz GraphQL API and filter to framework-specific
+        items for existing assets only.
 
-        :return: List of raw compliance data
+        :return: List of filtered raw compliance data
         :rtype: List[Any]
         """
         # Authenticate if not already done
         if not self.access_token:
             self.authenticate_wiz()
 
-        #
-
-
-
+        # Load existing assets early for filtering
+        self._load_regscale_assets()
+
+        # Use the data fetcher for cleaner code
+        fetcher = PolicyAssessmentFetcher(
+            wiz_endpoint=self.wiz_endpoint or WIZ_URL,
+            access_token=self.access_token,
+            wiz_project_id=self.wiz_project_id,
+            framework_id=self.framework_id,
+            cache_duration_minutes=self.cache_duration_minutes,
+        )
+
+        all_policy_assessments = fetcher.fetch_policy_assessments()
+
+        if not all_policy_assessments:
+            logger.info("No policy assessments fetched from Wiz")
+            self.raw_policy_assessments = []
+            return []
+
+        # Filter to only items with existing assets in RegScale
+        filtered_assessments = self._filter_assessments_to_existing_assets(all_policy_assessments)
+
+        self.raw_policy_assessments = filtered_assessments
+        return filtered_assessments
+
+    def _filter_assessments_to_existing_assets(self, assessments: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """
+        Filter assessments to only include items with existing assets and control IDs.
+
+        :param assessments: List of raw assessments from Wiz
+        :return: Filtered list of assessments
+        """
+        assets_exist = getattr(self, "_regscale_assets_by_wiz_id", {})
+        filtered_assessments = []
+        skipped_no_control = 0
+        skipped_no_asset = 0
+
+        for assessment in assessments:
+            # Convert to compliance item to check framework and asset existence
+            temp_item = WizComplianceItem(assessment, self)
+
+            # Skip if no control ID (not in selected framework)
+            if not temp_item.control_id:
+                skipped_no_control += 1
+                continue
+
+            # Skip if asset doesn't exist in RegScale (use cached lookup)
+            if temp_item.resource_id not in assets_exist:
+                skipped_no_asset += 1
+                continue
+
+            filtered_assessments.append(assessment)
+        logger.debug(f"Skipped {skipped_no_control} assessments with no control ID for framework.")
+        logger.debug(f"Skipped {skipped_no_asset} assessments with no existing asset in RegScale.")
+        return filtered_assessments
 
     def create_compliance_item(self, raw_data: Any) -> ComplianceItem:
         """
@@ -309,66 +401,268 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
 
     def fetch_assets(self, *args, **kwargs) -> Iterator[IntegrationAsset]:
         """
-
-
-
+        No assets are created in policy compliance integration.
+        Assets come from separate Wiz inventory import.
+        """
+        return iter([])
+
+    def fetch_findings(self, *args, **kwargs) -> Iterator[IntegrationFinding]:
+        """
+        Create consolidated findings grouped by control, with all affected resources under each control.
 
-
-
-        - Always yield unique assets for bulk upsert (create or update)
+        This approach groups by control first, then collects all resources that fail that control.
+        This results in one finding per control with multiple resources, making consolidation much easier.
         """
-
+        if not self.failed_compliance_items:
+            return
 
-        #
-        self.
+        # Use the finding consolidator for cleaner code
+        yield from self._finding_consolidator.create_consolidated_findings(self.failed_compliance_items)
 
-
-
-
-
-
+    def _get_all_control_ids_for_compliance_item(self, compliance_item: WizComplianceItem) -> List[str]:
+        """
+        Get ALL control IDs that a compliance item maps to.
+
+        Wiz policies can map to multiple controls (e.g., one policy failure might affect
+        AC-4(2), AC-4(4), and SC-28(1) controls). This method returns all of them.
 
-
-
-
-
-
-
-        asset.component_names.append(component_name)
+        :param WizComplianceItem compliance_item: Compliance item to extract control IDs from
+        :return: List of control IDs this policy maps to
+        :rtype: List[str]
+        """
+        if not compliance_item.policy:
+            return []
 
-
-
+        subcategories = compliance_item._get_filtered_subcategories()
+        if not subcategories:
+            return []
 
-
+        # Extract control IDs and deduplicate in one pass
+        unique_control_ids = []
+        seen = set()
+
+        for subcat in subcategories:
+            external_id = subcat.get("externalId", "")
+            if external_id and external_id not in seen:
+                seen.add(external_id)
+                unique_control_ids.append(external_id)
+
+        return unique_control_ids
+
+    def _group_compliance_items_by_control(self) -> Dict[str, Dict[str, WizComplianceItem]]:
         """
-
+        Group failed compliance items by control ID.
 
-
+        :return: Dictionary mapping control IDs to resource dictionaries
+        :rtype: Dict[str, Dict[str, WizComplianceItem]]
         """
-
+        control_to_resources = {}  # {control_id: {resource_id: compliance_item}}
 
-        seen_keys: set[tuple[str, str]] = set()
         for compliance_item in self.failed_compliance_items:
             if not isinstance(compliance_item, WizComplianceItem):
-                finding = super().create_finding_from_compliance_item(compliance_item)
-                if finding:
-                    yield finding
                 continue
 
             asset_id = (compliance_item.resource_id or "").lower()
-
-            if not asset_id or not control:
+            if not asset_id:
                 continue
 
-
-
+            # Get ALL control IDs that this policy assessment maps to
+            all_control_ids = self._get_all_control_ids_for_compliance_item(compliance_item)
+            if not all_control_ids:
                 continue
-            seen_keys.add(key)
 
-
+            # Add this resource to each control it fails
+            for control_id in all_control_ids:
+                control = control_id.upper()
+
+                if control not in control_to_resources:
+                    control_to_resources[control] = {}
+
+                # Use the first compliance item we find for this resource-control pair
+                # (there might be duplicates from multiple policy assessments)
+                if asset_id not in control_to_resources[control]:
+                    control_to_resources[control][asset_id] = compliance_item
+
+        return control_to_resources
+
+    def _create_consolidated_findings(
+        self, control_to_resources: Dict[str, Dict[str, WizComplianceItem]]
+    ) -> Iterator[IntegrationFinding]:
+        """
+        Create consolidated findings from grouped control-resource mappings.
+
+        :param Dict[str, Dict[str, WizComplianceItem]] control_to_resources: Control groupings
+        :yield: Consolidated findings
+        :rtype: Iterator[IntegrationFinding]
+        """
+        for control_id, resources in control_to_resources.items():
+
+            # Use the first compliance item as the base for this control's finding
+            base_compliance_item = next(iter(resources.values()))
+
+            # Create a consolidated finding for this control
+            finding = self._create_consolidated_finding_for_control(
+                control_id=control_id, compliance_item=base_compliance_item, affected_resources=list(resources.keys())
+            )
+
             if finding:
                 yield finding
 
+    def _create_consolidated_finding_for_control(
+        self, control_id: str, compliance_item: WizComplianceItem, affected_resources: List[str]
+    ) -> Optional[IntegrationFinding]:
+        """
+        Create a consolidated finding for a control with all affected resources.
+
+        :param str control_id: The control ID (e.g., 'AC-4(2)')
+        :param WizComplianceItem compliance_item: Base compliance item for this control
+        :param List[str] affected_resources: List of Wiz resource IDs that fail this control
+        :return: Consolidated finding with all affected resources
+        :rtype: Optional[IntegrationFinding]
+        """
+        # Filter to only resources that exist as assets in RegScale
+        asset_mappings = self._build_asset_mappings(affected_resources)
+
+        if not asset_mappings:
+            return None
+
+        # Create the base finding using the control-specific approach
+        finding = self._create_finding_for_specific_control(compliance_item, control_id)
+        if not finding:
+            return None
+
+        # Update the asset identifier and description with consolidated info
+        self._update_finding_with_consolidated_assets(finding, asset_mappings)
+        return finding
+
+    def _build_asset_mappings(self, resource_ids: List[str]) -> Dict[str, Dict[str, str]]:
+        """
+        Build asset mappings for resources that exist in RegScale.
+
+        :param List[str] resource_ids: List of Wiz resource IDs
+        :return: Mapping of resource IDs to asset information
+        :rtype: Dict[str, Dict[str, str]]
+        """
+        asset_mappings = {}
+
+        for resource_id in resource_ids:
+            if self._asset_exists_in_regscale(resource_id):
+                asset = self.get_asset_by_identifier(resource_id)
+                if asset and asset.name:
+                    asset_mappings[resource_id] = {"name": asset.name, "wiz_id": resource_id}
+                else:
+                    # Fallback to resource ID if asset name not found
+                    asset_mappings[resource_id] = {"name": resource_id, "wiz_id": resource_id}
+
+        return asset_mappings
+
+    def _update_finding_with_consolidated_assets(
+        self, finding: IntegrationFinding, asset_mappings: Dict[str, Dict[str, str]]
+    ) -> None:
+        """
+        Update a finding with consolidated asset information.
+
+        :param IntegrationFinding finding: Finding to update
+        :param Dict[str, Dict[str, str]] asset_mappings: Asset mapping information
+        :return: None
+        :rtype: None
+        """
+        # Update the asset identifier to include all asset names (clean format for POAMs)
+        consolidated_asset_identifier = self._create_consolidated_asset_identifier(asset_mappings)
+        finding.asset_identifier = consolidated_asset_identifier
+
+        # Update finding description to indicate multiple resources
+        asset_names = [info["name"] for info in asset_mappings.values()]
+        if len(asset_names) > 1:
+            finding.description = f"{finding.description}\n\nThis control failure affects {len(asset_names)} assets: {', '.join(asset_names[:MAX_DISPLAY_ASSETS])}"
+            if len(asset_names) > MAX_DISPLAY_ASSETS:
+                finding.description += f" (and {len(asset_names) - MAX_DISPLAY_ASSETS} more)"
+
+    def _create_finding_for_specific_control(
+        self, compliance_item: WizComplianceItem, control_id: str
+    ) -> Optional[IntegrationFinding]:
+        """
+        Create a finding for a specific control ID from a compliance item.
+
+        This is similar to create_finding_from_compliance_item but ensures the finding
+        uses the specific control ID rather than just the first one.
+
+        :param WizComplianceItem compliance_item: Source compliance item
+        :param str control_id: Specific control ID to create finding for
+        :return: Integration finding for this specific control
+        :rtype: Optional[IntegrationFinding]
+        """
+        try:
+            control_labels = [control_id] if control_id else []
+            severity = self._map_severity(compliance_item.severity)
+            policy_name = self._get_policy_name(compliance_item)
+            title = f"{policy_name} ({control_id})" if control_id else policy_name
+            description = self._compose_description(policy_name, compliance_item)
+
+            finding = self._build_finding(
+                control_labels=control_labels,
+                title=title,
+                description=description,
+                severity=severity,
+                compliance_item=compliance_item,
+            )
+
+            # Set the specific control ID for this finding
+            finding.rule_id = control_id
+            finding.affected_controls = self._normalize_control_id_string(control_id)
+
+            # Ensure unique external_id for each control to prevent unwanted updates
+            finding.external_id = f"wiz-policy-control-{control_id.upper()}-{self.framework_id}"
+
+            self._set_assessment_id_if_available(finding, compliance_item)
+            return finding
+
+        except Exception as e:
+            logger.error(f"Error creating finding for control {control_id}: {e}")
+            return None
+
+    def _asset_exists_in_regscale(self, resource_id: str) -> bool:
+        """
+        Check if an asset with the given Wiz resource ID exists in RegScale.
+
+        :param str resource_id: Wiz resource ID to check (stored in RegScale asset wizId field)
+        :return: True if asset exists, False otherwise
+        :rtype: bool
+        """
+        if not resource_id:
+            return False
+
+        try:
+            # Check if we have a cached lookup of existing assets
+            if not hasattr(self, "_regscale_assets_by_wiz_id"):
+                self._load_regscale_assets()
+
+            return resource_id in self._regscale_assets_by_wiz_id
+        except Exception:
+            return False
+
+    def _load_regscale_assets(self) -> None:
+        """
+        Load all existing assets from RegScale into a Wiz ID-based lookup cache.
+        Wiz resource IDs are stored in the RegScale asset wizId field.
+        """
+        try:
+            logger.info("Loading existing assets from RegScale for asset existence checks...")
+            # Get all assets for the current plan
+            existing_assets = regscale_models.Asset.get_all_by_parent(
+                parent_id=self.plan_id,
+                parent_module=self.parent_module,
+            )
+
+            # Create Wiz ID-based lookup cache (Wiz resource ID -> RegScale asset)
+            self._regscale_assets_by_wiz_id = {asset.wizId: asset for asset in existing_assets if asset.wizId}
+            logger.info(f"Loaded {len(self._regscale_assets_by_wiz_id)} existing assets for lookup")
+
+        except Exception as e:
+            logger.error(f"Error loading RegScale assets: {e}")
+            # Initialize empty cache to avoid repeated failures
+            self._regscale_assets_by_wiz_id = {}
+
     def _map_framework_id_to_name(self, framework_id: str) -> str:
         """
         Map framework ID to framework name.
@@ -531,7 +825,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             first_seen=self.scan_date,
             last_seen=self.scan_date,
             scan_date=self.scan_date,
-            asset_identifier=compliance_item
+            asset_identifier=self._get_regscale_asset_identifier(compliance_item),
             vulnerability_type="Policy Compliance Violation",
             rule_id=compliance_item.control_id,
             baseline=compliance_item.framework,
@@ -567,9 +861,8 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             assess = self._assessment_by_impl_today.get(impl_id)
             if assess:
                 finding.assessment_id = assess.id
-
-
-            logger.debug(f"Error setting finding assessment ID: {e}")
+        except Exception:
+            pass
 
     def create_asset_from_compliance_item(self, compliance_item: ComplianceItem) -> Optional[IntegrationAsset]:
         """
@@ -616,7 +909,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
 
         asset = IntegrationAsset(
             name=compliance_item.resource_name,
-            identifier=compliance_item.
+            identifier=compliance_item.resource_name,  # Use name only without UUID
             external_id=compliance_item.resource_id,
             other_tracking_number=compliance_item.resource_id,  # For deduplication
             asset_type=asset_type,
@@ -640,8 +933,8 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             return None
 
     def create_scan_history(self):  # type: ignore[override]
-        """
-        return
+        """No scan history created for compliance report ingest."""
+        return None
 
     def _create_asset_notes(self, compliance_item: WizComplianceItem) -> str:
         """
@@ -741,12 +1034,11 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
         # Try cache first unless forced refresh
         cached_nodes = self._load_assessments_from_cache()
         if cached_nodes is not None:
-            logger.info(
+            logger.info("Using cached Wiz policy assessments")
             return cached_nodes
 
         # Only include variables supported by the query (avoid validation errors)
         page_size = 100
-        logger.info(f"Using Wiz policy assessments page size (first): {page_size}")
         base_variables = {"first": page_size}
 
         # Try multiple filter key variants to avoid schema differences across tenants
@@ -768,7 +1060,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
                 total=1,
             )
             results = run_async_queries(
-                endpoint=self.wiz_endpoint or
+                endpoint=self.wiz_endpoint or WIZ_URL,
                 headers=headers,
                 query_configs=[
                     {
@@ -863,7 +1155,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
         # If endpoint is not set (tests), short-circuit to async path mock
         if not self.wiz_endpoint:
             results = run_async_queries(
-                endpoint=
+                endpoint=WIZ_URL,
                 headers=headers,
                 query_configs=[
                     {
@@ -892,7 +1184,6 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
                 )
             except Exception as exc:  # noqa: BLE001 - propagate last error
                 last_error = exc
-                logger.debug(f"Filter variant {fv} failed: {exc}")
 
         msg = f"Failed to fetch policy assessments after trying all filter variants: {last_error}"
         logger.error(msg)
@@ -929,9 +1220,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             variant_name = self._variant_name(filter_variant)
             progress.update(
                 task,
-                description=(
-                    f"[#f68d1f]Fetching Wiz policy assessments (limit: {page_size}, " f"variant: {variant_name})..."
-                ),
+                description=(f"[#f68d1f]Fetching Wiz policy assessments (limit: {page_size}, variant: {variant_name})..."),
                 advance=1,
             )
 
@@ -953,11 +1242,12 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             filtered_nodes = self._filter_nodes_to_framework(nodes)
             progress.update(
                 task,
-                description=f"[green]
+                description=f"[green]Completed Wiz policy assessments: {len(filtered_nodes)} nodes",
                 completed=1,
                 total=1,
             )
-            logger.info(
+            logger.info("Successfully fetched Wiz policy assessments")
+
             return filtered_nodes
 
     def _execute_wiz_policy_query_paginated(
@@ -1158,12 +1448,10 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
                 for control_id, ctrl in control_agg.items():
                     jf.write(json.dumps(ctrl, ensure_ascii=False) + "\n")
                 logger.info(f"Policy compliance JSONL written to: {file_path_jsonl}")
-
-            self._cleanup_artifacts(artifacts_dir, keep=5)
+            self._cleanup_artifacts(artifacts_dir, keep=CACHE_CLEANUP_KEEP_COUNT)
             return file_path
 
         except Exception as e:
-            logger.error(f"Failed to write policy data to JSON: {str(e)}")
             error_and_exit(f"Failed to write policy data to JSON: {str(e)}")
 
     def _build_control_aggregation(self) -> Dict[str, Dict[str, Any]]:
@@ -1300,7 +1588,7 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             logger.error(f"Error parsing JSONL {jsonl_path}: {exc}")
         return aggregated
 
-    def _cleanup_artifacts(self, dir_path: str, keep: int =
+    def _cleanup_artifacts(self, dir_path: str, keep: int = CACHE_CLEANUP_KEEP_COUNT) -> None:
         """
         Keep the most recent JSON and JSONL policy_compliance_report files, delete older ones.
 
@@ -1330,8 +1618,8 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
                 except Exception:
                     # Non-fatal; continue cleanup
                     pass
-        except Exception
-
+        except Exception:
+            pass
 
     def load_or_create_framework_mapping(self) -> Dict[str, str]:
         """
@@ -1439,7 +1727,6 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             return nodes
 
         except Exception as e:
-            logger.error(f"Failed to fetch security frameworks: {str(e)}")
             error_and_exit(f"Failed to fetch security frameworks: {str(e)}")
 
     def _create_framework_mapping(self, frameworks: List[Dict[str, Any]]) -> Dict[str, str]:
@@ -1515,6 +1802,45 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
 
         return self.framework_mapping.get(framework_id, framework_id)
 
+    def sync_compliance(self) -> None:
+        """
+        Override base sync_compliance to ensure proper order for controlId/assessmentId assignment.
+
+        CRITICAL: Control assessments MUST be created BEFORE issues are processed
+        to ensure controlId and assessmentId can be properly set.
+        """
+        logger.info(f"Starting {self.title} compliance sync with proper assessment ordering...")
+
+        try:
+            scan_history = self.create_scan_history()
+            self.process_compliance_data()
+
+            # Step 1: Sync assets first
+            self._sync_assets()
+
+            # Step 2: CRITICAL - Pre-populate control implementation cache BEFORE creating assessments
+            logger.info("Pre-populating control implementation cache for issue processing...")
+            self._populate_control_implementation_cache()
+
+            # Step 3: Create control assessments BEFORE issues (ensures assessmentId is available)
+            logger.info("Creating control assessments BEFORE issue processing...")
+            self._sync_control_assessments()
+
+            # Step 3.5: CRITICAL - Refresh assessment cache after assessments are created
+            logger.info("Refreshing assessment cache with newly created assessments...")
+            self._refresh_assessment_cache_after_creation()
+
+            # Step 4: NOW process issues with controlId and assessmentId properly set
+            logger.info("Processing issues with control and assessment IDs available...")
+            self._sync_issues()
+
+            self._finalize_scan_history(scan_history)
+
+            logger.info(f"Completed {self.title} compliance sync with proper assessment ordering")
+
+        except Exception as e:
+            error_and_exit(f"Error during compliance sync: {e}")
+
     def sync_policy_compliance(self, create_issues: bool = None, update_control_status: bool = None) -> None:
         """
         Main method to sync policy compliance data from Wiz.
@@ -1543,8 +1869,11 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
         if update_control_status is not None:
             self.update_control_status = update_control_status
 
-        # Step 3:
-
+        # Step 3: Sync using the overridden method (which ensures proper ordering)
+        logger.info(
+            f"Sync parameters: create_issues={self.create_issues}, update_control_status={self.update_control_status}"
+        )
+
         self.sync_compliance()
 
         # Step 4: Write data to JSON file for reference (post-processing)
@@ -1554,7 +1883,6 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
             logger.info("Policy compliance sync completed successfully")
 
         except Exception as e:
-            logger.error(f"Policy compliance sync failed: {str(e)}")
             error_and_exit(f"Policy compliance sync failed: {str(e)}")
 
     def sync_wiz_compliance(self) -> None:
@@ -1587,180 +1915,1119 @@ class WizPolicyComplianceIntegration(ComplianceIntegration):
|
|
|
1587
1915
|
finding: IntegrationFinding,
|
|
1588
1916
|
) -> regscale_models.Issue:
|
|
1589
1917
|
"""
|
|
1590
|
-
Create/update the issue
|
|
1918
|
+
Create/update the issue with ALL fields set BEFORE saving.
|
|
1919
|
+
|
|
1920
|
+
This method ensures proper data flow:
|
|
1921
|
+
1. Check for existing issues to prevent duplicates
|
|
1922
|
+
2. Pre-populate compliance fields on the finding
|
|
1923
|
+
3. Use parent class logic which saves with all fields set
|
|
1591
1924
|
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
- Ensure remediationDescription contains Wiz remediationInstructions
|
|
1925
|
+
This fixes the duplicate issue creation problem by using proper
|
|
1926
|
+
duplicate detection and avoids double-saving.
|
|
1595
1927
|
"""
|
|
1596
|
-
#
|
|
1597
|
-
|
|
1598
|
-
issue = super().create_or_update_issue_from_finding(title, finding)
|
|
1928
|
+
# Load cache if not already loaded for duplicate detection
|
|
1929
|
+
self._load_existing_records_cache()
|
|
1599
1930
|
|
|
1600
|
-
#
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
issue.assetIdentifier = self._compute_consolidated_asset_identifier(issue, finding)
|
|
1604
|
-
self._set_control_and_assessment_ids(issue, finding)
|
|
1605
|
-
if getattr(self, "create_poams", False):
|
|
1606
|
-
issue.isPoam = True
|
|
1607
|
-
self._reparent_issue_to_asset(issue, finding)
|
|
1608
|
-
issue.save(bulk=True)
|
|
1609
|
-
except Exception as e:
|
|
1610
|
-
logger.error(f"Error in post-issue processing: {e}")
|
|
1611
|
-
import traceback
|
|
1931
|
+
# CRITICAL: Pre-populate compliance fields on the finding BEFORE parent call
|
|
1932
|
+
# This ensures the parent class saves the issue with all fields already set
|
|
1933
|
+
self._populate_compliance_fields_on_finding(finding)
|
|
1612
1934
|
|
|
1613
|
-
|
|
1935
|
+
# CRITICAL FIX: If assessment_id is set, prepare the finding for assessment parenting
|
|
1936
|
+
if hasattr(finding, "assessment_id") and finding.assessment_id:
|
|
1937
|
+
assessment_id = finding.assessment_id
|
|
1938
|
+
logger.debug(f"PRE-SETTING ASSESSMENT PARENT: assessmentId={assessment_id}")
|
|
1614
1939
|
|
|
1615
|
-
|
|
1940
|
+
# Add parent override fields to the finding for the ScannerIntegration to use
|
|
1941
|
+
finding._override_parent_id = assessment_id
|
|
1942
|
+
finding._override_parent_module = "assessments"
|
|
1943
|
+
|
|
1944
|
+
logger.debug(f" ✅ Finding will use parent: assessments #{assessment_id}")
|
|
1945
|
+
|
|
1946
|
+
# Check for existing issue by external_id first
|
|
1947
|
+
external_id = finding.external_id
|
|
1948
|
+
existing_issue = self._find_existing_issue_cached(external_id)
|
|
1616
1949
|
|
|
1617
|
-
|
|
1618
|
-
|
|
1950
|
+
if existing_issue:
|
|
1951
|
+
return self._update_existing_issue_with_compliance_fields(existing_issue, title, finding)
|
|
1952
|
+
else:
|
|
1953
|
+
# Set finding context for our override method to access
|
|
1954
|
+
self._current_finding_context = finding
|
|
1955
|
+
try:
|
|
1956
|
+
# Parent class will now create/save the issue with compliance fields already set
|
|
1957
|
+
return super().create_or_update_issue_from_finding(title, finding)
|
|
1958
|
+
finally:
|
|
1959
|
+
# Clean up context
|
|
1960
|
+
if hasattr(self, "_current_finding_context"):
|
|
1961
|
+
delattr(self, "_current_finding_context")
|
|
1962
|
+
|
|
1963
|
+
def _update_existing_issue_with_compliance_fields(
|
|
1964
|
+
self, existing_issue: regscale_models.Issue, title: str, finding: IntegrationFinding
|
|
1965
|
+
) -> regscale_models.Issue:
|
|
1619
1966
|
"""
|
|
1620
|
-
Update
|
|
1967
|
+
Update existing issue with basic fields and enhance with compliance-specific fields.
|
|
1621
1968
|
|
|
1622
|
-
:param
|
|
1623
|
-
:param
|
|
1624
|
-
:
|
|
1625
|
-
:
|
|
1969
|
+
:param existing_issue: The existing issue to update
|
|
1970
|
+
:param title: New issue title
|
|
1971
|
+
:param finding: Finding with updated data
|
|
1972
|
+
:return: Updated issue with all fields set
|
|
1626
1973
|
"""
|
|
1627
|
-
|
|
1974
|
+
|
|
1975
|
+
# Update basic fields (similar to parent class logic)
|
|
1976
|
+
existing_issue.title = title
|
|
1977
|
+
existing_issue.description = finding.description
|
|
1978
|
+
existing_issue.severity = finding.severity
|
|
1979
|
+
existing_issue.status = finding.status
|
|
1980
|
+
existing_issue.dateLastUpdated = self.scan_date
|
|
1981
|
+
|
|
1982
|
+
# Set control-related field
|
|
1983
|
+
if getattr(finding, "control_labels", None):
|
|
1984
|
+
existing_issue.affectedControls = ",".join(finding.control_labels)
|
|
1985
|
+
elif getattr(finding, "affected_controls", None):
|
|
1986
|
+
existing_issue.affectedControls = finding.affected_controls
|
|
1987
|
+
|
|
1988
|
+
# Enhance with compliance-specific fields
|
|
1989
|
+
self._enhance_issue_with_compliance_fields(existing_issue, finding)
|
|
1990
|
+
|
|
1991
|
+
# CRITICAL FIX: Handle assessment parenting for existing issues too
|
|
1992
|
+
if hasattr(finding, "assessment_id") and finding.assessment_id:
|
|
1993
|
+
assessment_id = finding.assessment_id
|
|
1994
|
+
|
|
1995
|
+
# Set assessment as the parent
|
|
1996
|
+
existing_issue.parentId = assessment_id
|
|
1997
|
+
existing_issue.parentModule = "assessments"
|
|
1998
|
+
existing_issue.assessmentId = assessment_id
|
|
1999
|
+
|
|
2000
|
+
existing_issue.save()
|
|
2001
|
+
|
|
2002
|
+
return existing_issue
|
|
2003
|
+
|
|
2004
|
+
def _create_or_update_issue(
|
|
2005
|
+
self,
|
|
2006
|
+
finding: IntegrationFinding,
|
|
2007
|
+
issue_status,
|
|
2008
|
+
title: str,
|
|
2009
|
+
existing_issue=None,
|
|
2010
|
+
):
|
|
2011
|
+
"""
|
|
2012
|
+
Override parent method to handle assessment parenting correctly.
|
|
2013
|
+
|
|
2014
|
+
CRITICAL FIX: Check if the finding has assessment parent overrides and apply them.
|
|
2015
|
+
"""
|
|
2016
|
+
# Get consolidated asset identifier
|
|
2017
|
+
asset_identifier = self.get_consolidated_asset_identifier(finding, existing_issue)
|
|
2018
|
+
|
|
2019
|
+
# Prepare issue data
|
|
2020
|
+
issue_title = self.get_issue_title(finding) or title
|
|
2021
|
+
description = finding.description or ""
|
|
2022
|
+
remediation_description = finding.recommendation_for_mitigation or finding.remediation or ""
|
|
2023
|
+
is_poam = self.is_poam(finding)
|
|
2024
|
+
|
|
2025
|
+
if existing_issue:
|
|
2026
|
+
logger.debug(
|
|
2027
|
+
"Updating existing issue %s with assetIdentifier %s", existing_issue.id, finding.asset_identifier
|
|
2028
|
+
)
|
|
2029
|
+
|
|
2030
|
+
# If we have an existing issue, update its fields instead of creating a new one
|
|
2031
|
+
issue = existing_issue or regscale_models.Issue()
|
|
2032
|
+
|
|
2033
|
+
# CRITICAL FIX: Check for parent overrides from the finding
|
|
2034
|
+
if hasattr(finding, "_override_parent_id") and hasattr(finding, "_override_parent_module"):
|
|
2035
|
+
parent_id = finding._override_parent_id
|
|
2036
|
+
parent_module = finding._override_parent_module
|
|
2037
|
+
logger.debug(f"USING OVERRIDE PARENT: {parent_module} #{parent_id}")
|
|
2038
|
+
else:
|
|
2039
|
+
parent_id = self.plan_id
|
|
2040
|
+
parent_module = self.parent_module
|
|
2041
|
+
|
|
2042
|
+
# Update all fields (copying from ScannerIntegration but with override parent)
|
|
2043
|
+
issue.parentId = parent_id
|
|
2044
|
+
issue.parentModule = parent_module
|
|
2045
|
+
issue.vulnerabilityId = finding.vulnerability_id
|
|
2046
|
+
issue.title = issue_title
|
|
2047
|
+
issue.dateCreated = finding.date_created
|
|
2048
|
+
issue.status = issue_status
|
|
2049
|
+
issue.dateCompleted = (
|
|
2050
|
+
self.get_date_completed(finding, issue_status)
|
|
2051
|
+
if issue_status == regscale_models.IssueStatus.Closed
|
|
2052
|
+
else None
|
|
2053
|
+
)
|
|
2054
|
+
issue.severityLevel = finding.severity
|
|
2055
|
+
issue.issueOwnerId = self.assessor_id
|
|
2056
|
+
issue.securityPlanId = self.plan_id if not self.is_component else None
|
|
2057
|
+
issue.identification = finding.identification
|
|
2058
|
+
issue.dateFirstDetected = finding.first_seen
|
|
2059
|
+
|
|
2060
|
+
# Ensure a due date is always set using configured policy defaults (e.g., FedRAMP)
|
|
2061
|
+
if not finding.due_date:
|
|
2062
|
+
try:
|
|
2063
|
+
base_created = finding.date_created or issue.dateCreated
|
|
2064
|
+
finding.due_date = issue_due_date(
|
|
2065
|
+
severity=finding.severity,
|
|
2066
|
+
created_date=base_created,
|
|
2067
|
+
title=self.title,
|
|
2068
|
+
)
|
|
2069
|
+
except Exception:
|
|
2070
|
+
# Final fallback to a Low severity default if anything goes wrong
|
|
2071
|
+
base_created = finding.date_created or issue.dateCreated
|
|
2072
|
+
finding.due_date = issue_due_date(
|
|
2073
|
+
severity=regscale_models.IssueSeverity.Low,
|
|
2074
|
+
created_date=base_created,
|
|
2075
|
+
title=self.title,
|
|
2076
|
+
)
|
|
2077
|
+
issue.dueDate = finding.due_date
|
|
2078
|
+
issue.description = description
|
|
2079
|
+
issue.sourceReport = finding.source_report or self.title
|
|
2080
|
+
issue.recommendedActions = finding.recommendation_for_mitigation
|
|
2081
|
+
issue.assetIdentifier = asset_identifier
|
|
2082
|
+
issue.securityChecks = finding.security_check or finding.external_id
|
|
2083
|
+
issue.remediationDescription = remediation_description
|
|
2084
|
+
issue.integrationFindingId = self.get_finding_identifier(finding)
|
|
2085
|
+
issue.poamComments = finding.poam_comments
|
|
2086
|
+
issue.cve = finding.cve
|
|
2087
|
+
|
|
2088
|
+
# CRITICAL: Set assessmentId (this is the key fix)
|
|
2089
|
+
issue.assessmentId = finding.assessment_id
|
|
2090
|
+
logger.debug(f"SETTING assessmentId = {finding.assessment_id} with parent = {parent_module} #{parent_id}")
|
|
2091
|
+
|
|
2092
|
+
control_id = self.get_control_implementation_id_for_cci(finding.cci_ref) if finding.cci_ref else None
|
|
2093
|
+
issue.controlId = control_id
|
|
2094
|
+
|
|
2095
|
+
# Add the control implementation ids and the cci ref if it exists
|
|
2096
|
+
cci_control_ids = [control_id] if control_id is not None else []
|
|
2097
|
+
if finding.affected_controls:
|
|
1628
2098
|
issue.affectedControls = finding.affected_controls
|
|
1629
|
-
elif
|
|
1630
|
-
issue.affectedControls = ",".join(finding.control_labels)
|
|
2099
|
+
elif finding.control_labels:
|
|
2100
|
+
issue.affectedControls = ", ".join(sorted({cl for cl in finding.control_labels if cl}))
|
|
2101
|
+
|
|
2102
|
+
issue.controlImplementationIds = list(set(finding._control_implementation_ids + cci_control_ids)) # noqa
|
|
2103
|
+
issue.isPoam = is_poam
|
|
2104
|
+
issue.basisForAdjustment = (
|
|
2105
|
+
finding.basis_for_adjustment if finding.basis_for_adjustment else f"{self.title} import"
|
|
2106
|
+
)
|
|
2107
|
+
issue.pluginId = finding.plugin_id
|
|
2108
|
+
issue.originalRiskRating = regscale_models.Issue.assign_risk_rating(finding.severity)
|
|
2109
|
+
issue.changes = "<p>Current: {}</p><p>Planned: {}</p>".format(
|
|
2110
|
+
finding.milestone_changes, finding.planned_milestone_changes
|
|
2111
|
+
)
|
|
2112
|
+
issue.adjustedRiskRating = finding.adjusted_risk_rating
|
|
2113
|
+
issue.riskAdjustment = finding.risk_adjustment
|
|
2114
|
+
issue.operationalRequirement = finding.operational_requirements
|
|
2115
|
+
issue.deviationRationale = finding.deviation_rationale
|
|
2116
|
+
issue.dateLastUpdated = get_current_datetime()
|
|
2117
|
+
issue.affectedControls = finding.affected_controls
|
|
2118
|
+
|
|
2119
|
+
if finding.cve:
|
|
2120
|
+
issue = self.lookup_kev_and_update_issue(cve=finding.cve, issue=issue, cisa_kevs=self._kev_data)
|
|
2121
|
+
|
|
2122
|
+
if existing_issue:
|
|
2123
|
+
logger.debug(f"Saving existing issue {issue.id} with assessmentId={issue.assessmentId}")
|
|
2124
|
+
issue.save(bulk=True)
|
|
2125
|
+
else:
|
|
2126
|
+
logger.info(f"Creating new issue with assessmentId={issue.assessmentId}")
|
|
2127
|
+
issue = issue.create_or_update(
|
|
2128
|
+
bulk_update=True, defaults={"otherIdentifier": self._get_other_identifier(finding, is_poam)}
|
|
2129
|
+
)
|
|
2130
|
+
if issue and issue.id:
|
|
2131
|
+
logger.debug(f"Issue created with ID: {issue.id}")
|
|
2132
|
+
self.extra_data_to_properties(finding, issue.id)
|
|
2133
|
+
else:
|
|
2134
|
+
logger.error(f" Issue creation failed - no ID returned for finding {finding.external_id}")
|
|
2135
|
+
return None
|
|
2136
|
+
|
|
2137
|
+
# Only create milestones if issue has an ID
|
|
2138
|
+
if issue and issue.id:
|
|
2139
|
+
# Check if existing issue needs initial milestone creation
|
|
2140
|
+
if existing_issue and ScannerVariables.useMilestones:
|
|
2141
|
+
self._ensure_issue_has_milestone(issue, finding)
|
|
2142
|
+
|
|
2143
|
+
self._handle_property_and_milestone_creation(issue, finding, existing_issue)
|
|
2144
|
+
else:
|
|
2145
|
+
logger.debug("Skipping milestone creation - issue has no ID")
|
|
2146
|
+
|
|
2147
|
+
return issue
|
|
1631
2148
|
|
|
1632
|
-
def
|
|
2149
|
+
def _populate_compliance_fields_on_finding(self, finding: IntegrationFinding) -> None:
|
|
1633
2150
|
"""
|
|
1634
|
-
|
|
2151
|
+
Pre-populate compliance-specific fields on the finding before issue creation.
|
|
1635
2152
|
|
|
1636
|
-
|
|
2153
|
+
This ensures controlId and assessmentId are set on the finding object
|
|
2154
|
+
so the parent class can save the issue with all fields in one operation.
|
|
1637
2155
|
|
|
1638
|
-
|
|
1639
|
-
|
|
1640
|
-
|
|
1641
|
-
|
|
2156
|
+
The parent class expects:
|
|
2157
|
+
- finding.assessment_id -> issue.assessmentId
|
|
2158
|
+
- finding.cci_ref -> calls get_control_implementation_id_for_cci() -> issue.controlId
|
|
2159
|
+
|
|
2160
|
+
:param finding: Finding to populate with compliance fields
|
|
1642
2161
|
"""
|
|
1643
|
-
delimiter = "\n"
|
|
1644
|
-
identifiers: set[str] = set()
|
|
1645
|
-
# Collect identifiers from all failed items matching this control
|
|
1646
2162
|
try:
|
|
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
2163
|
+
# Set compliance fields on the finding itself before issue creation
|
|
2164
|
+
if hasattr(finding, "rule_id") and finding.rule_id:
|
|
2165
|
+
control_id = self._normalize_control_id_string(finding.rule_id)
|
|
2166
|
+
if control_id:
|
|
2167
|
+
|
|
2168
|
+
# Get control implementation ID
|
|
2169
|
+
impl_id = self._issue_field_setter._get_or_find_implementation_id(control_id)
|
|
2170
|
+
if impl_id:
|
|
2171
|
+
# Store the control ID as cci_ref so parent class calls our override method
|
|
2172
|
+
finding.cci_ref = control_id
|
|
2173
|
+
# Cache the implementation ID for our override method
|
|
2174
|
+
finding._wiz_control_implementation_id = impl_id
|
|
2175
|
+
|
|
2176
|
+
# Get assessment ID and set it on the finding (parent class uses this directly)
|
|
2177
|
+
assess_id = self._issue_field_setter._get_or_find_assessment_id(impl_id)
|
|
2178
|
+
if assess_id:
|
|
2179
|
+
finding.assessment_id = assess_id
|
|
1656
2180
|
except Exception:
|
|
1657
2181
|
pass
|
|
1658
|
-
# Merge with existing identifiers and current finding
|
|
1659
|
-
if issue.assetIdentifier:
|
|
1660
|
-
identifiers |= {e for e in (issue.assetIdentifier or "").split(delimiter) if e}
|
|
1661
|
-
if finding.asset_identifier:
|
|
1662
|
-
identifiers.add(finding.asset_identifier)
|
|
1663
|
-
return delimiter.join(sorted(identifiers))
|
|
1664
2182
|
|
|
1665
|
-
def
|
|
2183
|
+
def _ensure_issue_has_milestone(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
|
|
1666
2184
|
"""
|
|
1667
|
-
|
|
2185
|
+
Ensure that an existing issue has at least one milestone.
|
|
1668
2186
|
|
|
1669
|
-
|
|
1670
|
-
|
|
2187
|
+
This method checks if an existing issue has any milestones, and if not,
|
|
2188
|
+
creates an initial "Issue created" milestone. This handles cases where
|
|
2189
|
+
issues were created before milestone tracking was enabled, or were
|
|
2190
|
+
created through other means without milestones.
|
|
2191
|
+
|
|
2192
|
+
:param issue: The existing issue to check for milestones
|
|
2193
|
+
:param finding: The finding data
|
|
1671
2194
|
:return: None
|
|
1672
|
-
:rtype: None
|
|
1673
2195
|
"""
|
|
1674
2196
|
try:
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
2197
|
+
# Check if the issue already has milestones
|
|
2198
|
+
# We need to make a direct API call because the Milestone model's endpoint configuration
|
|
2199
|
+
# doesn't include the module parameter that the API expects
|
|
2200
|
+
from regscale.models.regscale_models.milestone import Milestone
|
|
2201
|
+
|
|
2202
|
+
try:
|
|
2203
|
+
existing_milestones = Milestone.get_all_by_parent(parent_id=issue.id, parent_module="issues")
|
|
2204
|
+
logger.debug(f"Fetched {len(existing_milestones)} existing milestones for issue {issue.id}")
|
|
2205
|
+
except Exception as api_error:
|
|
2206
|
+
# If the API call fails, log it and assume no milestones exist
|
|
2207
|
+
logger.debug(f"Could not fetch existing milestones for issue {issue.id}: {api_error}")
|
|
2208
|
+
existing_milestones = []
|
|
2209
|
+
|
|
2210
|
+
if not existing_milestones:
|
|
2211
|
+
# Create an initial milestone for the existing issue
|
|
2212
|
+
logger.debug(f"Creating initial milestone for existing issue {issue.id} that had no milestones")
|
|
2213
|
+
|
|
2214
|
+
# Use the issue's dateCreated if available, otherwise use current date
|
|
2215
|
+
if hasattr(issue, "dateCreated") and issue.dateCreated:
|
|
2216
|
+
# Convert to string if it's a datetime object (e.g., in tests)
|
|
2217
|
+
if hasattr(issue.dateCreated, "isoformat"):
|
|
2218
|
+
milestone_date = issue.dateCreated.isoformat()
|
|
2219
|
+
else:
|
|
2220
|
+
milestone_date = issue.dateCreated
|
|
2221
|
+
else:
|
|
2222
|
+
milestone_date = get_current_datetime()
|
|
2223
|
+
|
|
2224
|
+
regscale_models.Milestone(
|
|
2225
|
+
title=f"Issue created by {self.title}",
|
|
2226
|
+
milestoneDate=milestone_date,
|
|
2227
|
+
responsiblePersonId=self.assessor_id,
|
|
2228
|
+
parentID=issue.id,
|
|
2229
|
+
parentModule=regscale_models.Issue.get_module_slug(),
|
|
2230
|
+
).create()
|
|
2231
|
+
|
|
2232
|
+
logger.debug(f"Created initial milestone for existing issue {issue.id}")
|
|
2233
|
+
except Exception as e:
|
|
2234
|
+
logger.warning(f"Could not check/create milestone for issue {issue.id}: {e}")
|
|
2235
|
+
|
|
2236
|
+
def _enhance_issue_with_compliance_fields(self, issue: regscale_models.Issue, finding: IntegrationFinding) -> None:
|
|
2237
|
+
"""
|
|
2238
|
+
Enhance an issue with compliance-specific fields (controlId and assessmentId).
|
|
2239
|
+
|
|
2240
|
+
NOTE: This method is now primarily for the existing issue update path.
|
|
2241
|
+
New issues should have fields set via _populate_compliance_fields_on_finding.
|
|
2242
|
+
|
|
2243
|
+
:param issue: Issue object to enhance
|
|
2244
|
+
:param finding: Finding with control data
|
|
2245
|
+
"""
|
|
2246
|
+
try:
|
|
2247
|
+
# Set control implementation and assessment IDs using our field setter
|
|
2248
|
+
if hasattr(finding, "rule_id") and finding.rule_id:
|
|
2249
|
+
control_id = self._normalize_control_id_string(finding.rule_id)
|
|
2250
|
+
if control_id:
|
|
2251
|
+
result = self._issue_field_setter.set_control_and_assessment_ids(issue, control_id)
|
|
2252
|
+
if not result.success:
|
|
2253
|
+
logger.warning(f"Failed to set compliance fields for '{control_id}': {result.error_message}")
|
|
1687
2254
|
except Exception:
|
|
1688
2255
|
pass
|
|
1689
2256
|
|
|
1690
|
-
def
|
|
2257
|
+
def get_control_implementation_id_for_cci(self, cci: Optional[str]) -> Optional[int]:
|
|
2258
|
+
"""
|
|
2259
|
+
Override parent method to return control implementation ID for Wiz control IDs.
|
|
2260
|
+
|
|
2261
|
+
The parent class calls this method when finding.cci_ref is set, and uses the
|
|
2262
|
+
returned value to set issue.controlId. We store our control implementation
|
|
2263
|
+
ID on the finding and return it here.
|
|
2264
|
+
|
|
2265
|
+
:param cci: Control identifier (e.g., 'AC-2(1)') stored in finding.cci_ref
|
|
2266
|
+
:return: Control implementation ID if found, None otherwise
|
|
1691
2267
|
"""
|
|
1692
|
-
|
|
2268
|
+
# Check whether the current finding context already carries a cached implementation ID for this CCI
|
|
2269
|
+
if hasattr(self, "_current_finding_context"):
|
|
2270
|
+
finding = self._current_finding_context
|
|
2271
|
+
if (
|
|
2272
|
+
hasattr(finding, "_wiz_control_implementation_id")
|
|
2273
|
+
and hasattr(finding, "cci_ref")
|
|
2274
|
+
and finding.cci_ref == cci
|
|
2275
|
+
):
|
|
2276
|
+
impl_id = finding._wiz_control_implementation_id
|
|
2277
|
+
return impl_id
|
|
2278
|
+
|
|
2279
|
+
# Fallback: try to look it up directly (for edge cases)
|
|
2280
|
+
if cci:
|
|
2281
|
+
control_id = self._normalize_control_id_string(cci)
|
|
2282
|
+
if control_id:
|
|
2283
|
+
impl_id = self._issue_field_setter._get_or_find_implementation_id(control_id)
|
|
2284
|
+
if impl_id:
|
|
2285
|
+
return impl_id
|
|
2286
|
+
|
|
2287
|
+
# Final fallback to parent class behavior
|
|
2288
|
+
return super().get_control_implementation_id_for_cci(cci)
|
|
2289
|
+
|
|
2290
|
+
def _populate_control_implementation_cache(self) -> None:
|
|
2291
|
+
"""
|
|
2292
|
+
Pre-populate the control implementation and assessment caches.
|
|
2293
|
+
|
|
2294
|
+
CRITICAL: This ensures controlId and assessmentId can be reliably set on issues.
|
|
2295
|
+
This method loads control implementations and their associated assessments into
|
|
2296
|
+
a cache to enable fast lookups during issue processing.
|
|
1693
2297
|
|
|
1694
|
-
:param regscale_models.Issue issue: Issue to reparent
|
|
1695
|
-
:param IntegrationFinding finding: Finding with asset identifier
|
|
1696
2298
|
:return: None
|
|
1697
2299
|
:rtype: None
|
|
1698
2300
|
"""
|
|
1699
2301
|
try:
|
|
1700
|
-
|
|
1701
|
-
if not asset:
|
|
1702
|
-
asset = self._ensure_asset_for_finding(finding)
|
|
1703
|
-
if asset and getattr(asset, "id", None):
|
|
1704
|
-
issue.parentId = asset.id
|
|
1705
|
-
issue.parentModule = "assets"
|
|
1706
|
-
except Exception:
|
|
1707
|
-
# If asset lookup fails, keep existing parent
|
|
1708
|
-
pass
|
|
2302
|
+
from regscale.models import regscale_models
|
|
1709
2303
|
|
|
1710
|
-
|
|
2304
|
+
logger.info("Pre-populating control implementation cache for issue processing...")
|
|
2305
|
+
|
|
2306
|
+
# Get all control implementations for this plan
|
|
2307
|
+
implementations = regscale_models.ControlImplementation.get_all_by_parent(
|
|
2308
|
+
parent_id=self.plan_id, parent_module=self.parent_module
|
|
2309
|
+
)
|
|
2310
|
+
|
|
2311
|
+
if not implementations:
|
|
2312
|
+
logger.warning("No control implementations found for this plan")
|
|
2313
|
+
return
|
|
2314
|
+
|
|
2315
|
+
logger.info(f"Found {len(implementations)} control implementations to cache")
|
|
2316
|
+
|
|
2317
|
+
# Cache SecurityControl lookups to avoid repeated API calls
|
|
2318
|
+
security_control_cache = {}
|
|
2319
|
+
controls_mapped = 0
|
|
2320
|
+
assessments_mapped = 0
|
|
2321
|
+
|
|
2322
|
+
for impl in implementations:
|
|
2323
|
+
try:
|
|
2324
|
+
# Skip if no controlID reference
|
|
2325
|
+
if not hasattr(impl, "controlID") or not impl.controlID:
|
|
2326
|
+
continue
|
|
2327
|
+
|
|
2328
|
+
# Get or cache the security control
|
|
2329
|
+
if impl.controlID not in security_control_cache:
|
|
2330
|
+
security_control = regscale_models.SecurityControl.get_object(object_id=impl.controlID)
|
|
2331
|
+
security_control_cache[impl.controlID] = security_control
|
|
2332
|
+
else:
|
|
2333
|
+
security_control = security_control_cache[impl.controlID]
|
|
2334
|
+
|
|
2335
|
+
if security_control and hasattr(security_control, "controlId"):
|
|
2336
|
+
# Normalize and cache the control ID mapping
|
|
2337
|
+
normalized_id = self._normalize_control_id_string(security_control.controlId)
|
|
2338
|
+
if normalized_id:
|
|
2339
|
+
self._impl_id_by_control[normalized_id] = impl.id
|
|
2340
|
+
controls_mapped += 1
|
|
2341
|
+
|
|
2342
|
+
# Also try to cache the most recent assessment
|
|
2343
|
+
try:
|
|
2344
|
+
assessments = regscale_models.Assessment.get_all_by_parent(
|
|
2345
|
+
parent_id=impl.id, parent_module="controls"
|
|
2346
|
+
)
|
|
2347
|
+
if assessments:
|
|
2348
|
+
# Get the most recent assessment
|
|
2349
|
+
assessments.sort(key=lambda a: a.id if hasattr(a, "id") else 0, reverse=True)
|
|
2350
|
+
self._assessment_by_impl_today[impl.id] = assessments[0]
|
|
2351
|
+
assessments_mapped += 1
|
|
2352
|
+
except Exception:
|
|
2353
|
+
pass
|
|
2354
|
+
|
|
2355
|
+
except Exception:
|
|
2356
|
+
continue
|
|
2357
|
+
|
|
2358
|
+
logger.info("Control implementation cache populated:")
|
|
2359
|
+
logger.info(f" - {controls_mapped} control ID mappings")
|
|
2360
|
+
logger.info(f" - {assessments_mapped} assessment mappings")
|
|
2361
|
+
|
|
2362
|
+
except Exception as e:
|
|
2363
|
+
logger.error(f"Error populating control implementation cache: {e}")
|
|
2364
|
+
|
|
2365
|
+
def _refresh_assessment_cache_after_creation(self) -> None:
|
|
1711
2366
|
"""
|
|
1712
|
-
|
|
2367
|
+
Refresh the assessment cache after control assessments have been created.
|
|
1713
2368
|
|
|
1714
|
-
:
|
|
2369
|
+
CRITICAL: This ensures that newly created assessments from the sync_control_assessments
|
|
2370
|
+
step are available when processing issues. Without this, assessmentId will not be set
|
|
2371
|
+
on issues because the cache only contains old assessments.
|
|
2372
|
+
|
|
2373
|
+
:return: None
|
|
2374
|
+
:rtype: None
|
|
1715
2375
|
"""
|
|
1716
2376
|
try:
|
|
1717
|
-
from regscale.
|
|
2377
|
+
from regscale.models import regscale_models
|
|
2378
|
+
from datetime import datetime
|
|
2379
|
+
|
|
2380
|
+
logger.info("Refreshing assessment cache with newly created assessments...")
|
|
1718
2381
|
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
severity_counts = {"Critical": 0, "High": 0, "Moderate": 0, "Low": 0}
|
|
2382
|
+
refreshed_count = 0
|
|
2383
|
+
today = datetime.now().date()
|
|
1722
2384
|
|
|
1723
|
-
for
|
|
2385
|
+
# Only refresh assessments for implementations we know about
|
|
2386
|
+
for control_id, impl_id in self._impl_id_by_control.items():
|
|
1724
2387
|
try:
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
2388
|
+
# Get all assessments for this implementation
|
|
2389
|
+
assessments = regscale_models.Assessment.get_all_by_parent(
|
|
2390
|
+
parent_id=impl_id, parent_module="controls"
|
|
2391
|
+
)
|
|
2392
|
+
|
|
2393
|
+
if not assessments:
|
|
1728
2394
|
continue
|
|
1729
|
-
|
|
1730
|
-
|
|
2395
|
+
|
|
2396
|
+
# Find today's assessment (most recent created today)
|
|
2397
|
+
today_assessments = []
|
|
2398
|
+
for assessment in assessments:
|
|
2399
|
+
assessment_date = None
|
|
2400
|
+
try:
|
|
2401
|
+
# Try to get assessment date from various fields
|
|
2402
|
+
date_fields = ["actualFinish", "plannedFinish", "dateCreated"]
|
|
2403
|
+
for field in date_fields:
|
|
2404
|
+
if hasattr(assessment, field) and getattr(assessment, field):
|
|
2405
|
+
date_value = getattr(assessment, field)
|
|
2406
|
+
if isinstance(date_value, str):
|
|
2407
|
+
from regscale.core.app.utils.app_utils import regscale_string_to_datetime
|
|
2408
|
+
|
|
2409
|
+
assessment_date = regscale_string_to_datetime(date_value).date()
|
|
2410
|
+
elif hasattr(date_value, "date"):
|
|
2411
|
+
assessment_date = date_value.date()
|
|
2412
|
+
else:
|
|
2413
|
+
assessment_date = date_value
|
|
2414
|
+
break
|
|
2415
|
+
|
|
2416
|
+
if assessment_date == today:
|
|
2417
|
+
today_assessments.append(assessment)
|
|
2418
|
+
except Exception:
|
|
2419
|
+
continue
|
|
2420
|
+
|
|
2421
|
+
# Use the most recent of today's assessments, or fall back to the most recent overall
|
|
2422
|
+
if today_assessments:
|
|
2423
|
+
best_assessment = max(today_assessments, key=lambda a: getattr(a, "id", 0))
|
|
2424
|
+
else:
|
|
2425
|
+
best_assessment = max(assessments, key=lambda a: getattr(a, "id", 0))
|
|
2426
|
+
|
|
2427
|
+
# Update the cache
|
|
2428
|
+
self._assessment_by_impl_today[impl_id] = best_assessment
|
|
2429
|
+
refreshed_count += 1
|
|
2430
|
+
|
|
2431
|
+
except Exception:
|
|
2432
|
+
continue
|
|
2433
|
+
|
|
2434
|
+
logger.info(f"Assessment cache refreshed: {refreshed_count} assessments updated")
|
|
2435
|
+
|
|
2436
|
+
except Exception as e:
|
|
2437
|
+
logger.error(f"Error refreshing assessment cache: {e}")
|
|
2438
|
+
|
|
2439
|
+
def _find_control_implementation_id(self, control_id: str) -> Optional[int]:
|
|
2440
|
+
"""
|
|
2441
|
+
Find control implementation ID by querying the database directly.
|
|
2442
|
+
OPTIMIZED: Uses controlID field directly and caches SecurityControl lookups.
|
|
2443
|
+
|
|
2444
|
+
:param str control_id: Normalized control ID (e.g., 'AC-2(1)')
|
|
2445
|
+
:return: Control implementation ID if found
|
|
2446
|
+
:rtype: Optional[int]
|
|
2447
|
+
"""
|
|
2448
|
+
try:
|
|
2449
|
+
from regscale.models import regscale_models
|
|
2450
|
+
|
|
2451
|
+
# First check cache
|
|
2452
|
+
if hasattr(self, "_impl_id_by_control") and control_id in self._impl_id_by_control:
|
|
2453
|
+
cached_id = self._impl_id_by_control[control_id]
|
|
2454
|
+
return cached_id
|
|
2455
|
+
|
|
2456
|
+
# Get all control implementations for this plan
|
|
2457
|
+
implementations = regscale_models.ControlImplementation.get_all_by_parent(
|
|
2458
|
+
parent_id=self.plan_id, parent_module=self.parent_module
|
|
2459
|
+
)
|
|
2460
|
+
|
|
2461
|
+
# Create a cache for SecurityControl lookups to avoid repeated API calls
|
|
2462
|
+
security_control_cache = {}
|
|
2463
|
+
|
|
2464
|
+
for impl in implementations:
|
|
2465
|
+
try:
|
|
2466
|
+
# Use controlID field which references the SecurityControl
|
|
2467
|
+
if not hasattr(impl, "controlID") or not impl.controlID:
|
|
1731
2468
|
continue
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
elif sev == regscale_models.IssueSeverity.High:
|
|
1738
|
-
severity_counts["High"] += 1
|
|
1739
|
-
elif sev == regscale_models.IssueSeverity.Moderate:
|
|
1740
|
-
severity_counts["Moderate"] += 1
|
|
2469
|
+
|
|
2470
|
+
# Check if we've already looked up this security control
|
|
2471
|
+
if impl.controlID not in security_control_cache:
|
|
2472
|
+
security_control = regscale_models.SecurityControl.get_object(object_id=impl.controlID)
|
|
2473
|
+
security_control_cache[impl.controlID] = security_control
|
|
1741
2474
|
else:
|
|
1742
|
-
|
|
2475
|
+
security_control = security_control_cache[impl.controlID]
|
|
2476
|
+
|
|
2477
|
+
if security_control and hasattr(security_control, "controlId"):
|
|
2478
|
+
impl_control_id = self._normalize_control_id_string(security_control.controlId)
|
|
2479
|
+
|
|
2480
|
+
if impl_control_id == control_id:
|
|
2481
|
+
logger.info(f"Found control implementation {impl.id} for control {control_id}")
|
|
2482
|
+
# Cache it for future lookups
|
|
2483
|
+
if not hasattr(self, "_impl_id_by_control"):
|
|
2484
|
+
self._impl_id_by_control = {}
|
|
2485
|
+
self._impl_id_by_control[control_id] = impl.id
|
|
2486
|
+
return impl.id
|
|
1743
2487
|
except Exception:
|
|
1744
2488
|
continue
|
|
1745
2489
|
|
|
1746
|
-
|
|
1747
|
-
|
|
1748
|
-
|
|
1749
|
-
|
|
1750
|
-
|
|
2490
|
+
logger.warning(
|
|
2491
|
+
f"No control implementation found for control {control_id} among {len(implementations)} implementations"
|
|
2492
|
+
)
|
|
2493
|
+
return None
|
|
2494
|
+
except Exception as e:
|
|
2495
|
+
logger.error(f"Error finding control implementation for {control_id}: {e}")
|
|
2496
|
+
return None
|
|
2497
|
+
|
|
2498
|
+
def _find_assessment_id_for_implementation(self, implementation_id: int) -> Optional[int]:
|
|
2499
|
+
"""
|
|
2500
|
+
Find the most recent assessment ID for a control implementation.
|
|
2501
|
+
IMPROVED: Better date handling and caching.
|
|
2502
|
+
|
|
2503
|
+
:param int implementation_id: Control implementation ID
|
|
2504
|
+
:return: Assessment ID if found
|
|
2505
|
+
:rtype: Optional[int]
|
|
2506
|
+
"""
|
|
2507
|
+
try:
|
|
2508
|
+
from regscale.models import regscale_models
|
|
2509
|
+
from datetime import datetime
|
|
2510
|
+
from regscale.core.app.utils.app_utils import regscale_string_to_datetime
|
|
2511
|
+
|
|
2512
|
+
# Check cache first
|
|
2513
|
+
if hasattr(self, "_assessment_by_impl_today") and implementation_id in self._assessment_by_impl_today:
|
|
2514
|
+
cached_assessment = self._assessment_by_impl_today[implementation_id]
|
|
2515
|
+
if cached_assessment and hasattr(cached_assessment, "id"):
|
|
2516
|
+
logger.debug(
|
|
2517
|
+
f"Found cached assessment {cached_assessment.id} for implementation {implementation_id}"
|
|
2518
|
+
)
|
|
2519
|
+
return cached_assessment.id
|
|
2520
|
+
|
|
2521
|
+
# Get assessments for this control implementation
|
|
2522
|
+
assessments = regscale_models.Assessment.get_all_by_parent(
|
|
2523
|
+
parent_id=implementation_id, parent_module="controls"
|
|
2524
|
+
)
|
|
2525
|
+
|
|
2526
|
+
if not assessments:
|
|
2527
|
+
logger.warning(f"No assessments found for control implementation {implementation_id}")
|
|
2528
|
+
return None
|
|
2529
|
+
|
|
2530
|
+
# Find the most recent assessment (preferably from today)
|
|
2531
|
+
today = datetime.now().date()
|
|
2532
|
+
today_assessments = []
|
|
2533
|
+
recent_assessments = []
|
|
2534
|
+
|
|
2535
|
+
for assessment in assessments:
|
|
2536
|
+
try:
|
|
2537
|
+
assessment_date = None
|
|
2538
|
+
|
|
2539
|
+
# Try multiple date fields in order of preference
|
|
2540
|
+
date_fields = ["plannedStart", "actualFinish", "plannedFinish", "dateCreated"]
|
|
2541
|
+
for field in date_fields:
|
|
2542
|
+
if hasattr(assessment, field) and getattr(assessment, field):
|
|
2543
|
+
date_value = getattr(assessment, field)
|
|
2544
|
+
if isinstance(date_value, str):
|
|
2545
|
+
assessment_date = regscale_string_to_datetime(date_value).date()
|
|
2546
|
+
elif hasattr(date_value, "date"):
|
|
2547
|
+
assessment_date = date_value.date()
|
|
2548
|
+
else:
|
|
2549
|
+
assessment_date = date_value
|
|
2550
|
+
break
|
|
2551
|
+
|
|
2552
|
+
if assessment_date:
|
|
2553
|
+
if assessment_date == today:
|
|
2554
|
+
today_assessments.append(assessment)
|
|
2555
|
+
else:
|
|
2556
|
+
recent_assessments.append((assessment, assessment_date))
|
|
2557
|
+
else:
|
|
2558
|
+
# Assessment with no parseable date
|
|
2559
|
+
recent_assessments.append((assessment, None))
|
|
2560
|
+
except Exception:
|
|
2561
|
+
recent_assessments.append((assessment, None))
|
|
2562
|
+
|
|
2563
|
+
# Prefer today's assessments
|
|
2564
|
+
if today_assessments:
|
|
2565
|
+
# Sort by ID (highest/newest first) if there are multiple assessments for today
|
|
2566
|
+
today_assessments.sort(key=lambda a: a.id if hasattr(a, "id") else 0, reverse=True)
|
|
2567
|
+
assessment = today_assessments[0]
|
|
2568
|
+
logger.info(f"Found today's assessment {assessment.id} for control implementation {implementation_id}")
|
|
2569
|
+
# Cache it for future lookups
|
|
2570
|
+
if not hasattr(self, "_assessment_by_impl_today"):
|
|
2571
|
+
self._assessment_by_impl_today = {}
|
|
2572
|
+
self._assessment_by_impl_today[implementation_id] = assessment
|
|
2573
|
+
return assessment.id
|
|
2574
|
+
|
|
2575
|
+
# Fall back to most recent assessment
|
|
2576
|
+
if recent_assessments:
|
|
2577
|
+
# Sort by date (newest first), handling None dates
|
|
2578
|
+
recent_assessments.sort(
|
|
2579
|
+
key=lambda x: (x[1] if x[1] else datetime.min.date(), x[0].id if hasattr(x[0], "id") else 0),
|
|
2580
|
+
reverse=True,
|
|
2581
|
+
)
|
|
2582
|
+
assessment = recent_assessments[0][0]
|
|
2583
|
+
logger.info(f"Found recent assessment {assessment.id} for control implementation {implementation_id}")
|
|
2584
|
+
# Cache it even if not today's
|
|
2585
|
+
if not hasattr(self, "_assessment_by_impl_today"):
|
|
2586
|
+
self._assessment_by_impl_today = {}
|
|
2587
|
+
self._assessment_by_impl_today[implementation_id] = assessment
|
|
2588
|
+
return assessment.id
|
|
2589
|
+
|
|
2590
|
+
logger.warning(f"No usable assessments found for control implementation {implementation_id}")
|
|
2591
|
+
return None
|
|
2592
|
+
except Exception as e:
|
|
2593
|
+
logger.error(f"Error finding assessment for control implementation {implementation_id}: {e}")
|
|
2594
|
+
return None
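# Illustrative sketch, not part of the package diff above: the selection rule the
# method implements, with SimpleNamespace objects standing in for regscale_models.Assessment
# and a pre-parsed .date attribute standing in for the date-field parsing.
from datetime import date
from types import SimpleNamespace

def pick_assessment(assessments, today):
    # Prefer assessments dated today (newest id wins); otherwise fall back to the
    # most recent assessment by (date, id).
    todays = [a for a in assessments if a.date == today]
    if todays:
        return max(todays, key=lambda a: a.id)
    return max(assessments, key=lambda a: (a.date or date.min, a.id), default=None)

assessments = [
    SimpleNamespace(id=1, date=date(2024, 1, 2)),
    SimpleNamespace(id=2, date=date.today()),
]
assert pick_assessment(assessments, date.today()).id == 2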
|
|
2595
|
+
|
|
2596
|
+
def _reparent_issue_to_asset(self, issue: regscale_models.Issue) -> None:
|
|
2597
|
+
"""
|
|
2598
|
+
Reparent issue to the control implementation instead of the security plan.
|
|
2599
|
+
This ensures issues are properly associated with their control implementations.
|
|
2600
|
+
|
|
2601
|
+
:param regscale_models.Issue issue: Issue to reparent to control implementation
|
|
2602
|
+
The new parent is read from issue.controlId; no finding argument is required.
|
|
2603
|
+
:return: None
|
|
2604
|
+
:rtype: None
|
|
2605
|
+
"""
|
|
2606
|
+
# If we have a control implementation ID, parent the issue to it
|
|
2607
|
+
if issue.controlId:
|
|
2608
|
+
issue.parentId = issue.controlId
|
|
2609
|
+
issue.parentModule = "controls"
|
|
2610
|
+
else:
|
|
2611
|
+
# No control implementation available; keep the existing parent (the security plan)
|
|
2612
|
+
pass
|
|
2613
|
+
|
|
2614
|
+
def _update_scan_history(self, scan_history: regscale_models.ScanHistory) -> None:
|
|
2615
|
+
"""
|
|
2616
|
+
No scan history updates for compliance report ingest.
|
|
2617
|
+
|
|
2618
|
+
:param regscale_models.ScanHistory scan_history: Scan history record (unused)
|
|
2619
|
+
"""
|
|
2620
|
+
# No scan history for compliance report ingest
|
|
2621
|
+
pass
|
|
2622
|
+
|
|
2623
|
+
def _process_control_assessments(self) -> None:
|
|
2624
|
+
"""
|
|
2625
|
+
Process control assessments only for controls that have validated compliance items
|
|
2626
|
+
with existing assets in RegScale. This ensures we don't create assessments for
|
|
2627
|
+
controls that have no assets in our boundary.
|
|
2628
|
+
"""
|
|
2629
|
+
logger.info("Starting control assessment processing for Wiz compliance integration")
|
|
2630
|
+
|
|
2631
|
+
# Ensure existing records cache is loaded
|
|
2632
|
+
self._load_existing_records_cache()
|
|
2633
|
+
|
|
2634
|
+
implementations = self._get_control_implementations()
|
|
2635
|
+
if not implementations:
|
|
2636
|
+
logger.warning("No control implementations found for assessment processing")
|
|
2637
|
+
return
|
|
1751
2638
|
|
|
1752
|
-
|
|
1753
|
-
|
|
2639
|
+
# Get all potential control IDs from compliance data
|
|
2640
|
+
all_potential_controls = set(self.passing_controls.keys()) | set(self.failing_controls.keys())
|
|
2641
|
+
logger.debug(
|
|
2642
|
+
f"Found {len(all_potential_controls)} potential controls from compliance data: {sorted(all_potential_controls)}"
|
|
2643
|
+
)
|
|
2644
|
+
|
|
2645
|
+
# Validate each control has actual assets in our boundary before processing
|
|
2646
|
+
validated_controls_with_assets = {}
|
|
2647
|
+
validated_passing_controls = {}
|
|
2648
|
+
validated_failing_controls = {}
|
|
2649
|
+
|
|
2650
|
+
for control_id in all_potential_controls:
|
|
2651
|
+
# Get all compliance items for this control
|
|
2652
|
+
control_items = self._get_validated_control_compliance_items(control_id)
|
|
2653
|
+
|
|
2654
|
+
if not control_items:
|
|
2655
|
+
continue
|
|
2656
|
+
|
|
2657
|
+
# Check if we have any assets for the compliance items
|
|
2658
|
+
asset_identifiers = set()
|
|
2659
|
+
assets_found = 0
|
|
2660
|
+
|
|
2661
|
+
for item in control_items:
|
|
2662
|
+
if hasattr(item, "resource_name") and item.resource_name:
|
|
2663
|
+
resource_id = getattr(item, "resource_id", "")
|
|
2664
|
+
# Verify the asset actually exists in RegScale
|
|
2665
|
+
if self._asset_exists_in_regscale(resource_id):
|
|
2666
|
+
asset_identifiers.add(item.resource_name)
|
|
2667
|
+
assets_found += 1
|
|
2668
|
+
else:
|
|
2669
|
+
logger.debug(
|
|
2670
|
+
f"Control {control_id}: Asset {resource_id} ({item.resource_name}) not found in RegScale"
|
|
2671
|
+
)
|
|
2672
|
+
logger.debug(f"Found {assets_found} valid assets for control {control_id}")
|
|
2673
|
+
if not asset_identifiers:
|
|
2674
|
+
continue
|
|
2675
|
+
|
|
2676
|
+
# This control has valid assets, include it in processing
|
|
2677
|
+
validated_controls_with_assets[control_id] = list(asset_identifiers)
|
|
2678
|
+
|
|
2679
|
+
# Preserve the pass/fail status for validated controls
|
|
2680
|
+
if control_id in self.failing_controls:
|
|
2681
|
+
validated_failing_controls[control_id] = self.failing_controls[control_id]
|
|
2682
|
+
elif control_id in self.passing_controls:
|
|
2683
|
+
validated_passing_controls[control_id] = self.passing_controls[control_id]
|
|
2684
|
+
|
|
2685
|
+
if not validated_controls_with_assets:
|
|
2686
|
+
logger.warning(" No controls have assets in RegScale boundary - no control assessments will be created")
|
|
2687
|
+
logger.info("SUMMARY: 0 control assessments created (no assets exist in RegScale)")
|
|
2688
|
+
return
|
|
2689
|
+
|
|
2690
|
+
assessments_created = 0
|
|
2691
|
+
processed_impl_today: set[int] = set()
|
|
2692
|
+
|
|
2693
|
+
# Only process validated controls that have assets in our boundary
|
|
2694
|
+
for control_id in validated_controls_with_assets.keys():
|
|
2695
|
+
created = self._process_single_control_assessment(
|
|
2696
|
+
control_id=control_id,
|
|
2697
|
+
implementations=implementations,
|
|
2698
|
+
processed_impl_today=processed_impl_today,
|
|
2699
|
+
)
|
|
2700
|
+
assessments_created += created
|
|
2701
|
+
|
|
2702
|
+
# Calculate stats only for validated controls
|
|
2703
|
+
validated_control_ids = set(validated_controls_with_assets.keys())
|
|
2704
|
+
passing_assessments = len([cid for cid in validated_control_ids if cid not in validated_failing_controls])
|
|
2705
|
+
failing_assessments = len([cid for cid in validated_control_ids if cid in validated_failing_controls])
|
|
2706
|
+
|
|
2707
|
+
if assessments_created > 0:
|
|
1754
2708
|
logger.info(
|
|
1755
|
-
"
|
|
1756
|
-
getattr(scan_history, "id", 0),
|
|
1757
|
-
severity_counts["Critical"],
|
|
1758
|
-
severity_counts["High"],
|
|
1759
|
-
severity_counts["Moderate"],
|
|
1760
|
-
severity_counts["Low"],
|
|
2709
|
+
f"Created {assessments_created} control assessments: {passing_assessments} passing, {failing_assessments} failing"
|
|
1761
2710
|
)
|
|
2711
|
+
else:
|
|
2712
|
+
logger.warning(
|
|
2713
|
+
f"No control assessments were actually created (0 assessments) despite finding {len(validated_controls_with_assets)} controls with assets"
|
|
2714
|
+
)
|
|
2715
|
+
|
|
2716
|
+
logger.info(
|
|
2717
|
+
f"CONTROL ASSESSMENT SUMMARY: {assessments_created} assessments created for {len(validated_controls_with_assets)} validated controls"
|
|
2718
|
+
)
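# Illustrative sketch, not part of the package diff above: the validation filter
# described in this method, which keeps only controls whose compliance items
# reference assets that already exist in RegScale. items_by_control and
# asset_exists are hypothetical stand-ins for the class's helpers.
def controls_with_assets(control_ids, items_by_control, asset_exists):
    validated = {}
    for control_id in control_ids:
        assets = {
            item["resource_name"]
            for item in items_by_control.get(control_id, [])
            if asset_exists(item["resource_id"])
        }
        if assets:
            validated[control_id] = sorted(assets)
    return validated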
|
|
2719
|
+
|
|
2720
|
+
def _sync_assessment_cache_from_base_class(self) -> None:
|
|
2721
|
+
"""
|
|
2722
|
+
Sync assessments from base class cache to our control cache.
|
|
2723
|
+
|
|
2724
|
+
This ensures that assessments created by the base class ComplianceIntegration
|
|
2725
|
+
are available to our IssueFieldSetter for linking issues to assessments.
|
|
2726
|
+
"""
|
|
2727
|
+
try:
|
|
2728
|
+
# Copy assessments from base class cache to our cache
|
|
2729
|
+
base_cache = getattr(self, "_assessment_by_impl_today", {})
|
|
2730
|
+
synced_count = 0
|
|
2731
|
+
|
|
2732
|
+
for impl_id, assessment in base_cache.items():
|
|
2733
|
+
self._control_cache.set_assessment(impl_id, assessment)
|
|
2734
|
+
synced_count += 1
|
|
2735
|
+
|
|
2736
|
+
logger.info(f"Synced {synced_count} assessments from base class cache to control cache")
|
|
2737
|
+
|
|
1762
2738
|
except Exception as e:
|
|
1763
|
-
logger.
|
|
2739
|
+
logger.warning(f"Failed to sync assessment cache: {e}")
|
|
2740
|
+
|
|
2741
|
+
def _get_validated_control_compliance_items(self, control_id: str) -> List[ComplianceItem]:
|
|
2742
|
+
"""
|
|
2743
|
+
Get validated compliance items for a specific control.
|
|
2744
|
+
Only returns items whose referenced assets already exist in the RegScale boundary.
|
|
2745
|
+
|
|
2746
|
+
:param str control_id: Control identifier to filter by
|
|
2747
|
+
:return: List of validated compliance items for the control
|
|
2748
|
+
:rtype: List[ComplianceItem]
|
|
2749
|
+
"""
|
|
2750
|
+
validated_items: List[ComplianceItem] = []
|
|
2751
|
+
|
|
2752
|
+
for item in self.all_compliance_items:
|
|
2753
|
+
# Check if this item matches the control
|
|
2754
|
+
matches_control = False
|
|
2755
|
+
if hasattr(item, "control_ids"):
|
|
2756
|
+
item_control_ids = getattr(item, "control_ids", [])
|
|
2757
|
+
if any(cid.lower() == control_id.lower() for cid in item_control_ids):
|
|
2758
|
+
matches_control = True
|
|
2759
|
+
elif hasattr(item, "control_id") and item.control_id.lower() == control_id.lower():
|
|
2760
|
+
matches_control = True
|
|
2761
|
+
|
|
2762
|
+
if not matches_control:
|
|
2763
|
+
continue
|
|
2764
|
+
|
|
2765
|
+
# Additional validation: ensure the asset exists in RegScale
|
|
2766
|
+
resource_id = getattr(item, "resource_id", "")
|
|
2767
|
+
if resource_id and self._asset_exists_in_regscale(resource_id):
|
|
2768
|
+
validated_items.append(item)
|
|
2769
|
+
else:
|
|
2770
|
+
logger.debug(
|
|
2771
|
+
f"Filtered out compliance item for control {control_id} - asset {resource_id} not in RegScale"
|
|
2772
|
+
)
|
|
2773
|
+
|
|
2774
|
+
return validated_items
|
|
2775
|
+
|
|
2776
|
+
def _get_control_compliance_items(self, control_id: str) -> List[ComplianceItem]:
|
|
2777
|
+
"""
|
|
2778
|
+
Get all compliance items for a specific control.
|
|
2779
|
+
All items have already been filtered to framework-specific items with existing assets.
|
|
2780
|
+
|
|
2781
|
+
:param str control_id: Control identifier to filter by
|
|
2782
|
+
:return: List of compliance items for the control
|
|
2783
|
+
:rtype: List[ComplianceItem]
|
|
2784
|
+
"""
|
|
2785
|
+
items: List[ComplianceItem] = []
|
|
2786
|
+
|
|
2787
|
+
for item in self.all_compliance_items:
|
|
2788
|
+
# Check if this item matches the control
|
|
2789
|
+
matches_control = False
|
|
2790
|
+
if hasattr(item, "control_ids"):
|
|
2791
|
+
item_control_ids = getattr(item, "control_ids", [])
|
|
2792
|
+
if any(cid.lower() == control_id.lower() for cid in item_control_ids):
|
|
2793
|
+
matches_control = True
|
|
2794
|
+
elif hasattr(item, "control_id") and item.control_id.lower() == control_id.lower():
|
|
2795
|
+
matches_control = True
|
|
2796
|
+
|
|
2797
|
+
if matches_control:
|
|
2798
|
+
items.append(item)
|
|
2799
|
+
|
|
2800
|
+
return items
|
|
2801
|
+
|
|
2802
|
+
# flake8: noqa: C901
|
|
2803
|
+
def get_asset_by_identifier(self, identifier: str) -> Optional["regscale_models.Asset"]:
|
|
2804
|
+
"""
|
|
2805
|
+
Override asset lookup for Wiz policy compliance integration.
|
|
2806
|
+
|
|
2807
|
+
For policy compliance, the identifier should be the Wiz resource ID.
|
|
2808
|
+
We'll try multiple lookup strategies to find the corresponding RegScale asset.
|
|
2809
|
+
|
|
2810
|
+
:param str identifier: Asset identifier (should be Wiz resource ID)
|
|
2811
|
+
:return: Asset if found, None otherwise
|
|
2812
|
+
:rtype: Optional[regscale_models.Asset]
|
|
2813
|
+
"""
|
|
2814
|
+
|
|
2815
|
+
# First try the standard lookup by identifier (uses asset_map_by_identifier)
|
|
2816
|
+
asset = super().get_asset_by_identifier(identifier)
|
|
2817
|
+
if asset:
|
|
2818
|
+
return asset
|
|
2819
|
+
|
|
2820
|
+
# If not found, try to find using our cached RegScale assets by Wiz ID
|
|
2821
|
+
try:
|
|
2822
|
+
if hasattr(self, "_regscale_assets_by_wiz_id") and self._regscale_assets_by_wiz_id:
|
|
2823
|
+
# Direct lookup by Wiz ID (most common case)
|
|
2824
|
+
if identifier in self._regscale_assets_by_wiz_id:
|
|
2825
|
+
regscale_asset = self._regscale_assets_by_wiz_id[identifier]
|
|
2826
|
+
return regscale_asset
|
|
2827
|
+
|
|
2828
|
+
# Fallback: check all assets for name/identifier matches
|
|
2829
|
+
for wiz_id, regscale_asset in self._regscale_assets_by_wiz_id.items():
|
|
2830
|
+
# Check if asset name matches the identifier
|
|
2831
|
+
if regscale_asset.name == identifier:
|
|
2832
|
+
return regscale_asset
|
|
2833
|
+
|
|
2834
|
+
# Also check identifier field
|
|
2835
|
+
if hasattr(regscale_asset, "identifier") and regscale_asset.identifier == identifier:
|
|
2836
|
+
return regscale_asset
|
|
2837
|
+
|
|
2838
|
+
# Check other tracking number
|
|
2839
|
+
if (
|
|
2840
|
+
hasattr(regscale_asset, "otherTrackingNumber")
|
|
2841
|
+
and regscale_asset.otherTrackingNumber == identifier
|
|
2842
|
+
):
|
|
2843
|
+
logger.debug(
|
|
2844
|
+
f"Found asset via otherTrackingNumber match: {regscale_asset.name} (Wiz ID: {wiz_id})"
|
|
2845
|
+
)
|
|
2846
|
+
return regscale_asset
|
|
2847
|
+
|
|
2848
|
+
except Exception:
|
|
2849
|
+
pass
|
|
2850
|
+
|
|
2851
|
+
# Asset not found
|
|
2852
|
+
return None
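# Illustrative sketch, not part of the package diff above: the cached-map fallback
# order used after the standard identifier lookup misses, with SimpleNamespace
# objects standing in for regscale_models.Asset.
from types import SimpleNamespace

def find_asset(identifier, assets_by_wiz_id):
    # 1. Direct Wiz ID hit; 2-4. name / identifier / otherTrackingNumber matches.
    if identifier in assets_by_wiz_id:
        return assets_by_wiz_id[identifier]
    for asset in assets_by_wiz_id.values():
        if identifier in (asset.name,
                          getattr(asset, "identifier", None),
                          getattr(asset, "otherTrackingNumber", None)):
            return asset
    return None

assets = {"wiz-1": SimpleNamespace(name="vm-01", identifier="i-123", otherTrackingNumber="otn-9")}
assert find_asset("vm-01", assets) is assets["wiz-1"]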
|
|
2853
|
+
|
|
2854
|
+
def _ensure_asset_for_finding(self, finding: IntegrationFinding) -> Optional["regscale_models.Asset"]:
|
|
2855
|
+
"""
|
|
2856
|
+
Override asset creation for Wiz policy compliance integration.
|
|
2857
|
+
|
|
2858
|
+
We don't create assets in the policy compliance integration - they come from
|
|
2859
|
+
a separate Wiz inventory import. If an asset isn't found, we skip the finding.
|
|
2860
|
+
|
|
2861
|
+
:param IntegrationFinding finding: Finding that needs an asset
|
|
2862
|
+
:return: None (we don't create assets)
|
|
2863
|
+
:rtype: Optional[regscale_models.Asset]
|
|
2864
|
+
"""
|
|
2865
|
+
return None
|
|
2866
|
+
|
|
2867
|
+
def _process_consolidated_issues(self, findings: List[IntegrationFinding]) -> None:
|
|
2868
|
+
"""
|
|
2869
|
+
Process pre-consolidated findings to create issues.
|
|
2870
|
+
|
|
2871
|
+
Since fetch_findings() now creates consolidated findings (one per control with all resources),
|
|
2872
|
+
this method simply creates issues directly from each finding.
|
|
2873
|
+
|
|
2874
|
+
:param List[IntegrationFinding] findings: List of pre-consolidated findings to process
|
|
2875
|
+
"""
|
|
2876
|
+
if not findings:
|
|
2877
|
+
return
|
|
2878
|
+
|
|
2879
|
+
issues_processed = 0
|
|
2880
|
+
|
|
2881
|
+
for finding in findings:
|
|
2882
|
+
try:
|
|
2883
|
+
control_id = self._normalize_control_id_string(finding.rule_id) or finding.rule_id
|
|
2884
|
+
|
|
2885
|
+
# Create issue title
|
|
2886
|
+
issue_title = self.get_issue_title(finding)
|
|
2887
|
+
|
|
2888
|
+
# Create issue directly from the consolidated finding
|
|
2889
|
+
issue = self.create_or_update_issue_from_finding(title=issue_title, finding=finding)
|
|
2890
|
+
if issue:
|
|
2891
|
+
issues_processed += 1
|
|
2892
|
+
|
|
2893
|
+
else:
|
|
2894
|
+
logger.debug(
|
|
2895
|
+
f"Failed to create issue for control {control_id} - create_or_update_issue_from_finding returned None"
|
|
2896
|
+
)
|
|
2897
|
+
|
|
2898
|
+
except Exception as e:
|
|
2899
|
+
logger.error(f"Error processing consolidated issue for control {control_id}: {e}")
|
|
2900
|
+
|
|
2901
|
+
# Store the count for summary reporting
|
|
2902
|
+
self._issues_processed_count = issues_processed
|
|
2903
|
+
|
|
2904
|
+
def _find_existing_issue_for_control(self) -> Optional["regscale_models.Issue"]:
|
|
2905
|
+
"""
|
|
2906
|
+
Find existing issue for a specific control.
|
|
2907
|
+
|
|
2908
|
+
Currently a stub that always returns None, so callers create a new issue.
|
|
2909
|
+
:return: Existing issue if found
|
|
2910
|
+
:rtype: Optional[regscale_models.Issue]
|
|
2911
|
+
"""
|
|
2912
|
+
# This is a simplified check - in practice you might want to search by external_id or other fields
|
|
2913
|
+
# that uniquely identify control-specific issues
|
|
2914
|
+
return None # For now, always create new issues
|
|
2915
|
+
|
|
2916
|
+
def sync_compliance(self, *args, **kwargs) -> None:
|
|
2917
|
+
"""Override sync to use consolidated issue processing and add summary reporting."""
|
|
2918
|
+
# Initialize issue counters used for summary reporting
|
|
2919
|
+
self._issues_created_count = self._issues_processed_count = 0
|
|
2920
|
+
|
|
2921
|
+
try:
|
|
2922
|
+
# Initialize cache dictionaries if not already initialized
|
|
2923
|
+
if not hasattr(self, "_impl_id_by_control"):
|
|
2924
|
+
self._impl_id_by_control = {}
|
|
2925
|
+
if not hasattr(self, "_assessment_by_impl_today"):
|
|
2926
|
+
self._assessment_by_impl_today = {}
|
|
2927
|
+
|
|
2928
|
+
# Ensure existing records cache is loaded before processing
|
|
2929
|
+
self._load_existing_records_cache()
|
|
2930
|
+
|
|
2931
|
+
# CRITICAL: Pre-populate control implementation cache before any processing
|
|
2932
|
+
logger.info("Pre-populating control implementation cache for reliable issue linking...")
|
|
2933
|
+
self._populate_control_implementation_cache()
|
|
2934
|
+
|
|
2935
|
+
# Call parent's compliance data processing (assessments, etc.) but skip issue creation
|
|
2936
|
+
original_create_issues = self.create_issues
|
|
2937
|
+
self.create_issues = False # Disable base class issue creation
|
|
2938
|
+
super().sync_compliance() # Call the base ComplianceIntegration.sync_compliance method
|
|
2939
|
+
self.create_issues = original_create_issues # Restore setting
|
|
2940
|
+
|
|
2941
|
+
# CRITICAL: Copy assessments from base class cache to our cache so IssueFieldSetter can find them
|
|
2942
|
+
self._sync_assessment_cache_from_base_class()
|
|
2943
|
+
|
|
2944
|
+
# Now handle issue creation with consolidated logic
|
|
2945
|
+
if self.create_issues:
|
|
2946
|
+
findings = list(self.fetch_findings())
|
|
2947
|
+
if findings:
|
|
2948
|
+
self._process_consolidated_issues(findings)
|
|
2949
|
+
|
|
2950
|
+
# Provide concise summary
|
|
2951
|
+
issues_processed = getattr(self, "_issues_processed_count", 0)
|
|
2952
|
+
|
|
2953
|
+
if issues_processed > 0:
|
|
2954
|
+
# Count actual unique issues in the database for this security plan
|
|
2955
|
+
from regscale.models import regscale_models
|
|
2956
|
+
|
|
2957
|
+
actual_issues = len(
|
|
2958
|
+
regscale_models.Issue.get_all_by_parent(parent_id=self.plan_id, parent_module=self.parent_module)
|
|
2959
|
+
)
|
|
2960
|
+
|
|
2961
|
+
logger.info(
|
|
2962
|
+
f"SUMMARY: Processed {issues_processed} policy violations resulting in {actual_issues} consolidated issues for failed controls for assets in RegScale"
|
|
2963
|
+
)
|
|
2964
|
+
else:
|
|
2965
|
+
logger.info("SUMMARY: No issues processed - no failed controls with existing assets")
|
|
2966
|
+
|
|
2967
|
+
except Exception as e:
|
|
2968
|
+
error_and_exit(f"Error during Wiz compliance sync: {e}")
|
|
2969
|
+
|
|
2970
|
+
def _get_regscale_asset_identifier(self, compliance_item: "WizComplianceItem") -> str:
|
|
2971
|
+
"""
|
|
2972
|
+
Get the appropriate RegScale asset identifier for a compliance item.
|
|
2973
|
+
|
|
2974
|
+
For Wiz integrations, the asset_identifier_field is "wizId", so we need to return
|
|
2975
|
+
the Wiz resource ID that will match what's stored in the RegScale Asset's wizId field.
|
|
2976
|
+
|
|
2977
|
+
:param WizComplianceItem compliance_item: Compliance item with resource information
|
|
2978
|
+
:return: Wiz resource ID that matches the RegScale Asset's wizId field
|
|
2979
|
+
:rtype: str
|
|
2980
|
+
"""
|
|
2981
|
+
resource_id = getattr(compliance_item, "resource_id", "")
|
|
2982
|
+
resource_name = getattr(compliance_item, "resource_name", "")
|
|
2983
|
+
|
|
2984
|
+
# For Wiz policy compliance, the asset identifier should be the Wiz resource ID
|
|
2985
|
+
# because that's what gets stored in RegScale Asset's wizId field (asset_identifier_field = "wizId")
|
|
2986
|
+
if resource_id:
|
|
2987
|
+
return resource_id
|
|
2988
|
+
|
|
2989
|
+
# Fallback (should not normally happen since resource_id is required)
|
|
2990
|
+
return resource_name or "Unknown Resource"
|
|
2991
|
+
|
|
2992
|
+
def _create_consolidated_asset_identifier(self, asset_mappings: Dict[str, Dict[str, str]]) -> str:
|
|
2993
|
+
"""
|
|
2994
|
+
Create a consolidated asset identifier with only asset names (one per line).
|
|
2995
|
+
|
|
2996
|
+
Format: "Asset Name 1\nAsset Name 2\nAsset Name 3"
|
|
2997
|
+
This format provides clean, human-readable asset names for POAMs and issues
|
|
2998
|
+
without cluttering them with Wiz resource IDs.
|
|
2999
|
+
|
|
3000
|
+
:param Dict[str, Dict[str, str]] asset_mappings: Map of Wiz resource IDs to asset info
|
|
3001
|
+
:return: Consolidated identifier string with asset names only
|
|
3002
|
+
:rtype: str
|
|
3003
|
+
"""
|
|
3004
|
+
if not asset_mappings:
|
|
3005
|
+
return ""
|
|
3006
|
+
|
|
3007
|
+
# Create entries that show only asset names (one per line)
|
|
3008
|
+
identifier_parts = []
|
|
3009
|
+
# Sort by asset name for consistent ordering
|
|
3010
|
+
sorted_mappings = sorted(asset_mappings.items(), key=lambda x: x[1]["name"])
|
|
3011
|
+
for wiz_id, asset_info in sorted_mappings:
|
|
3012
|
+
asset_name = asset_info["name"]
|
|
3013
|
+
wiz_resource_id = asset_info["wiz_id"]
|
|
3014
|
+
|
|
3015
|
+
# Format: Just the asset name (no Wiz resource ID for cleaner POAMs)
|
|
3016
|
+
if asset_name != wiz_resource_id:
|
|
3017
|
+
# Asset was successfully mapped, show only the name
|
|
3018
|
+
identifier_part = asset_name
|
|
3019
|
+
else:
|
|
3020
|
+
# Asset lookup failed, use the Wiz resource ID as fallback
|
|
3021
|
+
identifier_part = wiz_resource_id
|
|
3022
|
+
|
|
3023
|
+
identifier_parts.append(identifier_part)
|
|
3024
|
+
|
|
3025
|
+
# Join with newlines for multi-asset issues
|
|
3026
|
+
consolidated_identifier = "\n".join(identifier_parts)
|
|
3027
|
+
logger.debug(
|
|
3028
|
+
f"Created consolidated asset identifier with {len(identifier_parts)} assets: {consolidated_identifier}"
|
|
3029
|
+
)
|
|
3030
|
+
return consolidated_identifier
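# Illustrative sketch, not part of the package diff above: the consolidation format
# this method produces, using made-up values in the {"name": ..., "wiz_id": ...}
# shape the method expects.
asset_mappings = {
    "wiz-123": {"name": "api-gateway", "wiz_id": "wiz-123"},
    "wiz-456": {"name": "prod-db-01", "wiz_id": "wiz-456"},
}
lines = [
    info["name"] if info["name"] != info["wiz_id"] else info["wiz_id"]
    for _, info in sorted(asset_mappings.items(), key=lambda x: x[1]["name"])
]
print("\n".join(lines))  # one asset name per line: "api-gateway" then "prod-db-01"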
|
|
1764
3031
|
|
|
1765
3032
|
|
|
1766
3033
|
def resolve_framework_id(framework_input: str) -> str:
|
|
@@ -1818,7 +3085,7 @@ def list_available_frameworks() -> str:
|
|
|
1818
3085
|
output.append("=" * 50)
|
|
1819
3086
|
|
|
1820
3087
|
# Show shorthand mappings first
|
|
1821
|
-
output.append("\
|
|
3088
|
+
output.append("\nQuick Shortcuts:")
|
|
1822
3089
|
output.append("-" * 20)
|
|
1823
3090
|
shortcut_items = sorted(FRAMEWORK_SHORTCUTS.items())
|
|
1824
3091
|
for shorthand, framework_id in shortcut_items[:10]: # Show first 10
|