regscale-cli 6.23.0.1__py3-none-any.whl → 6.24.0.0__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of regscale-cli might be problematic.
Files changed (43)
  1. regscale/_version.py +1 -1
  2. regscale/core/app/application.py +2 -0
  3. regscale/integrations/commercial/__init__.py +1 -0
  4. regscale/integrations/commercial/sarif/sarif_converter.py +1 -1
  5. regscale/integrations/commercial/wizv2/click.py +109 -2
  6. regscale/integrations/commercial/wizv2/compliance_report.py +1485 -0
  7. regscale/integrations/commercial/wizv2/constants.py +72 -2
  8. regscale/integrations/commercial/wizv2/data_fetcher.py +61 -0
  9. regscale/integrations/commercial/wizv2/file_cleanup.py +104 -0
  10. regscale/integrations/commercial/wizv2/issue.py +775 -27
  11. regscale/integrations/commercial/wizv2/policy_compliance.py +599 -181
  12. regscale/integrations/commercial/wizv2/reports.py +243 -0
  13. regscale/integrations/commercial/wizv2/scanner.py +668 -245
  14. regscale/integrations/compliance_integration.py +304 -51
  15. regscale/integrations/due_date_handler.py +210 -0
  16. regscale/integrations/public/cci_importer.py +444 -0
  17. regscale/integrations/scanner_integration.py +718 -153
  18. regscale/models/integration_models/CCI_List.xml +1 -0
  19. regscale/models/integration_models/cisa_kev_data.json +18 -3
  20. regscale/models/integration_models/synqly_models/capabilities.json +1 -1
  21. regscale/models/regscale_models/form_field_value.py +1 -1
  22. regscale/models/regscale_models/milestone.py +1 -0
  23. regscale/models/regscale_models/regscale_model.py +225 -60
  24. regscale/models/regscale_models/security_plan.py +3 -2
  25. regscale/regscale.py +7 -0
  26. {regscale_cli-6.23.0.1.dist-info → regscale_cli-6.24.0.0.dist-info}/METADATA +9 -9
  27. {regscale_cli-6.23.0.1.dist-info → regscale_cli-6.24.0.0.dist-info}/RECORD +43 -26
  28. tests/fixtures/test_fixture.py +13 -8
  29. tests/regscale/integrations/public/__init__.py +0 -0
  30. tests/regscale/integrations/public/test_alienvault.py +220 -0
  31. tests/regscale/integrations/public/test_cci.py +458 -0
  32. tests/regscale/integrations/public/test_cisa.py +1021 -0
  33. tests/regscale/integrations/public/test_emass.py +518 -0
  34. tests/regscale/integrations/public/test_fedramp.py +851 -0
  35. tests/regscale/integrations/public/test_fedramp_cis_crm.py +3661 -0
  36. tests/regscale/integrations/public/test_file_uploads.py +506 -0
  37. tests/regscale/integrations/public/test_oscal.py +453 -0
  38. tests/regscale/models/test_form_field_value_integration.py +304 -0
  39. tests/regscale/models/test_module_integration.py +582 -0
  40. {regscale_cli-6.23.0.1.dist-info → regscale_cli-6.24.0.0.dist-info}/LICENSE +0 -0
  41. {regscale_cli-6.23.0.1.dist-info → regscale_cli-6.24.0.0.dist-info}/WHEEL +0 -0
  42. {regscale_cli-6.23.0.1.dist-info → regscale_cli-6.24.0.0.dist-info}/entry_points.txt +0 -0
  43. {regscale_cli-6.23.0.1.dist-info → regscale_cli-6.24.0.0.dist-info}/top_level.txt +0 -0
@@ -225,9 +225,7 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         )

         for asset in existing_assets:
-            # Cache by external_id, identifier, and other_tracking_number for flexible lookup
-            if hasattr(asset, "externalId") and asset.externalId:
-                self._existing_assets_cache[asset.externalId] = asset
+            # Cache by identifier and other_tracking_number for flexible lookup
             if hasattr(asset, "identifier") and asset.identifier:
                 self._existing_assets_cache[asset.identifier] = asset
             if hasattr(asset, "otherTrackingNumber") and asset.otherTrackingNumber:
@@ -283,13 +281,11 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         wiz_issues = 0
         for issue in all_issues:
             # Cache by external_id and other_identifier for flexible lookup
-            if hasattr(issue, "externalId") and issue.externalId:
-                self._existing_issues_cache[issue.externalId] = issue
-                if "wiz-policy" in issue.externalId.lower():
-                    wiz_issues += 1
-                    logger.debug(f"Cached Wiz issue: {issue.id} -> external_id: {issue.externalId}")
             if hasattr(issue, "otherIdentifier") and issue.otherIdentifier:
                 self._existing_issues_cache[issue.otherIdentifier] = issue
+                if "wiz-policy" in issue.otherIdentifier.lower():
+                    wiz_issues += 1
+                    logger.debug(f"Cached Wiz issue: {issue.id} -> other_identifier: {issue.otherIdentifier}")

         logger.debug(f"Cached {wiz_issues} Wiz policy issues out of {len(all_issues)} total issues")

@@ -411,17 +407,32 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         """
         logger.info("Processing compliance data...")

-        # Reset state to avoid double counting on repeated calls
+        self._reset_compliance_state()
+        allowed_controls = self._build_allowed_controls_set()
+        raw_compliance_data = self.fetch_compliance_data()
+
+        processing_stats = self._process_raw_compliance_items(raw_compliance_data, allowed_controls)
+        self._log_processing_summary(raw_compliance_data, processing_stats)
+
+        # Perform control-level categorization based on aggregated results
+        self._categorize_controls_by_aggregation()
+        self._log_final_results()
+
+    def _reset_compliance_state(self) -> None:
+        """Reset state to avoid double counting on repeated calls."""
         self.all_compliance_items = []
         self.failed_compliance_items = []
         self.passing_controls = {}
         self.failing_controls = {}
         self.asset_compliance_map.clear()

-        # Build allowed control IDs from plan/catalog controls to restrict scope
+    def _build_allowed_controls_set(self) -> set[str]:
+        """Build allowed control IDs from plan/catalog controls to restrict scope."""
         allowed_controls_normalized: set[str] = set()
         try:
             controls = self._get_controls()
+            logger.debug(f"Loaded {len(controls)} controls from plan/catalog")
+
             for ctl in controls:
                 cid = (ctl.get("controlId") or "").strip()
                 if not cid:
@@ -429,56 +440,220 @@ class ComplianceIntegration(ScannerIntegration, ABC):
                 base, sub = self._normalize_control_id(cid)
                 normalized = f"{base}({sub})" if sub else base
                 allowed_controls_normalized.add(normalized)
-        except Exception:
-            # If controls cannot be loaded, proceed without additional filtering
+
+            logger.debug(f"Built allowed_controls_normalized set with {len(allowed_controls_normalized)} entries")
+            if allowed_controls_normalized:
+                sample = sorted(allowed_controls_normalized)[:5]
+                logger.debug(f"Sample allowed controls: {sample}")
+        except Exception as e:
+            logger.warning(f"Could not load controls from plan/catalog: {e}")
             allowed_controls_normalized = set()

-        # Fetch raw compliance data
-        raw_compliance_data = self.fetch_compliance_data()
+        return allowed_controls_normalized
+
+    def _process_raw_compliance_items(self, raw_compliance_data: list, allowed_controls: set) -> dict:
+        """Process raw compliance items and return processing statistics.
+        :param list raw_compliance_data: Raw compliance data from external system
+        :param set allowed_controls: Allowed control IDs
+        :return: Processed compliance items
+        :rtype: dict
+        """
+        stats = {"skipped_no_control": 0, "skipped_no_resource": 0, "skipped_not_in_plan": 0, "processed_count": 0}

-        # Convert to ComplianceItem objects
         for raw_item in raw_compliance_data:
             try:
                 compliance_item = self.create_compliance_item(raw_item)
-                # Skip items that do not resolve to a control or resource
-                if not getattr(compliance_item, "control_id", "") or not getattr(compliance_item, "resource_id", ""):
+                if not self._process_single_compliance_item(compliance_item, allowed_controls, stats):
                     continue
-
-                # If we have an allowed set, restrict to only controls in current plan/catalog
-                if allowed_controls_normalized:
-                    base, sub = self._normalize_control_id(getattr(compliance_item, "control_id", ""))
-                    norm_item = f"{base}({sub})" if sub else base
-                    if norm_item not in allowed_controls_normalized:
-                        continue
-                self.all_compliance_items.append(compliance_item)
-
-                # Build asset mapping
-                self.asset_compliance_map[compliance_item.resource_id].append(compliance_item)
-
-                # Categorize by result
-                if compliance_item.compliance_result in self.FAIL_STATUSES:
-                    self.failed_compliance_items.append(compliance_item)
-                    # Track failing controls (control can fail if ANY asset fails)
-                    control_key = compliance_item.control_id.lower()
-                    self.failing_controls[control_key] = compliance_item
-                    # Remove from passing if it was there
-                    self.passing_controls.pop(control_key, None)
-
-                elif compliance_item.compliance_result in self.PASS_STATUSES:
-                    control_key = compliance_item.control_id.lower()
-                    # Only mark as passing if not already failing
-                    if control_key not in self.failing_controls:
-                        self.passing_controls[control_key] = compliance_item
-
             except Exception as e:
                 logger.error(f"Error processing compliance item: {e}")
                 continue

+        return stats
+
+    def _process_single_compliance_item(self, compliance_item: Any, allowed_controls: set, stats: dict) -> bool:
+        """Process a single compliance item and update statistics. Returns True if processed successfully."""
+        control_id = getattr(compliance_item, "control_id", "")
+        resource_id = getattr(compliance_item, "resource_id", "")
+
+        if not control_id:
+            stats["skipped_no_control"] += 1
+            return False
+        if not resource_id:
+            stats["skipped_no_resource"] += 1
+            return False
+
+        if not self._should_process_item(compliance_item, control_id, allowed_controls, stats):
+            return False
+
+        self._add_processed_item(compliance_item, stats)
+        return True
+
+    def _should_process_item(self, compliance_item: Any, control_id: str, allowed_controls: set, stats: dict) -> bool:
+        """Determine if an item should be processed based on control filtering."""
+        if not allowed_controls:
+            return True
+
+        base, sub = self._normalize_control_id(control_id)
+        norm_item = f"{base}({sub})" if sub else base
+
+        if norm_item in allowed_controls:
+            return True
+
+        # Allow PASS controls through even if they don't have existing implementations
+        if compliance_item.compliance_result in self.PASS_STATUSES:
+            return True
+
+        stats["skipped_not_in_plan"] += 1
+        if stats["skipped_not_in_plan"] <= 3:
+            logger.debug(f"Skipping control {norm_item} - not in plan (result: {compliance_item.compliance_result})")
+        return False
+
+    def _add_processed_item(self, compliance_item: Any, stats: dict) -> None:
+        """Add a processed item to collections and update statistics."""
+        self.all_compliance_items.append(compliance_item)
+        stats["processed_count"] += 1
+
+        # Build asset mapping
+        self.asset_compliance_map[compliance_item.resource_id].append(compliance_item)
+
+        # Categorize by result
+        if compliance_item.compliance_result in self.FAIL_STATUSES:
+            self.failed_compliance_items.append(compliance_item)
+
+    def _log_processing_summary(self, raw_compliance_data: list, stats: dict) -> None:
+        """Log summary of compliance data processing."""
+        logger.debug("Compliance item processing summary:")
+        logger.debug(f" - Total raw items: {len(raw_compliance_data)}")
+        logger.debug(f" - Skipped (no control_id): {stats['skipped_no_control']}")
+        logger.debug(f" - Skipped (no resource_id): {stats['skipped_no_resource']}")
+        logger.debug(f" - Skipped (not in plan): {stats['skipped_not_in_plan']}")
+        logger.debug(f" - Processed successfully: {stats['processed_count']}")
+
+    def _log_final_results(self) -> None:
+        """Log final processing results."""
         logger.debug(
             f"Processed {len(self.all_compliance_items)} compliance items: "
             f"{len(self.all_compliance_items) - len(self.failed_compliance_items)} passing, "
             f"{len(self.failed_compliance_items)} failing"
         )
+        logger.debug(
+            f"Control categorization: {len(self.passing_controls)} passing controls, "
+            f"{len(self.failing_controls)} failing controls"
+        )
+
+    def _categorize_controls_by_aggregation(self) -> None:
+        """
+        Categorize controls as passing or failing based on aggregated results across all compliance items.
+
+        This method uses project-scoped aggregation logic instead of the previous "any fail = control fails"
+        approach. For project-scoped integrations (like Wiz), this provides more accurate control status.
+        """
+
+        # Group all compliance items by control ID
+        control_items = self._group_items_by_control()
+
+        # Analyze each control's results
+        for control_key, items in control_items.items():
+            self._categorize_single_control(control_key, items)
+
+    def _group_items_by_control(self) -> dict:
+        """Group compliance items by control ID."""
+        from collections import defaultdict
+
+        control_items = defaultdict(list)
+        for item in self.all_compliance_items:
+            control_key = item.control_id.lower()
+            control_items[control_key].append(item)
+
+        return control_items
+
+    def _categorize_single_control(self, control_key: str, items: list) -> None:
+        """Categorize a single control based on its compliance items."""
+        from collections import Counter
+
+        results = [item.compliance_result for item in items]
+        result_counts = Counter(results)
+        total_items = len(results)
+
+        fail_count, pass_count = self._count_pass_fail_results(result_counts)
+
+        if fail_count == 0 and pass_count > 0:
+            self._mark_control_as_passing(control_key, items, pass_count, fail_count)
+        elif fail_count > 0:
+            self._handle_control_with_failures(control_key, items, fail_count, pass_count, total_items)
+        else:
+            logger.debug(f"Control {control_key} has unclear results: {dict(result_counts)}")
+
+    def _count_pass_fail_results(self, result_counts: dict) -> tuple[int, int]:
+        """Count pass and fail results from result counts."""
+        fail_statuses_lower = [status.lower() for status in self.FAIL_STATUSES]
+        pass_statuses_lower = [status.lower() for status in self.PASS_STATUSES]
+
+        fail_count = 0
+        pass_count = 0
+
+        for result, count in result_counts.items():
+            result_lower = result.lower()
+            if result_lower in fail_statuses_lower:
+                fail_count += count
+            elif result_lower in pass_statuses_lower:
+                pass_count += count
+
+        return fail_count, pass_count
+
+    def _mark_control_as_passing(self, control_key: str, items: list, pass_count: int, fail_count: int) -> None:
+        """Mark a control as passing."""
+        self.passing_controls[control_key] = items[0]  # Use first item as representative
+        logger.debug(f"Control {control_key} marked as PASSING: {pass_count}P/{fail_count}F")
+
+    def _handle_control_with_failures(
+        self, control_key: str, items: list, fail_count: int, pass_count: int, total_items: int
+    ) -> None:
+        """Handle a control that has some failures."""
+        fail_ratio = fail_count / total_items
+        failure_threshold = getattr(self, "control_failure_threshold", 0.2)
+
+        if fail_ratio > failure_threshold:
+            self._mark_control_as_failing(control_key, items, pass_count, fail_count, fail_ratio, failure_threshold)
+        else:
+            self._mark_control_as_passing_with_warnings(
+                control_key, items, pass_count, fail_count, fail_ratio, failure_threshold
+            )
+
+    def _mark_control_as_failing(
+        self,
+        control_key: str,
+        items: list,
+        pass_count: int,
+        fail_count: int,
+        fail_ratio: float,
+        failure_threshold: float,
+    ) -> None:
+        """Mark a control as failing due to significant failures."""
+        fail_statuses_lower = [status.lower() for status in self.FAIL_STATUSES]
+        failing_item = next(item for item in items if item.compliance_result.lower() in fail_statuses_lower)
+        self.failing_controls[control_key] = failing_item
+        logger.debug(
+            f"Control {control_key} marked as FAILING: {pass_count}P/{fail_count}F "
+            f"({fail_ratio:.1%} fail rate > {failure_threshold:.1%} threshold)"
+        )
+
+    def _mark_control_as_passing_with_warnings(
+        self,
+        control_key: str,
+        items: list,
+        pass_count: int,
+        fail_count: int,
+        fail_ratio: float,
+        failure_threshold: float,
+    ) -> None:
+        """Mark a control as passing despite low failure rate."""
+        self.passing_controls[control_key] = items[0]
+        logger.debug(
+            f"Control {control_key} marked as PASSING (low fail rate): {pass_count}P/{fail_count}F "
+            f"({fail_ratio:.1%} fail rate < {failure_threshold:.1%} threshold)"
+        )

     def create_asset_from_compliance_item(self, compliance_item: ComplianceItem) -> Optional[IntegrationAsset]:
         """
@@ -660,6 +835,11 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         assets_processed = self.update_regscale_assets(iter(assets))
         self._log_asset_results(assets_processed)

+        # Refresh the asset map after creating/updating assets to ensure
+        # the map contains all assets for issue creation
+        logger.debug("Refreshing asset map after asset sync...")
+        self.asset_map_by_identifier.update(self.get_asset_map())
+
     def _log_asset_results(self, assets_processed: int) -> None:
         """
         Log asset processing results.
@@ -707,8 +887,32 @@ class ComplianceIntegration(ScannerIntegration, ABC):
             logger.debug("No findings to process into issues")
             return

-        issues_created, issues_skipped = self._process_findings_to_issues(findings)
-        self._log_issue_results(issues_created, issues_skipped)
+        # Ensure asset map is populated before processing issues
+        # This handles cases where assets were created in previous runs
+        if not self.asset_map_by_identifier:
+            logger.debug("Loading asset map before issue processing...")
+            self.asset_map_by_identifier.update(self.get_asset_map())
+
+        findings_processed, findings_skipped = self._process_findings_to_issues(findings)
+
+        # CRITICAL FIX: Flush bulk issue operations to database
+        # This ensures all issues created/updated in bulk mode are persisted
+        logger.debug(f"Calling bulk_save for {findings_processed} processed findings ({findings_skipped} skipped)...")
+        issue_results = regscale_models.Issue.bulk_save()
+        logger.debug(
+            f"Bulk save completed - created: {issue_results.get('created_count', 0)}, updated: {issue_results.get('updated_count', 0)}"
+        )
+
+        # Update result counts with actual database operations
+        if hasattr(self, "_results"):
+            if "issues" not in self._results:
+                self._results["issues"] = {}
+            self._results["issues"].update(issue_results)
+
+        # Use actual database results for logging
+        issues_created = issue_results.get("created_count", 0)
+        issues_updated = issue_results.get("updated_count", 0)
+        self._log_issue_results_accurate(issues_created, issues_updated, findings_processed, findings_skipped)

     def _process_findings_to_issues(self, findings: List[IntegrationFinding]) -> tuple[int, int]:
         """
@@ -720,14 +924,20 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         issues_created = 0
         issues_skipped = 0

-        for finding in findings:
+        logger.debug(f"Processing {len(findings)} findings into issues...")
+        for i, finding in enumerate(findings):
             try:
+                logger.debug(
+                    f"Processing finding {i + 1}/{len(findings)}: external_id='{finding.external_id}', asset_identifier='{finding.asset_identifier}'"
+                )
                 if self._process_single_finding(finding):
                     issues_created += 1
+                    logger.debug(f" -> Finding {i + 1} processed successfully")
                 else:
                     issues_skipped += 1
+                    logger.debug(f" -> Finding {i + 1} skipped")
             except Exception as e:
-                logger.error(f"Error processing finding: {e}")
+                logger.error(f"Error processing finding {i + 1}: {e}")
                 issues_skipped += 1

         return issues_created, issues_skipped
@@ -739,14 +949,25 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         :param finding: Finding to process
         :return: True if issue was created/updated, False if skipped
         """
+        logger.debug(
+            f" -> Processing finding: external_id='{finding.external_id}', asset_identifier='{finding.asset_identifier}'"
+        )
+
         asset = self._get_or_create_asset_for_finding(finding)
         if not asset:
+            logger.debug(f" -> Asset not found/created for identifier '{finding.asset_identifier}', skipping finding")
             self._log_asset_not_found_error(finding)
             return False

+        logger.debug(f" -> Found/created asset {asset.id} for identifier '{finding.asset_identifier}'")
         issue_title = self.get_issue_title(finding)
         issue = self.create_or_update_issue_from_finding(title=issue_title, finding=finding)
-        return issue is not None
+        success = issue is not None
+        if success and issue:
+            logger.debug(f" -> Successfully processed finding -> issue {issue.id}")
+        else:
+            logger.debug(" -> Failed to create/update issue for finding")
+        return success

     def _get_or_create_asset_for_finding(self, finding: IntegrationFinding) -> Optional[regscale_models.Asset]:
         """
@@ -778,6 +999,7 @@ class ComplianceIntegration(ScannerIntegration, ABC):
     def _log_issue_results(self, issues_created: int, issues_skipped: int) -> None:
         """
         Log issue processing results.
+        DEPRECATED: Use _log_issue_results_accurate for accurate reporting.

         :param int issues_created: Number of issues created/updated
         :param int issues_skipped: Number of issues skipped
@@ -791,6 +1013,36 @@ class ComplianceIntegration(ScannerIntegration, ABC):
         else:
             logger.debug("No issues processed")

+    def _log_issue_results_accurate(
+        self, issues_created: int, issues_updated: int, findings_processed: int, findings_skipped: int
+    ) -> None:
+        """
+        Log accurate issue processing results based on actual database operations.
+
+        :param int issues_created: Number of new issues created in database
+        :param int issues_updated: Number of existing issues updated in database
+        :param int findings_processed: Number of findings that were processed
+        :param int findings_skipped: Number of findings that were skipped
+        :return: None
+        :rtype: None
+        """
+        total_db_operations = issues_created + issues_updated
+
+        if total_db_operations > 0:
+            logger.info(
+                f"Processed {findings_processed} findings into issues: {issues_created} new issues created, {issues_updated} existing issues updated"
+            )
+            if findings_skipped > 0:
+                logger.info(f"Skipped {findings_skipped} findings (assets not found)")
+        elif findings_skipped > 0:
+            logger.warning(
+                f"Issues processed: 0 created/updated, {findings_skipped} findings skipped (assets not found)"
+            )
+        else:
+            logger.debug(
+                f"Processed {findings_processed} findings but no database changes were needed (all issues up-to-date)"
+            )
+
     def _finalize_scan_history(self, scan_history: regscale_models.ScanHistory) -> None:
         """
         Finalize scan history with error handling.
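
For reference, the three logging branches in _log_issue_results_accurate reduce to the following condensed decision sketch (plain print instead of the module logger; messages shortened from the f-strings above):

def summarize(created: int, updated: int, processed: int, skipped: int) -> str:
    """Condensed mirror of the branch logic in _log_issue_results_accurate."""
    if created + updated > 0:
        return f"{processed} findings -> {created} created, {updated} updated"
    if skipped > 0:
        return f"0 created/updated, {skipped} findings skipped (assets not found)"
    return f"{processed} findings processed, no database changes needed"

# created/updated come from the bulk_save() result; processed/skipped from the finding loop.
print(summarize(3, 2, 5, 0))  # 5 findings -> 3 created, 2 updated
print(summarize(0, 0, 0, 4))  # 0 created/updated, 4 findings skipped (assets not found)
print(summarize(0, 0, 6, 0))  # 6 findings processed, no database changes needed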
@@ -1568,17 +1820,18 @@ class ComplianceIntegration(ScannerIntegration, ABC):

         # Check for existing issue by external_id first
         external_id = finding.external_id
+        logger.debug(f"Looking for existing issue with external_id: '{external_id}'")
         existing_issue = self._find_existing_issue_cached(external_id)

         if existing_issue:
             logger.debug(
-                f"Found existing issue {existing_issue.id} for external_id {external_id}, updating instead of creating"
+                f"Found existing issue {existing_issue.id} (other_identifier: '{existing_issue.otherIdentifier}') for lookup external_id '{external_id}', updating instead of creating"
             )

             # Update existing issue with new finding data
             existing_issue.title = title
             existing_issue.description = finding.description
-            existing_issue.severity = finding.severity
+            existing_issue.severityLevel = finding.severity
             existing_issue.status = finding.status
             # Ensure affectedControls is updated from the finding's control id
             try: