regscale-cli 6.21.1.0__py3-none-any.whl → 6.21.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. regscale/_version.py +1 -1
  2. regscale/core/app/application.py +7 -0
  3. regscale/integrations/commercial/__init__.py +8 -8
  4. regscale/integrations/commercial/import_all/import_all_cmd.py +2 -2
  5. regscale/integrations/commercial/microsoft_defender/__init__.py +0 -0
  6. regscale/integrations/commercial/{defender.py → microsoft_defender/defender.py} +38 -612
  7. regscale/integrations/commercial/microsoft_defender/defender_api.py +286 -0
  8. regscale/integrations/commercial/microsoft_defender/defender_constants.py +80 -0
  9. regscale/integrations/commercial/microsoft_defender/defender_scanner.py +168 -0
  10. regscale/integrations/commercial/qualys/__init__.py +24 -86
  11. regscale/integrations/commercial/qualys/containers.py +2 -0
  12. regscale/integrations/commercial/qualys/scanner.py +7 -2
  13. regscale/integrations/commercial/sonarcloud.py +110 -71
  14. regscale/integrations/commercial/wizv2/click.py +4 -1
  15. regscale/integrations/commercial/wizv2/data_fetcher.py +401 -0
  16. regscale/integrations/commercial/wizv2/finding_processor.py +295 -0
  17. regscale/integrations/commercial/wizv2/policy_compliance.py +1402 -203
  18. regscale/integrations/commercial/wizv2/policy_compliance_helpers.py +564 -0
  19. regscale/integrations/commercial/wizv2/scanner.py +4 -4
  20. regscale/integrations/compliance_integration.py +212 -60
  21. regscale/integrations/public/fedramp/fedramp_five.py +92 -7
  22. regscale/integrations/scanner_integration.py +27 -4
  23. regscale/models/__init__.py +1 -1
  24. regscale/models/integration_models/cisa_kev_data.json +33 -3
  25. regscale/models/integration_models/synqly_models/capabilities.json +1 -1
  26. regscale/models/regscale_models/issue.py +29 -9
  27. {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.0.dist-info}/METADATA +1 -1
  28. {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.0.dist-info}/RECORD +32 -27
  29. tests/regscale/test_authorization.py +0 -65
  30. tests/regscale/test_init.py +0 -96
  31. {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.0.dist-info}/LICENSE +0 -0
  32. {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.0.dist-info}/WHEEL +0 -0
  33. {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.0.dist-info}/entry_points.txt +0 -0
  34. {regscale_cli-6.21.1.0.dist-info → regscale_cli-6.21.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,401 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """Data fetching and caching logic for Wiz Policy Compliance."""
4
+
5
+ import json
6
+ import logging
7
+ import os
8
+ from datetime import datetime
9
+ from typing import Dict, List, Optional, Any, Callable
10
+
11
+ from regscale.integrations.commercial.wizv2.async_client import run_async_queries
12
+ from regscale.integrations.commercial.wizv2.constants import WizVulnerabilityType, WIZ_POLICY_QUERY
13
+
14
+ logger = logging.getLogger("regscale")
15
+
16
+
17
class WizDataCache:
    """Disk-backed cache for Wiz API responses with a configurable TTL."""

    def __init__(self, cache_dir: str, cache_duration_minutes: int = 0) -> None:
        """
        Initialize the Wiz data cache.

        :param cache_dir: Directory to store cache files
        :param cache_duration_minutes: Cache TTL in minutes (0 = disabled)
        """
        self.cache_dir = cache_dir
        self.cache_duration_minutes = cache_duration_minutes
        # When True, is_cache_valid() always reports a miss.
        self.force_refresh = False

    def get_cache_file_path(self, wiz_project_id: str, framework_id: str) -> str:
        """
        Build (and ensure the directory for) the cache file path for a
        project/framework pair.

        :param wiz_project_id: Wiz project ID
        :param framework_id: Framework ID
        :return: Full path to cache file
        """
        os.makedirs(self.cache_dir, exist_ok=True)
        filename = f"policy_assessments_{wiz_project_id}_{framework_id}.json"
        return os.path.join(self.cache_dir, filename)

    def is_cache_valid(self, cache_file: str) -> bool:
        """
        Report whether the cache file exists and is younger than the TTL.

        :param cache_file: Path to cache file to check
        :return: True if cache is valid, False otherwise
        """
        caching_enabled = self.cache_duration_minutes > 0 and not self.force_refresh
        if not caching_enabled:
            return False
        if not os.path.exists(cache_file):
            return False
        try:
            age_seconds = datetime.now().timestamp() - os.path.getmtime(cache_file)
            return age_seconds <= self.cache_duration_minutes * 60
        except Exception:
            # Any stat/clock failure is treated as a cache miss.
            return False

    def load_from_cache(self, cache_file: str) -> Optional[List[Dict[str, Any]]]:
        """
        Read cached assessment nodes from disk.

        :param cache_file: Path to cache file to load
        :return: Cached assessment nodes if valid, None otherwise
        """
        try:
            with open(cache_file, "r", encoding="utf-8") as handle:
                payload = json.load(handle)
            # Accept either key; older cache files used "assessments".
            candidates = payload.get("nodes") or payload.get("assessments") or []
            return candidates if isinstance(candidates, list) else None
        except Exception as e:
            logger.debug(f"Error loading cache: {e}")
            return None

    def save_to_cache(
        self, cache_file: str, nodes: List[Dict[str, Any]], wiz_project_id: str, framework_id: str
    ) -> None:
        """
        Persist assessment nodes to disk (no-op when caching is disabled).

        :param cache_file: Path to cache file
        :param nodes: Assessment nodes to cache
        :param wiz_project_id: Wiz project ID for metadata
        :param framework_id: Framework ID for metadata
        """
        if self.cache_duration_minutes <= 0:
            return
        try:
            record = {
                "timestamp": datetime.now().isoformat(),
                "wiz_project_id": wiz_project_id,
                "framework_id": framework_id,
                "nodes": nodes,
            }
            with open(cache_file, "w", encoding="utf-8") as handle:
                json.dump(record, handle, ensure_ascii=False)
        except Exception as e:
            # Cache writes are best-effort; never fail the caller.
            logger.debug(f"Error writing cache: {e}")
104
+
105
+
106
class WizApiClient:
    """Handles Wiz GraphQL API interactions for policy assessments."""

    def __init__(self, endpoint: str, access_token: str) -> None:
        """
        Initialize the Wiz API client.

        :param endpoint: Wiz GraphQL API endpoint URL
        :param access_token: Wiz API access token
        """
        self.endpoint = endpoint
        self.access_token = access_token

    def get_headers(self) -> Dict[str, str]:
        """
        Get HTTP headers for API requests.

        :return: Dictionary of HTTP headers including bearer authorization
        """
        return {
            "Authorization": f"Bearer {self.access_token}",
            "Content-Type": "application/json",
        }

    def fetch_policy_assessments_async(
        self, wiz_project_id: str, progress_callback: Optional[Callable] = None
    ) -> List[Dict[str, Any]]:
        """
        Fetch policy assessments using the async client.

        :param wiz_project_id: Wiz project ID (not applied as a server-side
            filter here — TODO confirm whether the async query should filter
            by project like the requests path does)
        :param progress_callback: Optional progress callback (currently unused;
            progress is reported via the shared compliance job progress bar)
        :return: List of policy assessment nodes
        :raises Exception: Re-raises any async-client failure so the caller
            can fall back to the requests-based path
        """
        try:
            page_size = 100
            query_config = {
                "type": WizVulnerabilityType.CONFIGURATION,
                "query": WIZ_POLICY_QUERY,
                "topic_key": "policyAssessments",
                "variables": {"first": page_size},
            }

            # Import here to avoid circular imports during testing
            from regscale.integrations.commercial.wizv2.utils import compliance_job_progress

            with compliance_job_progress:
                task = compliance_job_progress.add_task(
                    f"[#f68d1f]Fetching Wiz policy assessments (async, page size: {page_size})...",
                    total=1,
                )

                results = run_async_queries(
                    endpoint=self.endpoint,
                    headers=self.get_headers(),
                    query_configs=[query_config],
                    progress_tracker=compliance_job_progress,
                    max_concurrent=1,
                )

                compliance_job_progress.update(task, completed=1, advance=1)

            # run_async_queries yields (config, nodes, error) triples; only
            # accept the result when the single query finished without error.
            if results and len(results) == 1 and not results[0][2]:
                return results[0][1] or []

            return []
        except Exception as e:
            logger.debug(f"Async fetch failed: {e}")
            raise

    def fetch_policy_assessments_requests(
        self,
        base_variables: Dict[str, Any],
        filter_variants: List[Optional[Dict[str, Any]]],
        progress_callback: Optional[Callable] = None,
    ) -> List[Dict[str, Any]]:
        """
        Fetch policy assessments using the requests library, trying each
        filter variant until one succeeds.

        :param base_variables: Base GraphQL variables
        :param filter_variants: List of filter variants to try; ``None`` means
            omit ``filterBy`` entirely
        :param progress_callback: Optional progress callback forwarded to the
            paginated query
        :return: List of policy assessment nodes
        :raises Exception: When every filter variant fails
        """
        session = self._create_requests_session()
        last_error = None

        for filter_variant in filter_variants:
            try:
                variables = base_variables.copy()
                if filter_variant is not None:
                    variables["filterBy"] = filter_variant

                nodes = self._execute_paginated_query(session, variables, progress_callback)
                return nodes
            except Exception as e:
                last_error = e
                logger.debug(f"Filter variant {filter_variant} failed: {e}")
                continue

        raise Exception(f"All filter variants failed. Last error: {last_error}")

    def _create_requests_session(self):
        """
        Create a requests session with retry logic for transient failures.

        :return: Configured requests session with retry adapter
        """
        import requests
        from requests.adapters import HTTPAdapter
        from urllib3.util.retry import Retry

        session = requests.Session()
        # Retry POSTs on rate limiting and transient server errors.
        retry = Retry(
            total=5,
            connect=5,
            read=5,
            backoff_factor=0.5,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["POST"],
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount("https://", adapter)
        return session

    def _execute_paginated_query(
        self, session, variables: Dict[str, Any], progress_callback: Optional[Callable] = None
    ) -> List[Dict[str, Any]]:
        """
        Execute a paginated GraphQL query, following ``pageInfo`` cursors.

        :param session: Requests session object
        :param variables: GraphQL query variables
        :param progress_callback: Optional callback invoked as
            ``callback(page_index, page_node_count, total_node_count)``
        :return: List of assessment nodes from all pages
        :raises requests.HTTPError: On HTTP status >= 400
        :raises RuntimeError: When the GraphQL response contains errors
        """
        import requests

        nodes = []
        after_cursor = variables.get("after")
        page_index = 0

        while True:
            payload_vars = variables.copy()
            payload_vars["after"] = after_cursor
            payload = {"query": WIZ_POLICY_QUERY, "variables": payload_vars}

            response = session.post(self.endpoint, json=payload, headers=self.get_headers(), timeout=300)

            if response.status_code >= 400:
                raise requests.HTTPError(f"{response.status_code} {response.text[:500]}")

            data = response.json()
            if "errors" in data:
                raise RuntimeError(str(data["errors"]))

            topic = data.get("data", {}).get("policyAssessments", {})
            page_nodes = topic.get("nodes", [])
            page_info = topic.get("pageInfo", {})

            nodes.extend(page_nodes)
            page_index += 1

            if progress_callback:
                try:
                    progress_callback(page_index, len(page_nodes), len(nodes))
                except Exception:
                    # Progress reporting is best-effort; never fail the fetch.
                    pass

            if not page_info.get("hasNextPage", False):
                break

            next_cursor = page_info.get("endCursor")
            # Fix: guard against a malformed pageInfo that claims more pages
            # but supplies no (or a non-advancing) endCursor, which previously
            # re-requested the same page forever.
            if not next_cursor or next_cursor == after_cursor:
                logger.debug("Pagination halted: hasNextPage with missing or stale endCursor")
                break
            after_cursor = next_cursor

        return nodes
283
+
284
+
285
class PolicyAssessmentFetcher:
    """Coordinates cache lookup, API fetch, and framework filtering for Wiz
    policy assessments."""

    def __init__(
        self,
        wiz_endpoint: str,
        access_token: str,
        wiz_project_id: str,
        framework_id: str,
        cache_duration_minutes: int = 0,
    ) -> None:
        """
        Initialize the policy assessment fetcher.

        :param wiz_endpoint: Wiz GraphQL API endpoint URL
        :param access_token: Wiz API access token
        :param wiz_project_id: Wiz project ID to query
        :param framework_id: Framework ID to filter by
        :param cache_duration_minutes: Cache TTL in minutes (0 = disabled)
        """
        self.api_client = WizApiClient(wiz_endpoint, access_token)
        self.wiz_project_id = wiz_project_id
        self.framework_id = framework_id
        self.cache = WizDataCache("artifacts/wiz", cache_duration_minutes)

    def fetch_policy_assessments(self) -> List[Dict[str, Any]]:
        """
        Fetch policy assessments, preferring a fresh cache over the API.

        Falls back from the async client to a requests-based fetch, filters
        the results to the configured framework, and refreshes the cache.

        :return: List of filtered policy assessment nodes
        """
        logger.info("Fetching policy assessments from Wiz...")

        cache_file = self.cache.get_cache_file_path(self.wiz_project_id, self.framework_id)

        # Serve from cache when it is still fresh.
        if self.cache.is_cache_valid(cache_file):
            cached_nodes = self.cache.load_from_cache(cache_file)
            if cached_nodes is not None:
                logger.info("Using cached Wiz policy assessments")
                return cached_nodes

        # Cache miss: hit the API, preferring the async client.
        try:
            raw_nodes = self._fetch_with_async_client()
        except Exception:
            logger.debug("Async client failed, falling back to requests")
            raw_nodes = self._fetch_with_requests()

        filtered = self._filter_nodes_to_framework(raw_nodes)
        self.cache.save_to_cache(cache_file, filtered, self.wiz_project_id, self.framework_id)
        return filtered

    def _fetch_with_async_client(self) -> List[Dict[str, Any]]:
        """
        Fetch via the async client.

        :return: List of policy assessment nodes
        """
        return self.api_client.fetch_policy_assessments_async(self.wiz_project_id)

    def _fetch_with_requests(self) -> List[Dict[str, Any]]:
        """
        Fetch via the requests client, trying several filter shapes in turn.

        :return: List of policy assessment nodes
        """
        base_variables = {"first": 100}

        # The project-filter key name varies, so try each known shape,
        # then an empty filter, then no filter at all.
        filter_variants = [
            {"project": [self.wiz_project_id]},
            {"projectId": [self.wiz_project_id]},
            {"projects": [self.wiz_project_id]},
            {},  # Empty filterBy
            None,  # Omit filterBy entirely
        ]

        return self.api_client.fetch_policy_assessments_requests(base_variables, filter_variants)

    def _filter_nodes_to_framework(self, nodes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Keep only nodes belonging to the target framework.

        Nodes without security subcategories (framework membership unknown)
        and nodes that raise during inspection are kept defensively.

        :param nodes: Raw assessment nodes from Wiz API
        :return: Filtered nodes belonging to the target framework
        """
        kept = []

        for node in nodes:
            try:
                policy = (node or {}).get("policy") or {}
                subcats = policy.get("securitySubCategories", [])

                if not subcats:
                    # No subcategories: framework can't be evaluated, keep it.
                    kept.append(node)
                    continue

                matches = any(
                    subcat.get("category", {}).get("framework", {}).get("id") == self.framework_id
                    for subcat in subcats
                )
                if matches:
                    kept.append(node)
            except Exception:
                # Keep on error (defensive)
                kept.append(node)

        return kept
@@ -0,0 +1,295 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """Finding processing and consolidation logic for Wiz Policy Compliance."""
4
+
5
+ import logging
6
+ from typing import Dict, List, Optional, Iterator, Any, Set, Union
7
+ from collections import defaultdict
8
+
9
+ from regscale.integrations.scanner_integration import IntegrationFinding
10
+ from regscale.integrations.commercial.wizv2.policy_compliance_helpers import AssetConsolidator
11
+ from regscale.models import regscale_models
12
+
13
+ logger = logging.getLogger("regscale")
14
+
15
+
16
class WizComplianceItem:
    """Read-only adapter exposing a uniform view over a compliance item
    supplied by the main class."""

    def __init__(self, compliance_item: Any) -> None:
        """
        Wrap a compliance item object.

        :param compliance_item: The compliance item object to wrap
        """
        self._item = compliance_item

    @property
    def resource_id(self) -> str:
        """Resource identifier of the wrapped item, or "" when missing."""
        return getattr(self._item, "resource_id", "")

    @property
    def control_id(self) -> str:
        """Primary control identifier of the wrapped item, or "" when missing."""
        return getattr(self._item, "control_id", "")

    @property
    def is_fail(self) -> bool:
        """Whether the wrapped item represents a failed assessment."""
        return getattr(self._item, "is_fail", False)

    def get_all_control_ids(self) -> List[str]:
        """
        Get every control ID this item maps to.

        Delegates to the wrapped item's resolver when present; otherwise
        falls back to the single primary control ID.

        :return: List of control IDs this policy assessment affects
        """
        # The resolver is implemented by the main class when available.
        if hasattr(self._item, "_get_all_control_ids_for_compliance_item"):
            return self._item._get_all_control_ids_for_compliance_item(self._item)
        primary = self.control_id
        return [primary] if primary else []
49
+
50
+
51
class FindingConsolidator:
    """Consolidates multiple compliance items into control-centric findings."""

    def __init__(self, integration_instance: Any) -> None:
        """
        Initialize the finding consolidator.

        :param integration_instance: The Wiz integration instance
        """
        self.integration = integration_instance
        self.asset_consolidator = AssetConsolidator()

    def create_consolidated_findings(self, failed_compliance_items: List[Any]) -> Iterator[IntegrationFinding]:
        """
        Yield consolidated findings, one per control with failing resources.

        :param failed_compliance_items: List of failed compliance items
        :yield: Consolidated findings
        """
        if not failed_compliance_items:
            logger.debug("No failed compliance items to process")
            return

        logger.debug("Starting control-centric finding consolidation")

        control_groups = self._group_by_control(failed_compliance_items)
        if not control_groups:
            logger.debug("No control groupings created")
            return

        logger.debug(f"Grouped into {len(control_groups)} controls with failing resources")

        findings_created = 0
        for control_id, resources in control_groups.items():
            consolidated = self._create_consolidated_finding_for_control(control_id, resources)
            if consolidated:
                findings_created += 1
                yield consolidated

        logger.debug(f"Generated {findings_created} consolidated findings")

    def _group_by_control(self, compliance_items: List[Any]) -> Dict[str, Dict[str, Any]]:
        """
        Group compliance items by control ID.

        :param compliance_items: List of compliance items to group
        :return: Dict mapping control IDs to ``{resource_id: compliance_item}``
        """
        control_groups: Dict[str, Dict[str, Any]] = {}

        for item in compliance_items:
            wrapped = WizComplianceItem(item)
            resource_id = wrapped.resource_id
            if not resource_id:
                continue

            mapped_controls = wrapped.get_all_control_ids()
            if not mapped_controls:
                continue

            # Register this resource under every control it fails; the first
            # occurrence wins for each resource-control pair.
            resource_key = resource_id.lower()
            for cid in mapped_controls:
                bucket = control_groups.setdefault(cid.upper(), {})
                bucket.setdefault(resource_key, item)

        return control_groups

    def _create_consolidated_finding_for_control(
        self, control_id: str, resources: Dict[str, Any]
    ) -> Optional[IntegrationFinding]:
        """
        Build one consolidated finding covering every affected resource of a control.

        :param control_id: Control identifier
        :param resources: Dict of resource_id -> compliance_item
        :return: Consolidated finding or None
        """
        logger.debug(f"Creating consolidated finding for control {control_id} with {len(resources)} resources")

        # Only resources that already exist as RegScale assets are kept.
        asset_mappings = self._build_asset_mappings(list(resources.keys()))
        if not asset_mappings:
            logger.debug(f"No existing assets found for control {control_id}")
            return None

        logger.debug(f"Creating finding for control {control_id} with {len(asset_mappings)} existing assets")

        # Seed the finding from the first compliance item in the group.
        base_item = next(iter(resources.values()))
        finding = self._create_base_finding(base_item, control_id)
        if not finding:
            return None

        self._update_finding_with_assets(finding, asset_mappings)
        return finding

    def _build_asset_mappings(self, resource_ids: List[str]) -> Dict[str, Dict[str, str]]:
        """
        Map each resource that exists in RegScale to its display info.

        :param resource_ids: List of Wiz resource IDs to check
        :return: Dict mapping resource_ids to asset info (name, wiz_id)
        """
        mappings: Dict[str, Dict[str, str]] = {}

        for resource_id in resource_ids:
            if not self.integration._asset_exists_in_regscale(resource_id):
                continue
            asset = self.integration.get_asset_by_identifier(resource_id)
            # Fall back to the resource ID when no asset name is available.
            display_name = getattr(asset, "name", None) if asset else None
            mappings[resource_id] = {"name": display_name or resource_id, "wiz_id": resource_id}

        return mappings

    def _create_base_finding(self, compliance_item: Any, control_id: str) -> Optional[IntegrationFinding]:
        """Create a base finding from a compliance item for a specific control."""
        try:
            # Prefer the integration's control-specific factory when available.
            if hasattr(self.integration, "_create_finding_for_specific_control"):
                return self.integration._create_finding_for_specific_control(compliance_item, control_id)
            # Fallback to generic method
            return self.integration.create_finding_from_compliance_item(compliance_item)
        except Exception as e:
            logger.error(f"Error creating base finding for control {control_id}: {e}")
            return None

    def _update_finding_with_assets(
        self, finding: IntegrationFinding, asset_mappings: Dict[str, Dict[str, str]]
    ) -> None:
        """
        Stamp consolidated asset details onto a finding.

        :param finding: Finding to update
        :param asset_mappings: Asset mapping information
        """
        finding.asset_identifier = self.asset_consolidator.create_consolidated_asset_identifier(asset_mappings)

        names = [entry["name"] for entry in asset_mappings.values()]
        self.asset_consolidator.update_finding_description_for_multiple_assets(finding, len(names), names)
210
+
211
+
212
class FindingToIssueProcessor:
    """Turns consolidated findings into RegScale issues."""

    def __init__(self, integration_instance: Any) -> None:
        """
        Initialize the finding to issue processor.

        :param integration_instance: The Wiz integration instance
        """
        self.integration = integration_instance

    def process_findings_to_issues(self, findings: List[IntegrationFinding]) -> tuple[int, int]:
        """
        Process findings into issues and tally the outcomes.

        :param findings: List of consolidated findings to process
        :return: Tuple of (issues_created, issues_skipped)
        """
        created = 0
        skipped = 0

        for finding in findings:
            try:
                succeeded = self._process_single_finding(finding)
            except Exception as e:
                logger.error(f"Error processing finding: {e}")
                skipped += 1
                continue
            if succeeded:
                created += 1
            else:
                skipped += 1

        return created, skipped

    def _process_single_finding(self, finding: IntegrationFinding) -> bool:
        """
        Convert one finding into an issue.

        :param finding: Finding to process
        :return: True if successful, False if skipped
        """
        # Skip findings whose referenced assets are not all present.
        if not self._verify_assets_exist(finding):
            logger.debug(f"Asset not found for finding {finding.external_id}")
            return False

        try:
            issue_title = self.integration.get_issue_title(finding)
            issue = self.integration.create_or_update_issue_from_finding(title=issue_title, finding=finding)
            return issue is not None
        except Exception as e:
            logger.error(f"Error creating/updating issue: {e}")
            return False

    def _verify_assets_exist(self, finding: IntegrationFinding) -> bool:
        """
        Check that every asset referenced by the finding exists in RegScale.

        :param finding: Finding with asset identifiers to verify
        :return: True if all assets exist in RegScale, False otherwise
        """
        raw = getattr(finding, "asset_identifier", None)
        if not raw:
            return False

        # Consolidated findings carry one "Asset Name (resource-id)" entry
        # per line; bare identifiers are also accepted.
        for line in raw.split("\n"):
            entry = line.strip()
            if not entry:
                continue

            if "(" in entry and entry.endswith(")"):
                resource_id = entry.split("(")[-1].rstrip(")")
            else:
                resource_id = entry

            if not self.integration._asset_exists_in_regscale(resource_id):
                logger.debug(f"Asset {resource_id} does not exist in RegScale")
                return False

        return True