regscale-cli 6.16.1.0__py3-none-any.whl → 6.16.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.


Files changed (46)
  1. regscale/__init__.py +1 -1
  2. regscale/core/app/internal/login.py +1 -1
  3. regscale/core/app/internal/poam_editor.py +1 -1
  4. regscale/core/app/utils/api_handler.py +4 -11
  5. regscale/integrations/commercial/__init__.py +2 -2
  6. regscale/integrations/commercial/ad.py +1 -1
  7. regscale/integrations/commercial/crowdstrike.py +0 -1
  8. regscale/integrations/commercial/grype/__init__.py +3 -0
  9. regscale/integrations/commercial/grype/commands.py +72 -0
  10. regscale/integrations/commercial/grype/scanner.py +390 -0
  11. regscale/integrations/commercial/import_all/import_all_cmd.py +2 -2
  12. regscale/integrations/commercial/opentext/__init__.py +6 -0
  13. regscale/integrations/commercial/opentext/commands.py +77 -0
  14. regscale/integrations/commercial/opentext/scanner.py +449 -85
  15. regscale/integrations/commercial/qualys.py +50 -61
  16. regscale/integrations/commercial/servicenow.py +1 -0
  17. regscale/integrations/commercial/snyk.py +2 -2
  18. regscale/integrations/commercial/synqly/ticketing.py +29 -0
  19. regscale/integrations/commercial/trivy/__init__.py +5 -0
  20. regscale/integrations/commercial/trivy/commands.py +74 -0
  21. regscale/integrations/commercial/trivy/scanner.py +276 -0
  22. regscale/integrations/commercial/veracode.py +1 -1
  23. regscale/integrations/commercial/wizv2/utils.py +1 -1
  24. regscale/integrations/jsonl_scanner_integration.py +869 -0
  25. regscale/integrations/public/fedramp/fedramp_common.py +4 -4
  26. regscale/integrations/public/fedramp/inventory_items.py +3 -3
  27. regscale/integrations/scanner_integration.py +225 -59
  28. regscale/models/integration_models/cisa_kev_data.json +65 -7
  29. regscale/models/integration_models/{flat_file_importer.py → flat_file_importer/__init__.py} +29 -8
  30. regscale/models/integration_models/snyk.py +141 -15
  31. regscale/models/integration_models/synqly_models/capabilities.json +1 -1
  32. regscale/models/integration_models/tenable_models/integration.py +42 -7
  33. regscale/models/integration_models/veracode.py +91 -48
  34. regscale/models/regscale_models/regscale_model.py +1 -1
  35. regscale/models/regscale_models/user.py +3 -4
  36. regscale/models/regscale_models/vulnerability.py +21 -0
  37. regscale/utils/version.py +3 -5
  38. {regscale_cli-6.16.1.0.dist-info → regscale_cli-6.16.3.0.dist-info}/METADATA +3 -3
  39. {regscale_cli-6.16.1.0.dist-info → regscale_cli-6.16.3.0.dist-info}/RECORD +43 -38
  40. regscale/integrations/commercial/grype.py +0 -165
  41. regscale/integrations/commercial/opentext/click.py +0 -99
  42. regscale/integrations/commercial/trivy.py +0 -162
  43. {regscale_cli-6.16.1.0.dist-info → regscale_cli-6.16.3.0.dist-info}/LICENSE +0 -0
  44. {regscale_cli-6.16.1.0.dist-info → regscale_cli-6.16.3.0.dist-info}/WHEEL +0 -0
  45. {regscale_cli-6.16.1.0.dist-info → regscale_cli-6.16.3.0.dist-info}/entry_points.txt +0 -0
  46. {regscale_cli-6.16.1.0.dist-info → regscale_cli-6.16.3.0.dist-info}/top_level.txt +0 -0
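
The most substantial change is the rewrite of regscale/integrations/commercial/opentext/scanner.py (file 14 above): the FlatFileImporter-based WebInspect class is replaced by a WebInspectIntegration class built on the new JSONLScannerIntegration base (file 24), which stages parsed assets and findings in JSONL files under ./artifacts/ before import. The diff of that file follows, with two illustrative sketches interleaved at the hunk boundaries.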
@@ -1,97 +1,434 @@
 """
-Web inspect Scanner Class
+WebInspect Scanner Integration for RegScale.
+
+This module provides integration between OpenText WebInspect scanner and RegScale,
+allowing you to import WebInspect scan results into RegScale as assets and findings.
 """
 
+import dataclasses
+import json
 import logging
-from typing import Iterator
-from typing import List, Optional
+import os
+import traceback
+from typing import Any, Dict, List, Optional, Union, Tuple, cast, Iterator, Set
+
+from pathlib import Path
 
+from regscale.core.app.utils.file_utils import find_files, read_file
+from regscale.integrations.jsonl_scanner_integration import JSONLScannerIntegration
 from regscale.integrations.scanner_integration import IntegrationAsset, IntegrationFinding
-from regscale.models import ImportValidater
-from regscale.models import IssueSeverity, IssueStatus, regscale_models
-from regscale.models.integration_models.flat_file_importer import FlatFileImporter
+from regscale.models import IssueSeverity, AssetStatus, IssueStatus, ImportValidater
 
 logger = logging.getLogger("regscale")
-XML = "*.xml"
 
 
-class WebInspect(FlatFileImporter):
-    finding_severity_map = {
-        4: regscale_models.IssueSeverity.Critical,
-        3: regscale_models.IssueSeverity.High,
-        2: regscale_models.IssueSeverity.Moderate,
-        1: regscale_models.IssueSeverity.Low,
-        0: regscale_models.IssueSeverity.NotAssigned,
+class WebInspectIntegration(JSONLScannerIntegration):
+    """Class for handling OpenText WebInspect scanner integration."""
+
+    title: str = "WebInspect"
+    finding_severity_map: Dict[int, Any] = {
+        4: IssueSeverity.Critical.value,
+        3: IssueSeverity.High.value,
+        2: IssueSeverity.Moderate.value,
+        1: IssueSeverity.Low.value,
+        0: IssueSeverity.NotAssigned.value,
     }
 
-    def __init__(self, **kwargs):
-        self.name = kwargs.get("name", "Web Inspect")
-        self.required_headers = [
-            "Issues",
-        ]
-        self.mapping_file = kwargs.get("mappings_path")
-        self.disable_mapping = kwargs.get("disable_mapping")
-        self.validater = ImportValidater(
-            self.required_headers, kwargs.get("file_path"), self.mapping_file, self.disable_mapping, xml_tag="Scan"
-        )
-        self.headers = self.validater.parsed_headers
-        self.mapping = self.validater.mapping
-        data = self.mapping.get_value(self.validater.data, "Issues", {}).get("Issue", [])
-        vuln_count = 0
-        for item in data:
-            severity_int = int(item.get("Severity", 0))
-            severity = self.finding_severity_map.get(severity_int, IssueSeverity.NotAssigned)
-
-            if severity in (IssueSeverity.High, IssueSeverity.Moderate, IssueSeverity.Low):
-                vuln_count += 1
-        super().__init__(
-            logger=logger,
-            headers=self.headers,
-            asset_func=self.create_asset,
-            vuln_func=self.create_vuln,
-            extra_headers_allowed=True,
-            finding_severity_map=self.finding_severity_map,
-            asset_count=len(data),
-            vuln_count=vuln_count,
-            **kwargs,
-        )
+    # Constants for file paths
+    ASSETS_FILE = "./artifacts/webinspect_assets.jsonl"
+    FINDINGS_FILE = "./artifacts/webinspect_findings.jsonl"
+
+    def __init__(self, *args, **kwargs):
+        """Initialize the WebInspectIntegration."""
+        # Override file_pattern for XML files
+        kwargs["file_pattern"] = "*.xml"
+        kwargs["read_files_only"] = True
+        self.disable_mapping = kwargs["disable_mapping"] = True
+        # kwargs["re"]
+        super().__init__(*args, **kwargs)
+
+    def is_valid_file(self, data: Any, file_path: Union[Path, str]) -> Tuple[bool, Optional[Dict[str, Any]]]:
+        """
+        Check if the provided data is a valid WebInspect scan result.
+
+        Validates that the data is from a WebInspect XML file with the required structure.
+        Logs a warning with the file path and returns (False, None) if invalid.
+
+        :param Any data: Data parsed from the file (string content for XML when read_files_only is True, or file path otherwise)
+        :param Union[Path, str] file_path: Path to the file being processed
+        :return: Tuple of (is_valid, validated_data) where validated_data includes validater, mapping, and data if valid
+        :rtype: Tuple[bool, Optional[Dict[str, Any]]]
+        """
+        if self.read_files_only:
+            # Data is the XML content as a string
+            if not isinstance(data, str):
+                logger.warning(f"Data is not a string (expected XML content) for file {file_path}")
+                return False, None
+
+            try:
+                # Create a temporary file since ImportValidater requires a file path
+                import tempfile
+
+                with tempfile.NamedTemporaryFile(mode="w", suffix=".xml", delete=False) as temp_file:
+                    temp_file.write(data)
+                    temp_path = temp_file.name
+
+                validater = ImportValidater(
+                    required_headers=["Issues"],
+                    file_path=temp_path,
+                    mapping_file_path="",  # Empty string instead of None
+                    disable_mapping=True,
+                    xml_tag="Scan",  # XML root tag
+                )
+
+                # Clean up the temporary file
+                try:
+                    os.unlink(temp_path)
+                except OSError:
+                    pass
+            except Exception:
+                error_message = traceback.format_exc()
+                logger.warning(f"Error processing WebInspect XML content for file {file_path}: {str(error_message)}")
+                return False, None
+        else:
+            # Data is the file path
+            if not isinstance(data, (str, Path)):
+                logger.warning(f"Data is not a file path when read_files_only is False for file {file_path}")
+                return False, None
+
+            try:
+                validater = ImportValidater(
+                    required_headers=["Issues"],
+                    file_path=str(data),
+                    mapping_file_path="",  # Empty string instead of None
+                    disable_mapping=True,
+                    xml_tag="Scan",  # XML root tag
+                )
+            except Exception as e:
+                logger.warning(f"Error processing WebInspect file {data} for file {file_path}: {str(e)}")
+                return False, None
+
+        # Check if validater produced usable data
+        if not validater.data or not validater.parsed_headers:
+            logger.warning(f"Data is not a valid WebInspect XML structure for file {file_path}")
+            return False, None
+
+        # Extract mapping and issues data
+        mapping = validater.mapping
+        issues_data = mapping.get_value(cast(Dict[str, Any], validater.data), "Issues", {})
+        # issues_data = parent_issues_data.get("Issue", [])
+        # Validate that issues data contains 'Issue' elements
+        if not issues_data or "Issue" not in issues_data:
+            logger.warning(f"Data has no 'Issues' with 'Issue' elements for file {file_path}")
+            return False, None
+
+        return True, issues_data
+
+    def _process_files(
+        self,
+        file_path: Union[str, Path],
+        assets_output_file: str,
+        findings_output_file: str,
+        empty_assets_file: bool = True,
+        empty_findings_file: bool = True,
+    ) -> Tuple[int, int]:
+        """
+        Process files (local or S3) to extract both assets and findings in a single pass.
+
+        Optimizes file processing by reading each file once to extract asset and finding data.
+
+        :param Union[str, Path] file_path: Path to source file or directory (local or S3 URI)
+        :param str assets_output_file: Path to output JSONL file for assets
+        :param str findings_output_file: Path to output JSONL file for findings
+        :param bool empty_assets_file: Whether to empty the assets file before writing (default: True)
+        :param bool empty_findings_file: Whether to empty the findings file before writing (default: True)
+        :return: Tuple of total asset and finding counts
+        :rtype: Tuple[int, int]
+        """
+        asset_tracker = self._setup_tracker(assets_output_file, empty_assets_file, "asset")
+        finding_tracker = self._setup_tracker(findings_output_file, empty_findings_file, "finding")
+        processed_files = set()
+
+        with open(assets_output_file, "a") as assets_file, open(findings_output_file, "a") as findings_file:
+            self._process_file_data(
+                file_path, assets_file, findings_file, asset_tracker, finding_tracker, processed_files
+            )
+
+        self._log_completion(asset_tracker.new_items, assets_output_file, "assets")
+        self._log_completion(finding_tracker.new_items, findings_output_file, "findings")
+        return asset_tracker.total_items, finding_tracker.total_items
+
+    def _setup_tracker(self, output_file: str, empty_file: bool, item_type: str) -> "ItemTracker":
+        """
+        Set up a tracker for counting items.
+
+        :param str output_file: Path to the output file
+        :param bool empty_file: Whether to empty the file before processing
+        :param str item_type: Type of items ('asset' or 'finding')
+        :return: Tracker object for managing item counts
+        :rtype: ItemTracker
+        """
+        from dataclasses import dataclass
+
+        @dataclass
+        class ItemTracker:
+            existing_items: Dict[str, bool]
+            new_items: int = 0
+            total_items: int = 0
+
+        existing_items = self._prepare_output_file(output_file, empty_file, item_type)
+        return ItemTracker(existing_items=existing_items, total_items=len(existing_items))
+
+    def _process_file_data(
+        self,
+        file_path: Union[str, Path],
+        assets_file: Any,
+        findings_file: Any,
+        asset_tracker: "ItemTracker",
+        finding_tracker: "ItemTracker",
+        processed_files: Set[str],
+    ) -> None:
+        """
+        Process data from all files in the given path.
+
+        :param Union[str, Path] file_path: Path to source file or directory
+        :param Any assets_file: Open file handle for writing assets
+        :param Any findings_file: Open file handle for writing findings
+        :param ItemTracker asset_tracker: Tracker for asset counts
+        :param ItemTracker finding_tracker: Tracker for finding counts
+        :param Set[str] processed_files: Set of processed file paths
+        :rtype: None
+        """
+        for file, data in self.find_valid_files(file_path):
+            file_str = str(file)
+            if file_str in processed_files:
+                continue
+
+            processed_files.add(file_str)
+            self._handle_single_file(file, data, assets_file, findings_file, asset_tracker, finding_tracker)
 
-    def create_asset(self, *args, **kwargs) -> Iterator[IntegrationAsset]:
+    def _handle_single_file(
+        self,
+        file: Union[Path, str],
+        data: Optional[Dict[str, Any]],
+        assets_file: Any,
+        findings_file: Any,
+        asset_tracker: "ItemTracker",
+        finding_tracker: "ItemTracker",
+    ) -> None:
         """
-        Fetches assets from the processed XML files
+        Handle processing of a single file's data.
 
-        :yields: Iterator[IntegrationAsset]
+        :param Union[Path, str] file: Path to the file being processed
+        :param Optional[Dict[str, Any]] data: Parsed data from the file
+        :param Any assets_file: Open file handle for writing assets
+        :param Any findings_file: Open file handle for writing findings
+        :param ItemTracker asset_tracker: Tracker for asset counts
+        :param ItemTracker finding_tracker: Tracker for finding counts
+        :rtype: None
         """
-        # Get a list of issues from xml node Issues
-        if data := self.mapping.get_value(self.validater.data, "Issues", {}):
-            issues_dict = data.get("Issue", [])
-            for issue in issues_dict:
-                yield from self.parse_asset(issue)
+        try:
+            logger.info(f"Processing file: {file}")
+            asset = self._prepare_asset(file, data)
+            self._write_asset_if_new(asset, assets_file, asset_tracker)
 
-    def parse_asset(self, issue: dict) -> Iterator[IntegrationAsset]:
+            findings_data = self._get_findings_data_from_file(data)
+            logger.info(f"Found {len(findings_data)} findings in file: {file}")
+            findings_added = self._write_findings(findings_data, asset.identifier, findings_file, finding_tracker)
+
+            if findings_added > 0:
+                logger.info(f"Added {findings_added} new findings from file {file}")
+        except Exception as e:
+            logger.error(f"Error processing file {file}: {str(e)}")
+
+    def _prepare_asset(self, file: Union[Path, str], data: Optional[Dict[str, Any]]) -> IntegrationAsset:
+        """
+        Prepare and validate an asset from file data.
+
+        :param Union[Path, str] file: Path to the file being processed
+        :param Optional[Dict[str, Any]] data: Parsed data from the file
+        :return: Processed and validated asset object
+        :rtype: IntegrationAsset
+        """
+        asset = self.parse_asset(file, data)
+        asset_dict = dataclasses.asdict(asset)
+        if not self.disable_mapping and self.mapping:
+            mapped_asset_dict = self._apply_mapping(
+                data or {}, asset_dict, getattr(self.mapping, "fields", {}).get("asset_mapping", {})
+            )
+            mapped_asset = IntegrationAsset(**mapped_asset_dict)
+        else:
+            mapped_asset = asset
+        self._validate_fields(mapped_asset, self.required_asset_fields)
+        return mapped_asset
+
+    def _write_asset_if_new(self, asset: IntegrationAsset, assets_file: Any, tracker: "ItemTracker") -> None:
+        """
+        Write an asset to the file if it's new.
+
+        :param IntegrationAsset asset: Asset object to write
+        :param Any assets_file: Open file handle for writing assets
+        :param ItemTracker tracker: Tracker for asset counts
+        :rtype: None
+        """
+        asset_key = asset.identifier
+        if asset_key not in tracker.existing_items:
+            assets_file.write(json.dumps(dataclasses.asdict(asset)) + "\n")
+            assets_file.flush()
+            tracker.existing_items[asset_key] = True
+            tracker.new_items += 1
+            tracker.total_items += 1
+        else:
+            logger.debug(f"Asset with identifier {asset_key} already exists, skipping")
+
+    def _write_findings(
+        self,
+        findings_data: List[Dict[str, Any]],
+        asset_id: str,
+        findings_file: Any,
+        tracker: "ItemTracker",
+    ) -> int:
+        """
+        Write new findings to the file and track counts.
+
+        :param List[Dict[str, Any]] findings_data: List of finding items
+        :param str asset_id: Identifier of the associated asset
+        :param Any findings_file: Open file handle for writing findings
+        :param ItemTracker tracker: Tracker for finding counts
+        :return: Number of new findings added
+        :rtype: int
+        """
+        findings_added = 0
+        for finding_item in findings_data:
+            finding = self.parse_finding(asset_id, {}, finding_item)  # Pass empty dict for data if unused
+            finding_dict = dataclasses.asdict(finding)
+            if not self.disable_mapping and self.mapping:
+                mapped_finding_dict = self._apply_mapping(
+                    finding_item, finding_dict, getattr(self.mapping, "fields", {}).get("finding_mapping", {})
+                )
+                mapped_finding = IntegrationFinding(**mapped_finding_dict)
+            else:
+                mapped_finding = finding
+            self._validate_fields(mapped_finding, self.required_finding_fields)
+
+            finding_key = self._get_item_key(dataclasses.asdict(mapped_finding), "finding")
+            if finding_key not in tracker.existing_items:
+                findings_file.write(json.dumps(dataclasses.asdict(mapped_finding)) + "\n")
+                findings_file.flush()
+                tracker.existing_items[finding_key] = True
+                tracker.new_items += 1
+                tracker.total_items += 1
+                findings_added += 1
+            else:
+                logger.debug(f"Finding with key {finding_key} already exists, skipping")
+        return findings_added
+
+    def _log_completion(self, new_count: int, output_file: str, item_type: str) -> None:
+        """
+        Log the completion of processing items.
+
+        :param int new_count: Number of new items added
+        :param str output_file: Path to the output file
+        :param str item_type: Type of items processed ('assets' or 'findings')
+        :rtype: None
+        """
+        logger.info(f"Added {new_count} new {item_type} to {output_file}")
+
+    def find_valid_files(self, path: Union[Path, str]) -> Iterator[Tuple[Union[Path, str], Dict[str, Any]]]:
+        """
+        Find all valid WebInspect scan files in the given path.
+
+        Overrides the parent method to handle XML files instead of JSON, passing content or path to is_valid_file.
+
+        :param Union[Path, str] path: Path to a file or directory (local or S3 URI)
+        :return: Iterator yielding tuples of (file_path, validated_data) for valid files
+        :rtype: Iterator[Tuple[Union[Path, str], Dict[str, Any]]]
+        """
+        files = find_files(path, self.file_pattern)
+        for file in files:
+            try:
+                if self.read_files_only:
+                    content = read_file(file)  # Get raw XML content as string
+                else:
+                    content = file  # Pass file path directly
+                is_valid, validated_data = self.is_valid_file(content, file)
+                if is_valid and validated_data is not None:
+                    yield file, validated_data
+            except Exception as e:
+                logger.error(f"Error processing file {file}: {str(e)}")
+
+    def parse_asset(self, file_path: Union[Path, str], data: Dict[str, Any]) -> IntegrationAsset:
         """
-        Parse the asset from an element
+        Parse a single asset from WebInspect scan data.
 
-        :param dict issue: The Issue element
-        :yields: IntegrationAsset
+        :param Union[Path, str] file_path: Path to the file containing the asset data
+        :param Dict[str, Any] data: The parsed data containing validater, mapping, and data
+        :return: IntegrationAsset object
+        :rtype: IntegrationAsset
         """
-        host = issue.get("Host")
-        if host:
-            yield IntegrationAsset(name=host, asset_type="Other", asset_category="Hardware", identifier=host)
+        # Get the first issue to extract host information
+        issues = data.get("Issue", [])
+        if not issues:
+            # If no issues found, create a default asset based on the file name
+            file_name = os.path.basename(str(file_path))
+            return IntegrationAsset(
+                identifier=file_name,
+                name=file_name,
+                ip_address="0.0.0.0",
+                status=AssetStatus.Active,
+                asset_type="Other",
+                asset_category="Hardware",
+                parent_id=self.plan_id,
+                parent_module="securityplans",
+            )
+
+        # Get the host from the first issue
+        host = issues[0].get("Host", "Unknown Host")
+
+        # Create and return the asset
+        return IntegrationAsset(
+            identifier=host,
+            name=host,
+            ip_address="0.0.0.0",  # Default IP address
+            status=AssetStatus.Active,
+            asset_type="Other",
+            asset_category="Hardware",
+            parent_id=self.plan_id,
+            parent_module="securityplans",
+        )
 
-    def create_vuln(self, *args, **kwargs) -> Iterator[IntegrationFinding]:
+    def _get_findings_data_from_file(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
         """
-        Fetches findings from the XML files
+        Extract findings data from WebInspect file data.
 
-        :return: A list of findings
-        :rtype: List[IntegrationFinding]
+        :param Dict[str, Any] data: The data from the WebInspect file
+        :return: List of finding items
+        :rtype: List[Dict[str, Any]]
        """
-        # Get a list of issues from xml node Issues
-        if data := self.mapping.get_value(self.validater.data, "Issues", {}):
-            issues_dict = data.get("Issue", [])
-            for issue in issues_dict:
-                if res := self.parse_finding(issue):
-                    yield res
+        if not data or not isinstance(data, dict):
+            return []
+
+        # Get the issues from the data
+        issues = data.get("Issue", [])
+        if not isinstance(issues, list):
+            return []
+
+        # Filter out findings with severity levels we don't want to include
+        filtered_issues = []
+        for issue in issues:
+            severity_int = int(issue.get("Severity", 3))
+            severity_value = self.finding_severity_map.get(severity_int, IssueSeverity.High.value)
+
+            try:
+                severity = IssueSeverity(severity_value)
+                # Only include findings with certain severity levels
+                if severity in (IssueSeverity.Critical, IssueSeverity.High, IssueSeverity.Moderate, IssueSeverity.Low):
+                    filtered_issues.append(issue)
+            except ValueError:
+                # Include by default if we can't determine severity
+                filtered_issues.append(issue)
+
+        return filtered_issues
 
     @staticmethod
     def _parse_report_section(sections: List[dict], section_name: str) -> str:
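The _write_asset_if_new and _write_findings methods above implement a simple JSONL de-duplication scheme: each record is reduced to a key, keys already present in the tracker are skipped, and new records are appended one JSON object per line. A minimal standalone sketch of that pattern follows; the write_if_new helper is hypothetical, and the real class seeds existing_items from JSONLScannerIntegration._prepare_output_file rather than starting empty.

    import json
    from dataclasses import dataclass, field
    from typing import Dict, TextIO

    @dataclass
    class ItemTracker:
        # Mirrors the tracker defined in _setup_tracker above.
        existing_items: Dict[str, bool] = field(default_factory=dict)
        new_items: int = 0
        total_items: int = 0

    def write_if_new(record: dict, key: str, handle: TextIO, tracker: ItemTracker) -> bool:
        # Append the record as one JSONL line unless its key was already seen.
        if key in tracker.existing_items:
            return False
        handle.write(json.dumps(record) + "\n")
        tracker.existing_items[key] = True
        tracker.new_items += 1
        tracker.total_items += 1
        return True

    tracker = ItemTracker()
    with open("webinspect_assets.jsonl", "a") as fh:
        write_if_new({"identifier": "demo.example.com"}, "demo.example.com", fh, tracker)
        write_if_new({"identifier": "demo.example.com"}, "demo.example.com", fh, tracker)  # duplicate, skipped
    print(tracker.new_items, tracker.total_items)  # 1 1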
@@ -103,26 +440,40 @@ class WebInspect(FlatFileImporter):
         :return: Text from the specified section
         :rtype: str
         """
+        if not sections:
+            return ""
+
         return next((section.get("SectionText", "") for section in sections if section.get("Name") == section_name), "")
 
-    def parse_finding(self, issue: dict) -> Optional[IntegrationFinding]:
+    def parse_finding(self, asset_identifier: str, data: Dict[str, Any], item: Dict[str, Any]) -> IntegrationFinding:
         """
-        Parse the dict to an Integration Finding
+        Parse a single finding from WebInspect scan data.
 
-        :param dict issue: The Issue element
-        :returns The Integration Finding
-        :rtype Optional[IntegrationFinding]
+        :param str asset_identifier: The identifier of the asset this finding belongs to
+        :param Dict[str, Any] data: The parsed data (not used here, kept for interface compatibility)
+        :param Dict[str, Any] item: The finding data
+        :return: IntegrationFinding object
+        :rtype: IntegrationFinding
         """
-        severity_int = int(issue.get("Severity", 3))
-        severity = self.finding_severity_map.get(severity_int, IssueSeverity.High)
-        title = issue.get("Name", "")
-        host = issue.get("Host", "")
-        plugin_id = issue.get("VulnerabilityID", "")
+        severity_int = int(item.get("Severity", 3))
+        severity_value = self.finding_severity_map.get(severity_int, IssueSeverity.High.value)
+
+        try:
+            severity = IssueSeverity(severity_value)
+        except ValueError:
+            severity = IssueSeverity.High
+
+        title = item.get("Name", "")
+        host = item.get("Host", asset_identifier)
+        plugin_id = item.get("VulnerabilityID", "")
         external_id = str(host + plugin_id)
-        sections = issue.get("ReportSection")
+        sections = item.get("ReportSection", [])
+
+        # Extract description and mitigation from report sections
         description = self._parse_report_section(sections, "Summary")
         mitigation = self._parse_report_section(sections, "Fix")
 
+        # Only create findings for certain severity levels
         if severity in (IssueSeverity.Critical, IssueSeverity.High, IssueSeverity.Moderate, IssueSeverity.Low):
             return IntegrationFinding(
                 external_id=external_id,
@@ -132,12 +483,25 @@ class WebInspect(FlatFileImporter):
                 status=IssueStatus.Open,
                 title=title,
                 severity=severity,
-                severity_int=severity_int,
-                category=f"{self.name} Vulnerability",
+                category=f"{self.title} Vulnerability",
                 plugin_id=plugin_id,
                 plugin_name=title,
                 rule_id=plugin_id,
                 recommendation_for_mitigation=mitigation,
-                source_report=self.name,
+                source_report=self.title,
             )
-        return None
+        # Return a default finding for severities we skip
+        return IntegrationFinding(
+            external_id=f"skip-{external_id}",
+            asset_identifier=host,
+            control_labels=[],
+            description="Skipped finding due to low severity",
+            status=IssueStatus.Closed,
+            title=f"Skipped: {title}",
+            severity=IssueSeverity.NotAssigned,
+            category=f"{self.title} Skipped",
+            plugin_id=plugin_id,
+            plugin_name=title,
+            rule_id=plugin_id,
+            source_report=self.title,
+        )
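
The severity handling in _get_findings_data_from_file and parse_finding round-trips WebInspect's integer Severity values through finding_severity_map and back into the IssueSeverity enum. A minimal sketch of that translation, assuming IssueSeverity is a string-valued enum (the code constructs it via IssueSeverity(severity_value); the enum body below is a stand-in for regscale.models.IssueSeverity):

    from enum import Enum
    from typing import Union

    class IssueSeverity(Enum):
        # Stand-in for regscale.models.IssueSeverity.
        Critical = "Critical"
        High = "High"
        Moderate = "Moderate"
        Low = "Low"
        NotAssigned = "NotAssigned"

    finding_severity_map = {
        4: IssueSeverity.Critical.value,
        3: IssueSeverity.High.value,
        2: IssueSeverity.Moderate.value,
        1: IssueSeverity.Low.value,
        0: IssueSeverity.NotAssigned.value,
    }

    def to_severity(raw: Union[str, int]) -> IssueSeverity:
        # Mirrors parse_finding: unmapped integers default to High, and
        # values the enum cannot represent also fall back to High.
        value = finding_severity_map.get(int(raw), IssueSeverity.High.value)
        try:
            return IssueSeverity(value)
        except ValueError:
            return IssueSeverity.High

    assert to_severity(4) is IssueSeverity.Critical
    assert to_severity("2") is IssueSeverity.Moderate
    assert to_severity(9) is IssueSeverity.High  # unmapped, defaults to High

One behavioral nuance visible in the diff: Severity defaults to 3 when absent, so findings with no Severity element are imported as High rather than skipped.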