regscale-cli 6.20.3.0-py3-none-any.whl → 6.20.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of regscale-cli might be problematic.
- regscale/__init__.py +1 -1
- regscale/integrations/commercial/__init__.py +1 -0
- regscale/integrations/commercial/jira.py +35 -16
- regscale/integrations/commercial/qualys/__init__.py +298 -28
- regscale/integrations/commercial/qualys/qualys_error_handler.py +519 -0
- regscale/integrations/commercial/qualys/scanner.py +222 -97
- regscale/integrations/commercial/synqly/assets.py +11 -1
- regscale/integrations/commercial/synqly/edr.py +4 -4
- regscale/integrations/commercial/synqly/ticketing.py +1 -1
- regscale/integrations/commercial/synqly/vulnerabilities.py +2 -2
- regscale/integrations/public/fedramp/fedramp_cis_crm.py +72 -42
- regscale/models/app_models/import_validater.py +20 -2
- regscale/models/integration_models/cisa_kev_data.json +97 -9
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/integration_models/synqly_models/param.py +1 -1
- regscale/models/regscale_models/task.py +0 -1
- {regscale_cli-6.20.3.0.dist-info → regscale_cli-6.20.4.0.dist-info}/METADATA +13 -9
- {regscale_cli-6.20.3.0.dist-info → regscale_cli-6.20.4.0.dist-info}/RECORD +22 -21
- {regscale_cli-6.20.3.0.dist-info → regscale_cli-6.20.4.0.dist-info}/LICENSE +0 -0
- {regscale_cli-6.20.3.0.dist-info → regscale_cli-6.20.4.0.dist-info}/WHEEL +0 -0
- {regscale_cli-6.20.3.0.dist-info → regscale_cli-6.20.4.0.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.20.3.0.dist-info → regscale_cli-6.20.4.0.dist-info}/top_level.txt +0 -0
regscale/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "6.20.3.0"
+__version__ = "6.20.4.0"
regscale/integrations/commercial/__init__.py
CHANGED
@@ -308,6 +308,7 @@ show_mapping(prisma, "prisma", "csv")
         "get_asset_groups": "regscale.integrations.commercial.qualys.__init__.get_asset_groups",
         "import_total_cloud_xml": "regscale.integrations.commercial.qualys.__init__.import_total_cloud_from_xml",
         "import_total_cloud": "regscale.integrations.commercial.qualys.__init__.import_total_cloud",
+        "validate_csv": "regscale.integrations.commercial.qualys.__init__.validate_csv",
     },
     name="qualys",
 )
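The new "validate_csv" entry follows the existing registration pattern: the CLI keeps a name to dotted-path map and only imports the target module when the command is actually invoked. A minimal sketch of how such a map can be resolved at call time (resolve_command is a hypothetical helper, not regscale-cli's actual loader):

import importlib

def resolve_command(dotted_path: str):
    # Split "package.module.attr" into the module to import and the attribute to fetch
    module_path, _, attr = dotted_path.rpartition(".")
    return getattr(importlib.import_module(module_path), attr)

# Example with a stdlib path: resolve_command("json.decoder.JSONDecoder")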
regscale/integrations/commercial/jira.py
CHANGED
@@ -460,7 +460,7 @@ def create_regscale_task_from_jira(config: dict, jira_issue: jiraIssue, parent_i
     :rtype: Task
     """
     description = jira_issue.fields.description
-    due_date = jira_issue.fields.duedate
+    due_date = map_jira_due_date(jira_issue, config)
     status = convert_task_status(jira_issue.fields.status.name)
     status_change_date = convert_datetime_to_regscale_string(
         datetime.strptime(jira_issue.fields.statuscategorychangedate, "%Y-%m-%dT%H:%M:%S.%f%z")
@@ -468,9 +468,6 @@ def create_regscale_task_from_jira(config: dict, jira_issue: jiraIssue, parent_i
     title = jira_issue.fields.summary
     date_closed = None
     percent_complete = None
-    if not due_date:
-        delta = config["issues"]["jira"]["medium"]
-        due_date = convert_datetime_to_regscale_string(datetime.now() + timedelta(days=delta))
     if status == "Closed":
         date_closed = status_change_date
         percent_complete = 100
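Taken together, the two jira.py hunks above move the due-date fallback out of create_regscale_task_from_jira and into map_jira_due_date. A sketch of what such a mapper might look like, assuming the config keys shown in the removed lines; only the call signature appears in this diff, so the body is an assumption:

from datetime import datetime, timedelta

def map_jira_due_date(jira_issue, config: dict) -> str:
    # Prefer the due date set on the Jira issue itself
    due = getattr(jira_issue.fields, "duedate", None)
    if due:
        return due
    # Fall back to the configured window (in days), as the removed inline code did
    delta = config["issues"]["jira"]["medium"]
    return (datetime.now() + timedelta(days=delta)).strftime("%Y-%m-%d")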
@@ -534,23 +531,45 @@ def process_tasks_for_sync(
     insert_tasks = []
     update_tasks = []
     close_tasks = []
+
+    # Create a map of existing tasks by their Jira key for easier lookup
+    # Only include tasks that have an otherIdentifier
+    existing_task_map = {
+        task.otherIdentifier: task
+        for task in existing_tasks
+        if hasattr(task, "otherIdentifier") and task.otherIdentifier
+    }
+
     for jira_issue in jira_issues:
-        task
-
+        # Create a RegScale task from the Jira issue
+        jira_task = create_regscale_task_from_jira(config, jira_issue, parent_id, parent_module)
+
+        # Check if we have a matching task in RegScale
+        existing_task = existing_task_map.get(jira_issue.key)
+
         if existing_task:
-            task
-            if
-
-                task.dateClosed = safe_datetime_str(jira_issue.fields.statuscategorychangedate)
-                close_tasks.append(task)
-            elif task != existing_task:
-                update_tasks.append(task)
+            # Check if task is closed in Jira and needs to be closed in regscale
+            if jira_task.status in closed_statuses and existing_task.status not in closed_statuses:
+                existing_task.status = "Closed"
+                existing_task.percentComplete = 100
+                existing_task.dateClosed = safe_datetime_str(jira_issue.fields.statuscategorychangedate)
+                close_tasks.append(existing_task)
+
+            # Check if update needed
+            elif (
+                jira_task.title != existing_task.title
+                or jira_task.description != existing_task.description
+                or jira_task.status != existing_task.status
+                or (jira_task.dueDate != existing_task.dueDate and jira_issue.fields.duedate)
+            ):
+                jira_task.id = existing_task.id  # Preserve the RegScale ID
+                update_tasks.append(jira_task)
         else:
-
+            # Task only exists in Jira - needs to be created in RegScale
+            insert_tasks.append(jira_task)
+
         progress.update(progress_task, advance=1)
+
     return insert_tasks, update_tasks, close_tasks
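The rewritten process_tasks_for_sync replaces pairwise comparison with a dictionary keyed on the Jira identifier, so each lookup is O(1) instead of a scan. A self-contained sketch of the same insert/update/close bucketing, using a hypothetical SimpleTask stand-in rather than RegScale's actual Task model:

from dataclasses import dataclass
from typing import Optional

@dataclass
class SimpleTask:
    key: str                  # external (Jira) identifier
    title: str
    status: str
    id: Optional[int] = None  # internal (RegScale) identifier

CLOSED_STATUSES = {"Done", "Closed"}

def bucket_tasks(incoming: list, existing: list):
    # Key existing records by external id for O(1) lookups
    existing_map = {t.key: t for t in existing if t.key}
    inserts, updates, closes = [], [], []
    for task in incoming:
        match = existing_map.get(task.key)
        if match is None:
            inserts.append(task)  # only exists in the source system
        elif task.status in CLOSED_STATUSES and match.status not in CLOSED_STATUSES:
            match.status = "Closed"
            closes.append(match)  # close the local copy
        elif (task.title, task.status) != (match.title, match.status):
            task.id = match.id    # preserve the local ID on update
            updates.append(task)
    return inserts, updates, closes

incoming = [SimpleTask("J-1", "Fix bug", "Done"), SimpleTask("J-2", "New task", "Open")]
existing = [SimpleTask("J-1", "Fix bug", "Open", id=10)]
print(bucket_tasks(incoming, existing))  # J-2 -> inserts, J-1 -> closes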
regscale/integrations/commercial/qualys/__init__.py
CHANGED
@@ -28,6 +28,7 @@ from regscale.core.app.utils.app_utils import (
     create_progress_object,
 )
 from regscale.core.app.utils.file_utils import download_from_s3
+from regscale.integrations.commercial.qualys.qualys_error_handler import QualysErrorHandler
 from regscale.integrations.commercial.qualys.scanner import QualysTotalCloudJSONLIntegration
 from regscale.integrations.commercial.qualys.variables import QualysVariables
 from regscale.integrations.scanner_integration import IntegrationAsset, IntegrationFinding
@@ -411,26 +412,52 @@ def _fetch_qualys_api_data(include_tags, exclude_tags):
     :param str exclude_tags: Tags to exclude from the query
     :return: Parsed XML data or None if request failed
     """
+    from regscale.integrations.commercial.qualys.qualys_error_handler import QualysErrorHandler
+
     qualys_url, qualys_api = _get_qualys_api()
     params = _prepare_qualys_params(include_tags, exclude_tags)
 
     logger.info("Fetching Qualys Total Cloud data with JSONL scanner...")
-    response = qualys_api.get(
-        url=urljoin(qualys_url, "/api/2.0/fo/asset/host/vm/detection/"),
-        headers=HEADERS,
-        params=params,
-        verify=ScannerVariables.sslVerify,  # Apply SSL verification setting
-    )
-
-        return None
-
+    try:
+        response = qualys_api.get(
+            url=urljoin(qualys_url, "/api/2.0/fo/asset/host/vm/detection/"),
+            headers=HEADERS,
+            params=params,
+            verify=ScannerVariables.sslVerify,  # Apply SSL verification setting
+        )
+
+        # Use the error handler to validate the response
+        is_valid, error_message, parsed_data = QualysErrorHandler.validate_response(response)
+
+        if not is_valid:
+            logger.error(f"Qualys API request failed: {error_message}")
+
+            # If we have parsed data, extract detailed error information
+            if parsed_data:
+                error_details = QualysErrorHandler.extract_error_details(parsed_data)
+                QualysErrorHandler.log_error_details(error_details)
+
+                # Check if this is a retryable error
+                error_code = error_details.get("error_code")
+                if error_code and QualysErrorHandler.should_retry(error_code):
+                    retry_after = error_details.get("retry_after", 60)
+                    logger.warning(f"This error may be retryable. Consider retrying after {retry_after} seconds.")
+
+                # Check if this is a fatal error that should stop processing
+                if error_code and QualysErrorHandler.is_fatal_error(error_code):
+                    logger.error("Fatal error encountered. Please check your credentials and permissions.")
+
+            return None
+
+        # Process API response
+        logger.info("Total cloud data fetched successfully. Processing with JSONL scanner...")
+        return parsed_data
+
+    except Exception as e:
+        logger.error(f"Unexpected error during Qualys API request: {e}")
+        logger.debug(traceback.format_exc())
+        return None
 
 
 def _initialize_integration(regscale_ssp_id, response_data, vulnerability_creation, ssl_verify):
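validate_response, should_retry and is_fatal_error come from the new qualys_error_handler module added in this release; the code above only logs the retry hint, it does not retry. A caller could layer automatic retries on top. A hedged sketch of that pattern: the error codes and the fetch_once callable below are illustrative assumptions, not values documented in this diff.

import time

RETRYABLE = {"1960", "1965"}  # assumed examples: rate/concurrency limits
FATAL = {"2000"}              # assumed example: bad credentials

def fetch_with_retry(fetch_once, max_attempts: int = 3):
    # fetch_once() is assumed to return (data, error_code, retry_after)
    for attempt in range(1, max_attempts + 1):
        data, error_code, retry_after = fetch_once()
        if error_code is None:
            return data
        if error_code in FATAL:
            raise RuntimeError(f"Fatal Qualys error {error_code}; check credentials")
        if error_code in RETRYABLE and attempt < max_attempts:
            time.sleep(retry_after)  # honor the suggested back-off
            continue
        return None  # non-retryable, or attempts exhausted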
@@ -501,13 +528,34 @@ def import_total_cloud_from_xml(regscale_ssp_id: int, xml_file: str):
 
     This command processes an existing XML file instead of making an API call, useful for testing.
     """
+    from regscale.integrations.commercial.qualys.qualys_error_handler import QualysErrorHandler
+
     try:
         logger.info(f"Processing Qualys Total Cloud XML file: {xml_file}")
 
-        # Parse the XML file
+        # Parse the XML file with error handling
         with open(xml_file, "r") as f:
             xml_content = f.read()
-
+
+        # Use the error handler to safely parse XML
+        success, response_data, error_message = QualysErrorHandler.parse_xml_safely(xml_content)
+
+        if not success:
+            logger.error(f"Failed to parse XML file: {error_message}")
+
+            # If we have partial data, try to extract error details
+            if response_data:
+                error_details = QualysErrorHandler.extract_error_details(response_data)
+                QualysErrorHandler.log_error_details(error_details)
+
+            return
+
+        # Check for Qualys-specific errors in the parsed data
+        error_details = QualysErrorHandler.extract_error_details(response_data)
+        if error_details.get("has_error"):
+            logger.error("XML file contains Qualys error response")
+            QualysErrorHandler.log_error_details(error_details)
+            return
 
         # Initialize the JSONLScannerIntegration implementation
         integration = QualysTotalCloudJSONLIntegration(
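parse_xml_safely returns a (success, data, error_message) triple so callers can distinguish malformed XML from a well-formed Qualys error response. A minimal sketch of that contract using only the standard library; the real handler's parsing and return types are not shown in this diff, so treat this as illustrative:

import xml.etree.ElementTree as ET
from typing import Optional, Tuple

def parse_xml_safely(xml_content: str) -> Tuple[bool, Optional[ET.Element], Optional[str]]:
    try:
        root = ET.fromstring(xml_content)
    except ET.ParseError as exc:
        return False, None, f"Malformed XML: {exc}"
    # A syntactically valid document can still carry a Qualys error payload
    code = root.findtext(".//CODE")
    if code is not None:
        return False, root, f"Qualys error {code}: {root.findtext('.//TEXT')}"
    return True, root, None

print(parse_xml_safely("<RESPONSE><CODE>1960</CODE><TEXT>slow down</TEXT></RESPONSE>"))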
@@ -740,7 +788,25 @@ def import_scans(
     aws_profile: str,
     upload_file: bool,
 ):
-    """
+    """
+    Import vulnerability scans from Qualys CSV files.
+
+    This command processes Qualys CSV export files and imports assets and vulnerabilities
+    into RegScale. The CSV files must contain specific required headers.
+
+    TROUBLESHOOTING:
+    If you encounter "No columns to parse from file" errors, try:
+    1. Run 'regscale qualys validate_csv -f <file_path>' first
+    2. Adjust the --skip_rows parameter (default: 129)
+    3. Check that your CSV file has the required headers
+
+    REQUIRED HEADERS:
+    Severity, Title, Exploitability, CVE ID, Solution, DNS, IP,
+    QG Host ID, OS, NetBIOS, FQDN
+
+    For detailed format requirements, see the documentation at:
+    regscale/integrations/commercial/qualys/QUALYS_CSV_FORMAT.md
+    """
     import_qualys_scans(
         folder_path=folder_path,
         regscale_ssp_id=regscale_ssp_id,
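The troubleshooting steps above assume you know how many preamble rows your export carries before the header line. A hypothetical helper (not part of regscale-cli) that scans for the header row and suggests a --skip_rows value, matching the skiprows=skip_rows - 1 convention used by the validator below:

import csv

def find_header_row(file_path: str, sentinel: str = "QG Host ID", max_scan: int = 500) -> int:
    # Scan the file for the row containing a known required header
    with open(file_path, newline="", encoding="utf-8", errors="replace") as fh:
        for i, row in enumerate(csv.reader(fh)):
            if i >= max_scan:
                break
            if sentinel in row:
                return i + 1  # 1-based value suitable for --skip_rows
    raise ValueError(f"header row containing {sentinel!r} not found")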
@@ -1612,7 +1678,7 @@ def update_and_insert_assets(
         qualysId=qualys_asset["ASSET_ID"],
     )
     # avoid duplication
-    if r_asset.qualysId not in
+    if r_asset.qualysId not in {v["qualysId"] for v in insert_assets}:
         insert_assets.append(r_asset)
     try:
         created_assets = Asset.batch_create(insert_assets, job_progress)
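The completed guard rebuilds the set comprehension on every loop iteration, which is quadratic over large imports. A sketch of the equivalent linear pattern with a persistent seen set, using dict-shaped stand-ins for the assets:

seen = set()
insert_assets = []
for qualys_asset in [{"qualysId": "a1"}, {"qualysId": "a1"}, {"qualysId": "a2"}]:
    if qualys_asset["qualysId"] not in seen:
        seen.add(qualys_asset["qualysId"])
        insert_assets.append(qualys_asset)
print([a["qualysId"] for a in insert_assets])  # ['a1', 'a2']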
@@ -1901,19 +1967,18 @@ def map_issue_data_to_assets(assets: list[dict], qualys_issue_data: dict) -> list:
 
 def lookup_asset(asset_list: list, asset_id: str = None) -> list[Asset]:
     """
-
+    Look up assets in the asset list by Qualys asset ID or return all unique assets.
+
+    Args:
+        asset_list (list): List of assets from RegScale.
+        asset_id (str, optional): Qualys asset ID to search for. Defaults to None.
 
-    :
-
-    :return: list of Asset objects
-    :rtype: list[Asset]
+    Returns:
+        list[Asset]: List of unique Asset objects matching the asset_id, or all unique assets if asset_id is None.
     """
     if asset_id:
-
-
-        results = [asset for asset in asset_list]
-        # Return unique list
-        return list(set(results)) or []
+        return list({asset for asset in asset_list if getattr(asset, "qualysId", None) == asset_id})
+    return list(set(asset_list)) or []
 
 
 def map_qualys_severity_to_regscale(severity: int) -> tuple[str, str]:
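Both branches of the rewritten lookup_asset deduplicate via set(), which only works if Asset defines __eq__ and __hash__. A minimal illustration with a hypothetical frozen dataclass standing in for Asset:

from dataclasses import dataclass

@dataclass(frozen=True)  # frozen dataclasses get __eq__ and __hash__
class MiniAsset:
    qualysId: str

assets = [MiniAsset("a1"), MiniAsset("a1"), MiniAsset("a2")]
print(list({a for a in assets if a.qualysId == "a1"}))  # [MiniAsset(qualysId='a1')]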
@@ -2045,7 +2110,7 @@ def inner_join(reg_list: list, qualys_list: list) -> list:
     :rtype: list
     """
 
-    set1 =
+    set1 = {getattr(asset, "qualysId", None) for asset in reg_list}
     data = []
     try:
         data = [list_qualys for list_qualys in qualys_list if getattr(list_qualys, "ASSET_ID", None) in set1]
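inner_join now builds the set of RegScale qualysIds once, then filters the Qualys list with an O(1) membership test per record. The same idea in miniature, with hypothetical dict records:

reg_ids = {"101", "205"}  # qualysIds collected from RegScale assets
qualys_rows = [{"ASSET_ID": "101"}, {"ASSET_ID": "999"}]
print([row for row in qualys_rows if row["ASSET_ID"] in reg_ids])  # [{'ASSET_ID': '101'}]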
@@ -2079,6 +2144,211 @@ def get_asset_groups_from_qualys() -> list:
     return asset_groups
 
 
+def _get_required_headers() -> list[str]:
+    """
+    Get the list of required headers for Qualys CSV validation.
+
+    :return: List of required header names
+    :rtype: list[str]
+    """
+    return [
+        "Severity",
+        "Title",
+        "Exploitability",
+        "CVE ID",
+        "Solution",
+        "DNS",
+        "IP",
+        "QG Host ID",
+        "OS",
+        "NetBIOS",
+        "FQDN",
+    ]
+
+
+def _read_csv_file(file_path: str, skip_rows: int, console):
+    """
+    Read and validate CSV file structure.
+
+    :param str file_path: Path to the CSV file
+    :param int skip_rows: Number of rows to skip
+    :param console: Rich console instance for output
+    :return: DataFrame if successful, None if failed
+    :rtype: Optional[pd.DataFrame]
+    """
+    import pandas as pd
+
+    console.print(f"[blue]Reading CSV file: {file_path}[/blue]")
+    console.print(f"[blue]Skipping first {skip_rows} rows[/blue]")
+
+    try:
+        if skip_rows > 0:
+            df = pd.read_csv(file_path, skiprows=skip_rows - 1, on_bad_lines="warn")
+        else:
+            df = pd.read_csv(file_path, on_bad_lines="warn")
+
+        if df.empty:
+            console.print("[red]❌ File is empty after skipping rows[/red]")
+            return None
+
+        if len(df.columns) == 0:
+            console.print("[red]❌ No columns found in the file[/red]")
+            return None
+
+        console.print("[green]✅ Successfully read CSV file[/green]")
+        console.print(f"[green]✅ Found {len(df.columns)} columns and {len(df)} rows[/green]")
+        return df
+
+    except pd.errors.EmptyDataError:
+        console.print("[red]❌ File is empty or contains no parseable data[/red]")
+        console.print("[yellow]💡 Try adjusting the skip_rows parameter[/yellow]")
+        return None
+    except pd.errors.ParserError as e:
+        console.print(f"[red]❌ Error parsing CSV file: {e}[/red]")
+        console.print("[yellow]💡 Check if the file is properly formatted CSV[/yellow]")
+        return None
+
+
+def _display_columns_table(df, console):
+    """
+    Display a table showing all columns found in the CSV.
+
+    :param df: DataFrame containing the CSV data
+    :param console: Rich console instance for output
+    """
+    from rich.table import Table
+
+    table = Table(title="Columns Found in CSV")
+    table.add_column("Index", style="cyan")
+    table.add_column("Column Name", style="magenta")
+
+    for i, col in enumerate(df.columns):
+        table.add_row(str(i), str(col))
+
+    console.print(table)
+
+
+def _check_required_headers(df, console):
+    """
+    Check for required headers and display results.
+
+    :param df: DataFrame containing the CSV data
+    :param console: Rich console instance for output
+    :return: Tuple of (missing_headers, found_headers)
+    :rtype: tuple[list[str], list[str]]
+    """
+    required_headers = _get_required_headers()
+
+    console.print("\n[blue]Checking for required headers:[/blue]")
+    missing_headers = []
+    found_headers = []
+
+    for header in required_headers:
+        if header in df.columns:
+            found_headers.append(header)
+            console.print(f"[green]✅ {header}[/green]")
+        else:
+            missing_headers.append(header)
+            console.print(f"[red]❌ {header}[/red]")
+
+    return missing_headers, found_headers
+
+
+def _display_header_validation_summary(missing_headers, console):
+    """
+    Display summary of header validation results.
+
+    :param list[str] missing_headers: List of missing required headers
+    :param console: Rich console instance for output
+    """
+    if missing_headers:
+        console.print(f"\n[yellow]⚠️ Missing {len(missing_headers)} required headers[/yellow]")
+        console.print("[yellow]You may need to adjust the skip_rows parameter or check your file format[/yellow]")
+    else:
+        console.print("\n[green]🎉 All required headers found! File should import successfully.[/green]")
+
+
+def _display_sample_data(df, console):
+    """
+    Display sample data from the CSV file.
+
+    :param df: DataFrame containing the CSV data
+    :param console: Rich console instance for output
+    """
+    from rich.table import Table
+
+    if len(df) == 0:
+        return
+
+    console.print("\n[blue]Sample data (first 3 rows):[/blue]")
+    sample_table = Table()
+
+    # Add columns (limit to first 5 for readability)
+    display_cols = list(df.columns)[:5]
+    for col in display_cols:
+        sample_table.add_column(str(col)[:20], style="cyan")
+
+    # Add rows
+    for i in range(min(3, len(df))):
+        row_data = [str(df.iloc[i][col])[:30] for col in display_cols]
+        sample_table.add_row(*row_data)
+
+    console.print(sample_table)
+
+    if len(df.columns) > 5:
+        console.print(f"[dim]... and {len(df.columns) - 5} more columns[/dim]")
+
+
+@qualys.command(name="validate_csv")
+@click.option(
+    "--file_path",
+    "-f",
+    type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
+    required=True,
+    help="Path to the Qualys CSV file to validate.",
+    prompt="Enter the path to the Qualys CSV file",
+)
+@click.option(
+    "--skip_rows",
+    type=click.INT,
+    help="The number of rows in the file to skip to get to the column headers, defaults to 129.",
+    default=129,
+)
+def validate_csv(file_path: str, skip_rows: int):
+    """
+    Validate a Qualys CSV file format and headers before importing.
+
+    This command helps troubleshoot CSV file issues by checking:
+    - File readability
+    - Proper column headers after skipping rows
+    - Required headers presence
+    """
+    from rich.console import Console
+
+    console = Console()
+
+    try:
+        # Read and validate CSV file
+        df = _read_csv_file(file_path, skip_rows, console)
+        if df is None:
+            return
+
+        # Display columns found
+        _display_columns_table(df, console)
+
+        # Check required headers
+        missing_headers, _ = _check_required_headers(df, console)
+
+        # Display validation summary
+        _display_header_validation_summary(missing_headers, console)
+
+        # Show sample data
+        _display_sample_data(df, console)
+
+    except Exception as e:
+        console.print(f"[red]❌ Unexpected error: {e}[/red]")
+
+
 __all__ = [
     "QualysTotalCloudJSONLIntegration",
     "import_total_cloud",