devsecops-engine-tools 1.101.1__py3-none-any.whl → 1.102.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of devsecops-engine-tools might be problematic.
- devsecops_engine_tools/engine_core/src/applications/runner_engine_core.py +19 -4
- devsecops_engine_tools/engine_core/src/domain/model/finding.py +9 -1
- devsecops_engine_tools/engine_core/src/domain/usecases/break_build.py +11 -7
- devsecops_engine_tools/engine_core/src/infrastructure/driven_adapters/azure/azure_devops.py +3 -1
- devsecops_engine_tools/engine_core/src/infrastructure/driven_adapters/defect_dojo/defect_dojo.py +88 -74
- devsecops_engine_tools/engine_core/src/infrastructure/driven_adapters/github/github_actions.py +3 -1
- devsecops_engine_tools/engine_core/src/infrastructure/driven_adapters/printer_pretty_table/printer_pretty_table.py +14 -7
- devsecops_engine_tools/engine_core/src/infrastructure/driven_adapters/runtime_local/runtime_local.py +1 -0
- devsecops_engine_tools/engine_sast/engine_code/src/applications/runner_engine_code.py +17 -1
- devsecops_engine_tools/engine_sast/engine_code/src/domain/model/gateways/tool_gateway.py +24 -11
- devsecops_engine_tools/engine_sast/engine_code/src/domain/usecases/code_scan.py +100 -37
- devsecops_engine_tools/engine_sast/engine_code/src/infrastructure/driven_adapters/bearer/bearer_deserealizator.py +9 -4
- devsecops_engine_tools/engine_sast/engine_code/src/infrastructure/driven_adapters/kiuwan/__init__.py +0 -0
- devsecops_engine_tools/engine_sast/engine_code/src/infrastructure/driven_adapters/kiuwan/kiuwan_deserealizator.py +59 -0
- devsecops_engine_tools/engine_sast/engine_code/src/infrastructure/driven_adapters/kiuwan/kiuwan_tool.py +537 -0
- devsecops_engine_tools/engine_sast/engine_secret/src/infrastructure/driven_adapters/gitleaks/gitleaks_tool.py +1 -1
- devsecops_engine_tools/engine_utilities/azuredevops/models/AzurePredefinedVariables.py +3 -0
- devsecops_engine_tools/engine_utilities/github/models/GithubPredefinedVariables.py +3 -0
- devsecops_engine_tools/version.py +1 -1
- {devsecops_engine_tools-1.101.1.dist-info → devsecops_engine_tools-1.102.0.dist-info}/METADATA +1 -1
- {devsecops_engine_tools-1.101.1.dist-info → devsecops_engine_tools-1.102.0.dist-info}/RECORD +24 -21
- {devsecops_engine_tools-1.101.1.dist-info → devsecops_engine_tools-1.102.0.dist-info}/WHEEL +0 -0
- {devsecops_engine_tools-1.101.1.dist-info → devsecops_engine_tools-1.102.0.dist-info}/entry_points.txt +0 -0
- {devsecops_engine_tools-1.101.1.dist-info → devsecops_engine_tools-1.102.0.dist-info}/top_level.txt +0 -0
devsecops_engine_tools/engine_sast/engine_code/src/infrastructure/driven_adapters/kiuwan/kiuwan_tool.py

@@ -0,0 +1,537 @@

```python
import os
from pathlib import Path
import subprocess
import platform
import time
import fnmatch
import urllib.request
import zipfile
import stat
import shutil
from typing import Dict, Any, List, Optional, Union
from urllib.parse import urlparse

import requests
from requests.exceptions import RequestException, Timeout, ConnectionError

from devsecops_engine_tools.engine_sast.engine_code.src.domain.model.config_tool import ConfigTool
from devsecops_engine_tools.engine_sast.engine_code.src.domain.model.gateways.tool_gateway import ToolGateway
from devsecops_engine_tools.engine_utilities.utils.logger_info import MyLogger
from devsecops_engine_tools.engine_utilities import settings
from devsecops_engine_tools.engine_core.src.domain.model.finding import EngineCodeFinding
from devsecops_engine_tools.engine_sast.engine_code.src.infrastructure.driven_adapters.kiuwan.kiuwan_deserealizator import KiuwanDeserealizator

logger = MyLogger.__call__(**settings.SETTING_LOGGER).get_logger()


class KiuwanTool(ToolGateway):
    """
    Kiuwan adapter. Installs the Kiuwan CLI on Linux, macOS, or Windows,
    runs a baseline or delivery analysis, and promotes the delivery to
    baseline when the required conditions are met.
    """

    def __init__(self, config: Dict[str, Any]):
        self.user: str = config.get("user_engine_code", "")
        self.password: str = config.get("token_engine_code", "")
        self.base_url: str = config.get("host_engine_code", "")
        self.build_execution_id = config.get("build_execution_id", "")
        self.source_branch_name: str = config.get("source_branch_name", "")
        self.target_branch: str = config.get("target_branch", "")
        self.build_task: str = config.get("build_task", "")
        self.modelo_regla: str = config.get("MODELOS", {}).get(self.build_task, "General")
        self.domain_id: str = config.get("domain_id_engine_code", "")
        self.headers = {"X-KW-CORPORATE-DOMAIN-ID": self.domain_id}
        self.working_directory: str = os.getcwd()
        self.kiuwan_agent_path: Optional[str] = self._find_or_download_kiuwan_agent()
        self.repository_name: str = ""

    def run_tool(
        self,
        folder_to_scan: str,
        pull_request_files: List[str],
        agent_work_folder: str,
        repository: str,
        config_tool: ConfigTool
    ) -> tuple[List[Any], Optional[str]]:
        """
        Run the code scan tool.
        """
        # Validate the target branch
        if not self._validate_target_branch(config_tool):
            logger.warning("Target branch %s is not allowed for analysis", self.target_branch)
            return [], None

        # Configure the repository from the parameter
        self.repository_name = repository

        # Prepare the scan directory
        scan_directory = self._prepare_scan_directory(
            folder_to_scan,
            pull_request_files,
            agent_work_folder,
            config_tool
        )

        logger.info(
            """== Context: ==
- Repository: %s
- Source branch: %s
- Target branch: %s
- Scan directory: %s
== == == ==""",
            self.repository_name,
            self.source_branch_name,
            self.target_branch,
            scan_directory
        )

        analysis_type = self._determine_analysis_type()
        logger.info("Analysis selected: %s\n", analysis_type)
        logger.info("Analysis %s started", analysis_type)

        self._execute_kiuwan_scan(analysis_type, scan_directory)
        self._validate_results(analysis_type)
        last_analysis = self._fetch_last_analysis()
        self._promote_to_baseline(last_analysis)
        if not last_analysis:
            last_analysis = self._fetch_last_analysis()
        if not last_analysis:
            # Fail fast if the analysis never reached FINISHED instead of dereferencing None
            raise RuntimeError(f"No finished Kiuwan analysis was found for {self.repository_name}")
        defects = self._fetch_defects_for_analysis(last_analysis.get("analysisCode", ""))
        findings = self._map_defects_to_findings(last_analysis, defects, last_analysis.get("analysisCode", ""), config_tool.data["KIUWAN"]["SEVERITY"])

        defects_file_path = self._download_kiuwan_csv_official(last_analysis.get("analysisCode", ""), "kiuwan_findings.csv")

        return findings, defects_file_path

    def _validate_target_branch(self, config_tool: ConfigTool) -> bool:
        """Validate whether the target branch is allowed for analysis."""
        target_branches = config_tool.target_branches
        if target_branches and self.target_branch not in target_branches:
            return False
        return True

    def _prepare_scan_directory(
        self,
        folder_to_scan: str,
        pull_request_files: List[str],
        agent_work_folder: str,
        config_tool: ConfigTool
    ) -> str:
        """Prepare the directory that will be scanned by Kiuwan."""

        # Create a temporary directory for the scan
        temp_scan_dir = os.path.join(agent_work_folder, "temp_folder_to_scan")
        os.makedirs(temp_scan_dir, exist_ok=True)

        if folder_to_scan is None:
            # Copy only the pull request files
            logger.info("Copying PR files to temporary scan directory")
            for file_path in pull_request_files:
                if os.path.exists(file_path):
                    # Preserve the directory structure
                    relative_path = os.path.relpath(file_path, self.working_directory)
                    dest_path = os.path.join(temp_scan_dir, relative_path)
                    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                    shutil.copy2(file_path, dest_path)
        else:
            # Copy the whole folder_to_scan
            logger.info("Copying folder_to_scan to temporary scan directory")
            if os.path.exists(folder_to_scan):
                shutil.copytree(folder_to_scan, temp_scan_dir, dirs_exist_ok=True)

        # Handle exclusions
        if config_tool.exclude_folder:
            self._handle_exclusions(temp_scan_dir, config_tool.exclude_folder, agent_work_folder)

        return temp_scan_dir

    def _handle_exclusions(self, scan_dir: str, exclude_folders: List[str], agent_work_folder: str):
        """Move excluded files/folders to a separate directory."""

        exclude_dir = os.path.join(agent_work_folder, "exclude_to_scan")
        os.makedirs(exclude_dir, exist_ok=True)

        logger.info("Processing exclusions: %s", exclude_folders)

        for exclude_pattern in exclude_folders:
            # Find files/folders matching the pattern
            for root, dirs, files in os.walk(scan_dir):
                # Check directories
                for dir_name in dirs[:]:  # Iterate over a slice copy so the list can be modified in place
                    if self._matches_exclude_pattern(dir_name, exclude_pattern):
                        source_path = os.path.join(root, dir_name)
                        relative_path = os.path.relpath(source_path, scan_dir)
                        dest_path = os.path.join(exclude_dir, relative_path)

                        os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                        shutil.move(source_path, dest_path)
                        dirs.remove(dir_name)  # Do not walk into this directory
                        logger.info("Excluded directory: %s", relative_path)

                # Check files
                for file_name in files[:]:
                    if self._matches_exclude_pattern(file_name, exclude_pattern):
                        source_path = os.path.join(root, file_name)
                        relative_path = os.path.relpath(source_path, scan_dir)
                        dest_path = os.path.join(exclude_dir, relative_path)

                        os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                        shutil.move(source_path, dest_path)
                        logger.info("Excluded file: %s", relative_path)

    def _matches_exclude_pattern(self, item_name: str, pattern: str) -> bool:
        """Check whether an item matches the exclusion pattern."""
        return fnmatch.fnmatch(item_name, pattern) or pattern in item_name

    def _determine_analysis_type(self) -> str:
        """Determine whether a baseline or a delivery analysis is needed."""
        url = f"{self.base_url}/applications/list?applicationName={self.repository_name}"
        try:
            response = requests.get(url, headers=self.headers, auth=(self.user, self.password), timeout=60)
            response.raise_for_status()
            apps = response.json()
            app_names = [app["name"] for app in apps]
            return "baseline" if self.repository_name not in app_names else "delivery"
        except (requests.RequestException, ValueError) as e:
            raise RuntimeError(f"Failed to determine analysis type in engine code: {e}") from e

    def _execute_kiuwan_scan(self, analysis_type: str, scan_directory: str) -> Union[subprocess.CompletedProcess, Dict[str, Any]]:
        """Execute a Kiuwan baseline or delivery analysis."""

        cmd = [
            self.kiuwan_agent_path,
            "--user", self.user,
            "--pass", self.password,
            "--domain-id", self.domain_id,
            "--softwareName", self.repository_name,
            "--sourcePath", scan_directory,  # Use the prepared directory
            "--label", self.build_execution_id,
            "--wait-for-results",
            "--model-name", self.modelo_regla
        ]

        if analysis_type == "baseline":
            cmd.extend(["--create", "--analysis-scope", "baseline"])
        else:
            cmd.extend(["--analysis-scope", "completeDelivery", "--change-request", "inprogress",
                        "--branch-name", self.source_branch_name])

        logger.info("Kiuwan analysis will be executed using model %s", self.modelo_regla)
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True, errors="ignore")
            logger.info("Scan results: %s", result)
            return result
        except subprocess.CalledProcessError as e:
            error = {"status": "failed", "output": e.stderr}
            logger.error("Scan results: %s", error)
            return error

    def _validate_results(self, analysis_type: str):
        """Validate the analysis results."""
        try:
            if analysis_type == "baseline":
                logger.info("Validate baseline results with applicationBusinessValue")
                url = f"{self.base_url}/applications/list?applicationName={self.repository_name}"
                response = requests.get(url, headers=self.headers, auth=(self.user, self.password), timeout=60)
                response.raise_for_status()
                application_business_value = response.json()[0].get("applicationBusinessValue")
                if application_business_value in ["CRITICAL", "HIGH"]:
                    logger.warning("Baseline analysis failed: Business Value is %s", application_business_value)
            else:
                logger.info("Validate delivery results with auditResult")
                url = f"{self.base_url}/applications/deliveries?application={self.repository_name}"
                response = requests.get(url, headers=self.headers, auth=(self.user, self.password), timeout=60)
                response.raise_for_status()
                results = response.json()

                for result in results:
                    audit_result = result.get("auditResult", "")
                    if audit_result != "OK":
                        logger.warning("Delivery analysis failed: Audit Result is %s", audit_result)

        except (requests.RequestException, ValueError) as e:
            logger.error("Analysis result failed:\nRepository: %s", self.repository_name)
            raise RuntimeError(f"Validation of analysis results failed: {e}") from e

    def _fetch_last_analysis(self) -> Optional[Dict[str, Any]]:
        """
        Fetch the last analysis for the repository and wait until it has finished.

        Returns:
            Dictionary containing the last analysis data, or None if the
            analysis did not reach FINISHED within the retry budget.

        Raises:
            RuntimeError: If there is an error fetching the analysis or the response is invalid.
        """
        last_analysis_url = f"{self.base_url}/applications/last_analysis?application={self.repository_name}"
        logger.info("Getting last analysis...")
        max_tries = 10
        tried = 0
        try:
            while tried <= max_tries:
                response = requests.get(
                    last_analysis_url,
                    headers=self.headers,
                    auth=(self.user, self.password),
                    timeout=60,
                )
                response.raise_for_status()
                last_analysis = response.json()

                if last_analysis.get("analysisStatus") == "FINISHED":
                    return last_analysis

                logger.info("Analysis status is %s, waiting for status FINISHED", last_analysis.get('analysisStatus'))
                time.sleep(5)
                tried += 1
            return None
        except (requests.RequestException, ValueError) as e:
            raise RuntimeError(f"Failed to fetch last analysis: {e}") from e

    def _promote_to_baseline(self, last_analysis: Optional[Dict[str, Any]]):
        """Promote the delivery to baseline when on the master branch."""

        if self.target_branch == "master" or not last_analysis:
            logger.info("Promoting delivery to baseline...")
            cmd = [
                self.kiuwan_agent_path,
                "--promote-to-baseline",
                "--user", self.user,
                "--pass", self.password,
                "--domain-id", self.domain_id,
                "--softwareName", self.repository_name,
                "--change-request", "inprogress",
                "--label", self.build_execution_id,
            ]
            try:
                subprocess.run(cmd, capture_output=True, text=True, check=True, errors="ignore")
                logger.info("Promotion completed")
            except subprocess.CalledProcessError as e:
                logger.error("Promotion failed: %s", e)

        else:
            logger.info("No promotion needed")

    def _fetch_defects_for_analysis(self, analysis_code: str) -> Dict[str, Any]:
        """
        Fetch all defects for a given analysis code, handling pagination.

        Args:
            analysis_code: The code of the analysis to fetch defects for.

        Returns:
            Dictionary containing all defects and metadata.

        Raises:
            RuntimeError: If there is an error fetching defects or the response is invalid.
        """
        base_defects_url = f"{self.base_url}/apps/analysis/{analysis_code}/defects"
        first_defects_page = "?page=1&count=5000"
        last_analysis_defects_url = base_defects_url + first_defects_page

        logger.info("Getting defects...")
        try:
            response = requests.get(
                last_analysis_defects_url,
                headers=self.headers,
                auth=(self.user, self.password),
                timeout=60,
            )
            response.raise_for_status()
            last_analysis_defects: Dict[str, Any] = response.json()

            all_defects = last_analysis_defects.get("defects", [])
            total_defects = last_analysis_defects.get("defects_count", 0)

            if total_defects > 5000:
                # Ceiling division: the API serves at most 5000 defects per page
                total_pages = (total_defects + 4999) // 5000
                for page in range(2, total_pages + 1):
                    paginated_url = f"{base_defects_url}?page={page}&count=5000"
                    logger.info("Fetching page %s of defects...", page)
                    response = requests.get(
                        paginated_url,
                        headers=self.headers,
                        auth=(self.user, self.password),
                        timeout=60,
                    )
                    if response.status_code == 200:
                        page_data = response.json()
                        all_defects.extend(page_data.get("defects", []))
                    else:
                        logger.warning("Failed to fetch page %s: %s", page, response.status_code)

            last_analysis_defects["defects"] = all_defects
            return last_analysis_defects

        except (requests.RequestException, ValueError) as e:
            raise RuntimeError(f"Failed to fetch defects: {e}") from e

    def _download_kiuwan_csv_official(self, analysis_code: str, output_path: str = "kiuwan_findings.csv") -> str:
        """
        Download the official Kiuwan SAST CSV report using the vulnerabilities/export API.
        Compatible with DefectDojo's 'Kiuwan Scan' parser.

        Args:
            analysis_code (str): The analysis code to export.
            output_path (str): Path to save the CSV file.

        Returns:
            str: Path to the downloaded CSV file.
        """
        csv_url = (
            f"{self.base_url}/applications/analysis/vulnerabilities/export?"
            f"application={self.repository_name}&code={analysis_code}&type=CSV"
        )

        try:
            logger.info("Downloading official Kiuwan CSV from: %s", csv_url)
            response = requests.get(
                csv_url,
                auth=(self.user, self.password),
                headers=self.headers,
                timeout=60,
                stream=True
            )
            response.raise_for_status()

            if 'text/csv' not in response.headers.get('Content-Type', ''):
                logger.warning("Response Content-Type is not CSV: %s", response.headers.get('Content-Type'))

            with open(output_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

            logger.info("Official Kiuwan CSV downloaded successfully: %s", output_path)
            return output_path

        except RequestException as e:
            logger.error("Failed to download Kiuwan CSV: %s", e)
            raise RuntimeError(f"Error downloading Kiuwan CSV from {csv_url}: {e}") from e

    def _map_defects_to_findings(
        self,
        last_analysis: Dict[str, Any],
        defects_data: Dict[str, Any],
        analysis_code: str,
        severity_mapper: Dict[str, str]
    ) -> List[EngineCodeFinding]:
        return KiuwanDeserealizator.get_findings(last_analysis, defects_data, analysis_code, severity_mapper)

    def _find_or_download_kiuwan_agent(self) -> str:
        """Ensure the Kiuwan agent is available and return its path."""

        system = platform.system()
        agent_script = {
            "Windows": "agent.cmd",
            "Linux": "agent.sh",
            "Darwin": "agent.sh"  # macOS
        }.get(system)

        if not agent_script:
            raise RuntimeError(f"Unsupported OS: {system}")

        agent_path = self._search_agent_script(agent_script)
        if agent_path:
            return agent_path

        self._download_and_extract_kiuwan()
        agent_path = self._search_agent_script(agent_script)

        if not agent_path:
            raise FileNotFoundError(f"{agent_script} was not found.")

        logger.info("Kiuwan agent path is: %s", agent_path)
        return agent_path

    def _search_agent_script(self, script_name: str) -> str:
        """Search for the Kiuwan script in the tools directory."""
        for root, _, files in os.walk(self.working_directory):
            if script_name in files:
                return os.path.join(root, script_name)
        return ""

    def _set_execution_permissions(self, extracted_files):
        """Set execution permissions on the extracted files."""

        # Set execution permissions on .sh files if on a Unix-like system
        if platform.system().lower() in ["linux", "darwin"]:
            for file_path in extracted_files:
                if file_path.endswith(".sh"):
                    try:
                        os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IEXEC)
                        logger.info("Execution permissions granted to: %s", file_path)
                    except Exception as e:
                        logger.warning("Failed to set execution permissions for %s: %s", file_path, e)

    def _download_and_extract_kiuwan(self):
        """Download and extract the Kiuwan CLI, flattening the internal folder structure and setting execution permissions."""
        kiuwan_url = "https://www.kiuwan.com/pub/analyzer/KiuwanLocalAnalyzer.zip"
        zip_path = os.path.join(self.working_directory, "KiuwanLocalAnalyzer.zip")

        try:
            parsed_url = urlparse(kiuwan_url)
            if parsed_url.scheme != "https":
                raise ValueError("Only HTTPS URLs are allowed for downloading the Kiuwan CLI.")

            logger.info("Downloading Kiuwan CLI...")
            urllib.request.urlretrieve(kiuwan_url, zip_path)  # nosec

            extracted_files = self._extract_zip(zip_path)

            os.remove(zip_path)
            logger.info("Kiuwan CLI extracted successfully into the root tools directory.")

            self._set_execution_permissions(extracted_files)

        except Exception as e:
            raise RuntimeError(f"Error downloading or extracting the Kiuwan agent: {e}") from e

    def _extract_zip(self, zip_path) -> list:
        """Extract the zip archive, stripping the top-level KiuwanLocalAnalyzer/ folder."""

        extracted_files = []

        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            for member in zip_ref.namelist():
                if member.startswith("KiuwanLocalAnalyzer/") and not member.endswith("/"):
                    relative_path = os.path.relpath(member, "KiuwanLocalAnalyzer")
                    target_path = os.path.join(self.working_directory, relative_path)

                    Path(os.path.dirname(target_path)).mkdir(parents=True, exist_ok=True)
                    with zip_ref.open(member) as source, open(target_path, "wb") as target:
                        shutil.copyfileobj(source, target)
                    extracted_files.append(target_path)

        return extracted_files


def get_kiuwan_instance(dict_args: Dict, devops_platform_gateway) -> KiuwanTool:
    """
    Create a KiuwanTool instance ready to scan.
    """
    logger.info("Retrieving kiuwan configuration file...")
    kiuwan_config_tool = devops_platform_gateway.get_remote_config(
        dict_args["remote_config_repo"],
        "/engine_sast/engine_code/ConfigTool.json",
        dict_args["remote_config_branch"]
    )
    logger.info("Kiuwan configuration file retrieved")
    logger.info("Setting config dictionary for the scan tool...")
    config = {
        "host_engine_code": kiuwan_config_tool["KIUWAN"]["SERVER"]["BASE_URL"],
        "user_engine_code": kiuwan_config_tool["KIUWAN"]["SERVER"]["USER"],
        "domain_id_engine_code": kiuwan_config_tool["KIUWAN"]["SERVER"]["DOMAIN_ID"],
        "token_engine_code": dict_args["token_engine_code"],
        "build_execution_id": devops_platform_gateway.get_variable("build_execution_id"),
        "source_branch_name": devops_platform_gateway.get_variable("branch_name"),
        "target_branch": devops_platform_gateway.get_variable("target_branch"),
        "build_task": devops_platform_gateway.get_variable("build_task"),
        "MODELOS": kiuwan_config_tool["KIUWAN"]["MODELOS"]
    }
    return KiuwanTool(config)
```
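To show how the pieces fit together, here is a minimal, hypothetical wiring of the new adapter. The stub gateway and every literal value below are illustrative assumptions, not part of the released package; in practice the gateway comes from the engine_core Azure DevOps or GitHub adapters, and the configuration comes from the remote ConfigTool.json. Note that instantiating KiuwanTool already locates or downloads the Kiuwan Local Analyzer, so this requires network access and valid credentials.

```python
# Hypothetical usage sketch: the stub gateway and all literal values are
# illustrative assumptions, not part of the released package.
from devsecops_engine_tools.engine_sast.engine_code.src.infrastructure.driven_adapters.kiuwan.kiuwan_tool import (
    get_kiuwan_instance,
)


class StubPlatformGateway:
    """Stands in for the Azure DevOps / GitHub gateway (assumed interface)."""

    def get_remote_config(self, repo, path, branch):
        # Would normally fetch /engine_sast/engine_code/ConfigTool.json
        return {
            "KIUWAN": {
                "SERVER": {
                    "BASE_URL": "https://api.kiuwan.com",  # illustrative
                    "USER": "svc-kiuwan",                  # illustrative
                    "DOMAIN_ID": "1234",                   # illustrative
                },
                "MODELOS": {"maven": "General"},           # illustrative
            }
        }

    def get_variable(self, name):
        return {
            "build_execution_id": "42",
            "branch_name": "feature/scan",
            "target_branch": "master",
            "build_task": "maven",
        }[name]


dict_args = {
    "remote_config_repo": "remote-config",  # illustrative
    "remote_config_branch": "main",         # illustrative
    "token_engine_code": "<kiuwan-token>",  # illustrative
}

# Locates or downloads the Kiuwan Local Analyzer as a side effect.
kiuwan = get_kiuwan_instance(dict_args, StubPlatformGateway())
```

The engine then calls `run_tool` with the folder to scan (or `None` to scan only the pull-request files), the PR file list, the agent work folder, the repository name, and the parsed `ConfigTool`.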
devsecops_engine_tools/engine_sast/engine_secret/src/infrastructure/driven_adapters/gitleaks/gitleaks_tool.py

@@ -67,7 +67,7 @@ class GitleaksTool(ToolGateway):

```diff
             json.dump(combined_data, f, ensure_ascii=False, indent=4)
 
     def _check_path(self, path, excluded_paths):
-        parts = path.split(
+        parts = [p for p in path.replace('\\', '/').split('/') if p]
         for part in parts:
             if part in excluded_paths: return True
         return False
```
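The gitleaks change replaces a separator-sensitive split: the rewritten expression normalizes backslashes to forward slashes and discards empty segments, so an exclusion list matches path components from both Windows and Unix agents. A standalone sketch of the resulting behavior (function name is ours, logic mirrors `_check_path` after the fix):

```python
def check_path(path: str, excluded_paths: set) -> bool:
    # Normalize separators, drop empty segments, then test each component.
    parts = [p for p in path.replace('\\', '/').split('/') if p]
    return any(part in excluded_paths for part in parts)

assert check_path(r"src\vendor\lib.js", {"vendor"}) is True   # Windows-style path now matches
assert check_path("/src/app/main.py", {"vendor"}) is False    # leading '/' yields no empty component
```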
devsecops_engine_tools/version.py

@@ -1 +1 @@

```diff
-version = '1.101.1'
+version = '1.102.0'
```