pyattackforge 0.1.0__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyattackforge/__init__.py +0 -0
- pyattackforge/client.py +924 -340
- pyattackforge/prev_client.py +384 -0
- pyattackforge-0.1.3.dist-info/METADATA +281 -0
- pyattackforge-0.1.3.dist-info/RECORD +8 -0
- {pyattackforge-0.1.0.dist-info → pyattackforge-0.1.3.dist-info}/WHEEL +0 -0
- {pyattackforge-0.1.0.dist-info → pyattackforge-0.1.3.dist-info}/licenses/LICENSE +661 -661
- {pyattackforge-0.1.0.dist-info → pyattackforge-0.1.3.dist-info}/top_level.txt +0 -0
- pyattackforge-0.1.0.dist-info/METADATA +0 -120
- pyattackforge-0.1.0.dist-info/RECORD +0 -7
pyattackforge/client.py
CHANGED
|
@@ -1,340 +1,924 @@
|
|
|
1
|
-
"""
|
|
2
|
-
PyAttackForge is free software: you can redistribute it and/or modify
|
|
3
|
-
it under the terms of the GNU Affero General Public License as published by
|
|
4
|
-
the Free Software Foundation, either version 3 of the License, or
|
|
5
|
-
(at your option) any later version.
|
|
6
|
-
|
|
7
|
-
PyAttackForge is distributed in the hope that it will be useful,
|
|
8
|
-
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
9
|
-
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
10
|
-
GNU Affero General Public License for more details.
|
|
11
|
-
|
|
12
|
-
You should have received a copy of the GNU Affero General Public License
|
|
13
|
-
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
14
|
-
"""
|
|
15
|
-
|
|
16
|
-
import
|
|
17
|
-
import
|
|
18
|
-
|
|
19
|
-
from
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
Args:
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
"
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
"
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
"
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
dict:
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
if
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
if
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
if
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
1
|
+
"""
|
|
2
|
+
PyAttackForge is free software: you can redistribute it and/or modify
|
|
3
|
+
it under the terms of the GNU Affero General Public License as published by
|
|
4
|
+
the Free Software Foundation, either version 3 of the License, or
|
|
5
|
+
(at your option) any later version.
|
|
6
|
+
|
|
7
|
+
PyAttackForge is distributed in the hope that it will be useful,
|
|
8
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
9
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
10
|
+
GNU Affero General Public License for more details.
|
|
11
|
+
|
|
12
|
+
You should have received a copy of the GNU Affero General Public License
|
|
13
|
+
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import os
|
|
17
|
+
import requests
|
|
18
|
+
import logging
|
|
19
|
+
from datetime import datetime, timezone, timedelta
|
|
20
|
+
from typing import Any, Dict, Optional, Set, Tuple, List
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger("pyattackforge")
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class PyAttackForgeClient:
|
|
27
|
+
"""
|
|
28
|
+
Python client for interacting with the AttackForge API.
|
|
29
|
+
|
|
30
|
+
Provides methods to manage assets, projects, and vulnerabilities.
|
|
31
|
+
Supports dry-run mode for testing without making real API calls.
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
def upsert_finding_for_project(
    self,
    project_id: str,
    title: str,
    affected_assets: list,
    priority: str,
    likelihood_of_exploitation: int,
    description: str,
    attack_scenario: str,
    remediation_recommendation: str,
    steps_to_reproduce: str,
    tags: Optional[list] = None,
    notes: Optional[list] = None,
    is_zeroday: bool = False,
    is_visible: bool = True,
    import_to_library: Optional[str] = None,
    import_source: Optional[str] = None,
    import_source_id: Optional[str] = None,
    custom_fields: Optional[list] = None,
    linked_testcases: Optional[list] = None,
    custom_tags: Optional[list] = None,
    writeup_custom_fields: Optional[list] = None,
) -> Dict[str, Any]:
    """
    Create or update a finding for a project.

    If a finding with the same title already exists on the project, merge the
    new affected assets and notes into it; otherwise create a new finding.

    Args:
        project_id (str): The project ID.
        title (str): The title of the finding (used as the match key).
        affected_assets (list): Affected asset objects ({"name": ...}) or plain names.
        priority (str): The priority (e.g., "Critical").
        likelihood_of_exploitation (int): Likelihood of exploitation (e.g., 10).
        description (str): Description of the finding.
        attack_scenario (str): Attack scenario details.
        remediation_recommendation (str): Remediation recommendation.
        steps_to_reproduce (str): Steps to reproduce the finding.
        tags (list, optional): List of tags.
        notes (list, optional): Notes (dicts with a "note" key, or plain strings).
        is_zeroday (bool, optional): Whether this is a zero-day finding.
        is_visible (bool, optional): Whether the finding is visible.
        import_to_library (str, optional): Library to import to.
        import_source (str, optional): Source of import.
        import_source_id (str, optional): Source ID for import.
        custom_fields (list, optional): List of custom fields.
        linked_testcases (list, optional): List of linked testcases.
        custom_tags (list, optional): List of custom tags.
        writeup_custom_fields (list, optional): Custom fields for the writeup.

    Returns:
        dict: {"action": "update", ...} with the merged payload and API response,
            or {"action": "create", "result": ...} for a new finding.

    Raises:
        RuntimeError: If the update API call fails.
    """
    # Normalize asset names. Each name is looked up as a best-effort existence
    # check; the lookup result itself is intentionally unused (auto-creation
    # of missing assets is disabled).
    asset_names = []
    for asset in affected_assets:
        name = asset["name"] if isinstance(asset, dict) and "name" in asset else asset
        self.get_asset_by_name(name)
        asset_names.append(name)

    # Fix: removed debug print statements that dumped every finding (including
    # full finding bodies) to stdout on each call.
    findings = self.get_findings_for_project(project_id)
    match = next((f for f in findings if f.get("vulnerability_title") == title), None)

    if match:
        # Merge assets already on the finding with the new ones.
        updated_assets = set()
        if "vulnerability_affected_assets" in match:
            for asset in match["vulnerability_affected_assets"]:
                # The API may nest the asset record or inline the name.
                if isinstance(asset, dict):
                    if "asset" in asset and isinstance(asset["asset"], dict) and "name" in asset["asset"]:
                        updated_assets.add(asset["asset"]["name"])
                    elif "name" in asset:
                        updated_assets.add(asset["name"])
                elif isinstance(asset, str):
                    updated_assets.add(asset)
        elif "vulnerability_affected_asset_name" in match:
            updated_assets.add(match["vulnerability_affected_asset_name"])
        updated_assets.update(asset_names)

        # Merge notes, de-duplicating by note text.
        existing_notes = match.get("vulnerability_notes", [])
        note_texts = {n["note"] for n in existing_notes if "note" in n}
        for n in notes or []:
            if isinstance(n, dict) and "note" in n:
                if n["note"] not in note_texts:
                    existing_notes.append(n)
                    note_texts.add(n["note"])
            elif isinstance(n, str):
                if n not in note_texts:
                    existing_notes.append({"note": n, "type": "PLAINTEXT"})
                    note_texts.add(n)

        update_payload = {
            "affected_assets": [{"assetName": n} for n in updated_assets],
            "notes": existing_notes,
            "project_id": project_id,
        }
        resp = self._request("put", f"/api/ss/vulnerability/{match['vulnerability_id']}", json_data=update_payload)
        if resp.status_code not in (200, 201):
            raise RuntimeError(f"Failed to update finding: {resp.text}")
        return {
            "action": "update",
            "existing_finding_id": match["vulnerability_id"],
            "update_payload": update_payload,
            "api_response": resp.json(),
        }

    # No match: create a new finding, normalizing assets to {"assetName": ...}.
    assets_payload = [
        {"assetName": a["name"]} if isinstance(a, dict) and "name" in a else {"assetName": a}
        for a in affected_assets
    ]
    result = self.create_vulnerability(
        project_id=project_id,
        title=title,
        affected_assets=assets_payload,
        priority=priority,
        likelihood_of_exploitation=likelihood_of_exploitation,
        description=description,
        attack_scenario=attack_scenario,
        remediation_recommendation=remediation_recommendation,
        steps_to_reproduce=steps_to_reproduce,
        tags=tags,
        notes=notes,
        is_zeroday=is_zeroday,
        is_visible=is_visible,
        import_to_library=import_to_library,
        import_source=import_source,
        import_source_id=import_source_id,
        custom_fields=custom_fields,
        linked_testcases=linked_testcases,
        custom_tags=custom_tags,
        writeup_custom_fields=writeup_custom_fields,
    )
    return {
        "action": "create",
        "result": result,
    }
|
|
191
|
+
|
|
192
|
+
def get_findings_for_project(self, project_id: str, priority: Optional[str] = None) -> list:
    """
    List all findings/vulnerabilities recorded on a project.

    Args:
        project_id (str): The project ID.
        priority (str, optional): Restrict results to this priority
            (e.g., "Critical"). Defaults to None (no filter).

    Returns:
        list: Finding/vulnerability dicts (empty if the payload shape is
            unrecognized).

    Raises:
        RuntimeError: If the API request fails.
    """
    query = {"priority": priority} if priority else {}
    response = self._request("get", f"/api/ss/project/{project_id}/vulnerabilities", params=query)
    if response.status_code != 200:
        raise RuntimeError(f"Failed to fetch findings: {response.text}")
    body = response.json()
    # The endpoint may wrap results in a "vulnerabilities" key or return a bare list.
    if isinstance(body, dict) and "vulnerabilities" in body:
        return body["vulnerabilities"]
    if isinstance(body, list):
        return body
    return []
|
|
217
|
+
|
|
218
|
+
def get_vulnerability(self, vulnerability_id: str) -> Dict[str, Any]:
    """
    Fetch a single vulnerability by its ID.

    Args:
        vulnerability_id (str): The vulnerability ID.

    Returns:
        dict: The vulnerability record, unwrapped from the "vulnerability"
            envelope when the API uses one.

    Raises:
        ValueError: If vulnerability_id is empty.
        RuntimeError: If the API request fails.
    """
    if not vulnerability_id:
        raise ValueError("Missing required field: vulnerability_id")
    response = self._request("get", f"/api/ss/vulnerability/{vulnerability_id}")
    if response.status_code != 200:
        raise RuntimeError(f"Failed to fetch vulnerability: {response.text}")
    body = response.json()
    if isinstance(body, dict) and "vulnerability" in body:
        return body["vulnerability"]
    return body
|
|
237
|
+
|
|
238
|
+
def add_note_to_finding(
    self,
    vulnerability_id: str,
    note: Any,
    note_type: str = "PLAINTEXT"
) -> Dict[str, Any]:
    """
    Append a note to an existing finding, de-duplicating by note text.

    Args:
        vulnerability_id (str): The vulnerability ID.
        note (str or dict): Note text, or a note object with a 'note' key.
        note_type (str): Note type used when a plain string is given
            (default: "PLAINTEXT").

    Returns:
        dict: API response.

    Raises:
        ValueError: If vulnerability_id or the note text is missing/empty.
        RuntimeError: If the update API call fails.
    """
    if not vulnerability_id:
        raise ValueError("Missing required field: vulnerability_id")
    if note is None or note == "":
        raise ValueError("Missing required field: note")
    if isinstance(note, dict):
        entry = note
        text = note.get("note")
    else:
        text = str(note)
        entry = {"note": text, "type": note_type}
    if not text:
        raise ValueError("Note text cannot be empty")
    # Best-effort fetch of the notes already on the finding; if it fails we
    # still push the new note on its own.
    try:
        vuln = self.get_vulnerability(vulnerability_id)
        if isinstance(vuln, dict):
            current = vuln.get("vulnerability_notes") or vuln.get("notes") or []
        else:
            current = []
    except Exception as exc:  # pragma: no cover - best-effort fetch
        logger.warning(
            "Unable to fetch existing vulnerability notes; proceeding with provided note only: %s",
            exc
        )
        current = []
    merged = []
    seen_texts = set()
    for existing in current:
        if isinstance(existing, dict) and "note" in existing and existing["note"] not in seen_texts:
            merged.append(existing)
            seen_texts.add(existing["note"])
    if entry.get("note") not in seen_texts:
        merged.append(entry)
    response = self._request("put", f"/api/ss/vulnerability/{vulnerability_id}", json_data={"notes": merged})
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Failed to add note: {response.text}")
    return response.json()
|
|
295
|
+
|
|
296
|
+
def upload_finding_evidence(self, vulnerability_id: str, file_path: str) -> Dict[str, Any]:
    """
    Attach an evidence file to a finding/vulnerability.

    Args:
        vulnerability_id (str): The vulnerability ID.
        file_path (str): Path to the evidence file on disk.

    Returns:
        dict: API response.

    Raises:
        ValueError: If a required argument is missing.
        FileNotFoundError: If file_path does not point to a file.
        RuntimeError: If the upload is rejected by the API.
    """
    if not vulnerability_id:
        raise ValueError("Missing required field: vulnerability_id")
    if not file_path:
        raise ValueError("Missing required field: file_path")
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"Evidence file not found: {file_path}")
    endpoint = f"/api/ss/vulnerability/{vulnerability_id}/evidence"
    # In dry-run mode the file is never opened; _request just logs and stubs.
    if self.dry_run:
        return self._request("post", endpoint).json()
    with open(file_path, "rb") as handle:
        response = self._request(
            "post",
            endpoint,
            files={"file": (os.path.basename(file_path), handle)}
        )
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Evidence upload failed: {response.text}")
    return response.json()
|
|
326
|
+
|
|
327
|
+
def upload_testcase_evidence(
    self,
    project_id: str,
    testcase_id: str,
    file_path: str
) -> Dict[str, Any]:
    """
    Attach an evidence file to a testcase.

    Args:
        project_id (str): The project ID.
        testcase_id (str): The testcase ID.
        file_path (str): Path to the evidence file on disk.

    Returns:
        dict: API response.

    Raises:
        ValueError: If a required argument is missing.
        FileNotFoundError: If file_path does not point to a file.
        RuntimeError: If the upload is rejected by the API.
    """
    if not project_id:
        raise ValueError("Missing required field: project_id")
    if not testcase_id:
        raise ValueError("Missing required field: testcase_id")
    if not file_path:
        raise ValueError("Missing required field: file_path")
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"Evidence file not found: {file_path}")
    endpoint = f"/api/ss/project/{project_id}/testcase/{testcase_id}/file"
    # In dry-run mode the file is never opened; _request just logs and stubs.
    if self.dry_run:
        return self._request("post", endpoint).json()
    with open(file_path, "rb") as handle:
        response = self._request(
            "post",
            endpoint,
            files={"file": (os.path.basename(file_path), handle)}
        )
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Testcase evidence upload failed: {response.text}")
    return response.json()
|
|
365
|
+
|
|
366
|
+
def assign_findings_to_testcase(
    self,
    project_id: str,
    testcase_id: str,
    vulnerability_ids: List[str],
    existing_linked_vulnerabilities: Optional[List[str]] = None,
    additional_fields: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Link one or more findings to a testcase, preserving already-linked IDs.

    Args:
        project_id (str): The project ID.
        testcase_id (str): The testcase ID.
        vulnerability_ids (list): Vulnerability IDs to link.
        existing_linked_vulnerabilities (list, optional): IDs already linked,
            merged ahead of the new ones.
        additional_fields (dict, optional): Extra testcase fields to send
            alongside (e.g., status, tags).

    Returns:
        dict: API response from the testcase update.

    Raises:
        ValueError: If a required argument is missing or vulnerability_ids is empty.
    """
    if not project_id:
        raise ValueError("Missing required field: project_id")
    if not testcase_id:
        raise ValueError("Missing required field: testcase_id")
    if not vulnerability_ids:
        raise ValueError("vulnerability_ids must contain at least one ID")
    body = dict(additional_fields) if additional_fields else {}
    # Merge existing + new IDs, keeping first-seen order and dropping
    # falsy or duplicate entries.
    deduped: List[str] = []
    seen: Set[str] = set()
    for vuln_id in (existing_linked_vulnerabilities or []) + vulnerability_ids:
        if vuln_id and vuln_id not in seen:
            deduped.append(vuln_id)
            seen.add(vuln_id)
    body["linked_vulnerabilities"] = deduped
    return self.update_testcase(project_id, testcase_id, body)
|
|
402
|
+
|
|
403
|
+
def update_testcase(
    self,
    project_id: str,
    testcase_id: str,
    update_fields: Dict[str, Any]
) -> Dict[str, Any]:
    """
    PUT the given fields onto a testcase.

    Args:
        project_id (str): The project ID.
        testcase_id (str): The testcase ID.
        update_fields (dict): Fields to change (e.g., linked_vulnerabilities).

    Returns:
        dict: API response.

    Raises:
        ValueError: If a required argument is missing or update_fields is empty.
        RuntimeError: If the API rejects the update.
    """
    if not project_id:
        raise ValueError("Missing required field: project_id")
    if not testcase_id:
        raise ValueError("Missing required field: testcase_id")
    if not update_fields:
        raise ValueError("update_fields cannot be empty")
    response = self._request(
        "put",
        f"/api/ss/project/{project_id}/testcase/{testcase_id}",
        json_data=update_fields
    )
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Failed to update testcase: {response.text}")
    return response.json()
|
|
431
|
+
|
|
432
|
+
def __init__(self, api_key: str, base_url: str = "https://demo.attackforge.com", dry_run: bool = False):
    """
    Build a client for one AttackForge instance.

    Args:
        api_key (str): AttackForge API key, sent as the X-SSAPI-KEY header.
        base_url (str, optional): Base URL of the AttackForge instance;
            a trailing slash is stripped. Defaults to "https://demo.attackforge.com".
        dry_run (bool, optional): When True, requests are logged but never sent.
            Defaults to False.
    """
    # Request configuration.
    self.base_url = base_url.rstrip("/")
    self.headers = {
        "X-SSAPI-KEY": api_key,
        "Content-Type": "application/json",
        "Connection": "close"
    }
    self.dry_run = dry_run
    # Lazily-populated caches.
    self._asset_cache = None              # name -> asset record, filled by get_assets()
    self._project_scope_cache = {}        # project_id -> set(asset_names)
    self._writeup_cache = None            # all writeups, filled by get_all_writeups()
|
|
451
|
+
|
|
452
|
+
def get_all_writeups(self, force_refresh: bool = False) -> list:
    """
    Fetch and cache all writeups from the /api/ss/library endpoint.

    Args:
        force_refresh (bool): If True, bypass the cache and re-fetch.

    Returns:
        list: Writeup dicts (empty when the payload shape is unrecognized).

    Raises:
        RuntimeError: If the API request fails.
    """
    if self._writeup_cache is not None and not force_refresh:
        return self._writeup_cache
    resp = self._request("get", "/api/ss/library")
    if resp.status_code != 200:
        raise RuntimeError(f"Failed to fetch writeups: {resp.text}")
    data = resp.json()
    # The endpoint may return a list or a dict keyed by "vulnerabilities".
    if isinstance(data, dict) and "vulnerabilities" in data:
        self._writeup_cache = data["vulnerabilities"]
    elif isinstance(data, list):
        self._writeup_cache = data
    else:
        # Fix: the original fallback re-tested isinstance(data, list) here,
        # which is always False on this branch — the result is simply [].
        self._writeup_cache = []
    return self._writeup_cache
|
|
477
|
+
|
|
478
|
+
def find_writeup_in_cache(self, title: str, library: str = "Main Library") -> Optional[str]:
    """
    Search the cached writeups for one matching title and library.

    Fix: return annotation corrected to Optional[str] — the original claimed
    -> str but returns None when no writeup matches.

    Args:
        title (str): Writeup title to match exactly.
        library (str): The library name (default: "Main Library").

    Returns:
        Optional[str]: The writeup's reference_id if found, else None.
    """
    for writeup in self.get_all_writeups():
        # Library name may live under "belongs_to_library" or "library".
        if writeup.get("title") == title and writeup.get("belongs_to_library", writeup.get("library", "")) == library:
            return writeup.get("reference_id")
    return None
|
|
494
|
+
|
|
495
|
+
def _request(
    self,
    method: str,
    endpoint: str,
    json_data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    files: Optional[Dict[str, Any]] = None,
    data: Optional[Dict[str, Any]] = None,
    headers_override: Optional[Dict[str, str]] = None
) -> Any:
    """
    Dispatch an HTTP request to the AttackForge API.

    In dry-run mode nothing is sent: the request is logged and a
    DummyResponse stand-in is returned instead.

    Args:
        method (str): HTTP verb (e.g., "get", "put", "post").
        endpoint (str): API path appended to base_url.
        json_data (dict, optional): JSON body.
        params (dict, optional): Query-string parameters.
        files (dict, optional): Multipart file payload.
        data (dict, optional): Form-encoded body.
        headers_override (dict, optional): Headers merged over the defaults.

    Returns:
        Any: A requests.Response, or a DummyResponse in dry-run mode.
    """
    url = f"{self.base_url}{endpoint}"
    if self.dry_run:
        logger.info("[DRY RUN] %s %s", method.upper(), url)
        for template, value in (
            ("Payload: %s", json_data),
            ("Params: %s", params),
            ("Files: %s", list(files.keys()) if files else None),
            ("Data: %s", data),
        ):
            if value:
                logger.info(template, value)
        return DummyResponse()
    request_headers = dict(self.headers)
    if files:
        # requests must set its own multipart Content-Type boundary.
        request_headers.pop("Content-Type", None)
    if headers_override:
        request_headers.update(headers_override)
    return requests.request(
        method,
        url,
        headers=request_headers,
        json=json_data,
        params=params,
        files=files,
        data=data
    )
|
|
531
|
+
|
|
532
|
+
def get_assets(self) -> Dict[str, Dict[str, Any]]:
    """
    Return all assets keyed by asset name, fetching them lazily.

    Pages through /api/ss/assets in batches of 500 on first use; subsequent
    calls return the in-memory cache.

    Returns:
        dict: Mapping of asset name -> asset record.
    """
    if self._asset_cache is not None:
        return self._asset_cache
    self._asset_cache = {}
    cache = self._asset_cache
    offset, page_size = 0, 500
    while True:
        payload = self._request(
            "get", "/api/ss/assets", params={"skip": offset, "limit": page_size}
        ).json()
        for record in payload.get("assets", []):
            asset_name = record.get("asset")
            if asset_name:
                cache[asset_name] = record
        offset += page_size
        if offset >= payload.get("count", 0):
            break
    return cache
|
|
547
|
+
|
|
548
|
+
def get_asset_by_name(self, name: str) -> Optional[Dict[str, Any]]:
    """Return the asset record for *name* from the cache, or None if unknown."""
    all_assets = self.get_assets()
    return all_assets.get(name)
|
|
550
|
+
|
|
551
|
+
def create_asset(self, asset_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Disabled stub: asset creation is intentionally not performed.

    Always returns None without contacting the API. NOTE(review): the
    -> Dict[str, Any] annotation does not match the actual None result —
    confirm the intended contract before re-enabling creation.
    """
    return None
|
|
561
|
+
|
|
562
|
+
def get_project_by_name(self, name: str) -> Optional[Dict[str, Any]]:
    """
    Look up a project by its exact name.

    Scans all projects (any status, wide date window) and returns the first
    whose project_name equals *name*, or None if no project matches.
    """
    query = {
        "startDate": "2000-01-01T00:00:00.000Z",
        "endDate": "2100-01-01T00:00:00.000Z",
        "status": "All"
    }
    response = self._request("get", "/api/ss/projects", params=query)
    projects = response.json().get("projects", [])
    return next((p for p in projects if p.get("project_name") == name), None)
|
|
573
|
+
|
|
574
|
+
def get_project_scope(self, project_id: str) -> Set[str]:
    """
    Return the set of in-scope asset names for a project (cached per ID).

    Raises:
        RuntimeError: If the project cannot be retrieved.
    """
    cached = self._project_scope_cache.get(project_id)
    if cached is not None:
        return cached
    response = self._request("get", f"/api/ss/project/{project_id}")
    if response.status_code != 200:
        raise RuntimeError(f"Failed to retrieve project: {response.text}")
    scope = set(response.json().get("scope", []))
    self._project_scope_cache[project_id] = scope
    return scope
|
|
585
|
+
|
|
586
|
+
def update_project_scope(self, project_id: str, new_assets: List[str]) -> Dict[str, Any]:
    """
    Merge *new_assets* into the project's scope and push the update.

    The scope cache for the project is refreshed on success.

    Raises:
        RuntimeError: If the API rejects the update.
    """
    merged = self.get_project_scope(project_id) | set(new_assets)
    response = self._request("put", f"/api/ss/project/{project_id}", json_data={"scope": list(merged)})
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Failed to update project scope: {response.text}")
    self._project_scope_cache[project_id] = merged
    return response.json()
|
|
594
|
+
|
|
595
|
+
def create_project(self, name: str, **kwargs) -> Dict[str, Any]:
    """
    Create a new project, filling any unspecified field with a default.

    Args:
        name (str): Project name.
        **kwargs: Optional overrides for any payload field (code, groups,
            startDate, endDate, scope, testsuites, organization_code,
            vulnerability_code, scoringSystem, team_notifications,
            admin_notifications, custom_fields, asset_library_ids,
            sla_activation).

    Returns:
        dict: API response for the created project.

    Raises:
        RuntimeError: If project creation fails.
    """
    default_start, default_end = get_default_dates()
    defaults = {
        "code": "DEFAULT",
        "groups": [],
        "startDate": default_start,
        "endDate": default_end,
        "scope": [],
        "testsuites": [],
        "organization_code": "ORG_DEFAULT",
        "vulnerability_code": "VULN_",
        "scoringSystem": "CVSSv3.1",
        "team_notifications": [],
        "admin_notifications": [],
        "custom_fields": [],
        "asset_library_ids": [],
        "sla_activation": "automatic"
    }
    payload = {"name": name}
    for field, fallback in defaults.items():
        payload[field] = kwargs.get(field, fallback)
    response = self._request("post", "/api/ss/project", json_data=payload)
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Project creation failed: {response.text}")
    return response.json()
|
|
618
|
+
|
|
619
|
+
def update_project(self, project_id: str, update_fields: Dict[str, Any]) -> Dict[str, Any]:
    """Apply a partial update to an existing project.

    Args:
        project_id: Identifier of the project to modify.
        update_fields: Mapping of project fields to their new values.

    Returns:
        dict: The API response body.

    Raises:
        RuntimeError: If the update is rejected by the API.
    """
    response = self._request(
        "put", f"/api/ss/project/{project_id}", json_data=update_fields
    )
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Project update failed: {response.text}")
    return response.json()
|
|
624
|
+
|
|
625
|
+
def create_writeup(
    self,
    title: str,
    description: str,
    remediation_recommendation: str,
    custom_fields: Optional[list] = None,
    **kwargs
) -> Dict[str, Any]:
    """Create a vulnerability writeup in the AttackForge library.

    Args:
        title: Writeup title (required, non-empty).
        description: Writeup description (required, non-empty).
        remediation_recommendation: Remediation guidance (required, non-empty).
        custom_fields: Optional list of custom field objects.
        **kwargs: Any additional writeup fields accepted by the API
            (e.g. ``attack_scenario``); these override nothing above
            unless the same key is passed.

    Returns:
        dict: The created writeup as returned by the API.

    Raises:
        ValueError: If any required field is empty or missing.
        RuntimeError: If the API call does not succeed.
    """
    if not title or not description or not remediation_recommendation:
        raise ValueError("Missing required field: title, description, or remediation_recommendation")

    payload = {
        "title": title,
        "description": description,
        "remediation_recommendation": remediation_recommendation,
        "custom_fields": custom_fields or []
    }
    payload.update(kwargs)
    resp = self._request("post", "/api/ss/library/vulnerability", json_data=payload)
    if resp.status_code in (200, 201):
        # Fix: removed a leftover debug print of the raw API response;
        # callers receive the parsed JSON body unchanged.
        return resp.json()
    raise RuntimeError(f"Writeup creation failed: {resp.text}")
|
|
649
|
+
|
|
650
|
+
def create_finding_from_writeup(
    self,
    project_id: str,
    writeup_id: str,
    priority: str,
    affected_assets: Optional[list] = None,
    **kwargs
) -> Dict[str, Any]:
    """Create a finding from a writeup, supporting multiple affected assets.

    Args:
        project_id (str): The project ID.
        writeup_id (str): The writeup/library ID.
        priority (str): The priority.
        affected_assets (list, optional): List of affected asset objects
            (dicts with ``assetName`` or ``name``) or plain names.
        **kwargs: Additional fields forwarded verbatim to the API payload.

    Returns:
        dict: Created finding details.

    Raises:
        ValueError: If project_id, writeup_id, or priority is missing.
        RuntimeError: If the API call does not succeed.
    """
    if not project_id or not writeup_id or not priority:
        raise ValueError("Missing required field: project_id, writeup_id, or priority")

    payload: Dict[str, Any] = {
        "projectId": project_id,
        "vulnerabilityLibraryId": writeup_id,
        "priority": priority
    }
    if affected_assets is not None:
        # Normalise each entry down to a bare asset name.
        names = []
        for entry in affected_assets:
            if isinstance(entry, dict) and "assetName" in entry:
                names.append(entry["assetName"])
            elif isinstance(entry, dict) and "name" in entry:
                names.append(entry["name"])
            else:
                names.append(entry)
        payload["affected_assets"] = [{"assetName": candidate} for candidate in names]
    payload.update(kwargs)
    response = self._request("post", "/api/ss/vulnerability-with-library", json_data=payload)
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Finding creation from writeup failed: {response.text}")
    return response.json()
|
|
692
|
+
|
|
693
|
+
def create_vulnerability(
    self,
    project_id: str,
    title: str,
    affected_assets: list,
    priority: str,
    likelihood_of_exploitation: int,
    description: str,
    attack_scenario: str,
    remediation_recommendation: str,
    steps_to_reproduce: str,
    writeup_id: Optional[str] = None,
    tags: Optional[list] = None,
    notes: Optional[list] = None,
    is_zeroday: bool = False,
    is_visible: bool = True,
    import_to_library: Optional[str] = None,
    import_source: Optional[str] = None,
    import_source_id: Optional[str] = None,
    custom_fields: Optional[list] = None,
    linked_testcases: Optional[list] = None,
    custom_tags: Optional[list] = None,
    writeup_custom_fields: Optional[list] = None,
) -> Dict[str, Any]:
    """Create a new security finding (vulnerability) in AttackForge with support for multiple assets.

    The finding is always created via a library writeup: an explicit
    ``writeup_id`` is used when given; otherwise a writeup matching
    ``title`` is looked up in the "Main Vulnerabilities" library and
    created on the fly when absent. Any affected assets not already in
    the project scope are added to it first.

    Args:
        project_id (str): The project ID.
        title (str): The title of the finding.
        affected_assets (list): List of affected asset objects or names.
        priority (str): The priority (e.g., "Critical").
        likelihood_of_exploitation (int): Likelihood of exploitation (e.g., 10).
        description (str): Description of the finding.
        attack_scenario (str): Attack scenario details.
        remediation_recommendation (str): Remediation recommendation.
        steps_to_reproduce (str): Steps to reproduce the finding.
        writeup_id (str, optional): Existing writeup/library reference ID to use directly.
        tags (list, optional): List of tags.
        notes (list, optional): List of notes.
        is_zeroday (bool, optional): Whether this is a zero-day finding.
        is_visible (bool, optional): Whether the finding is visible.
        import_to_library (str, optional): Library to import to.
        import_source (str, optional): Source of import.
        import_source_id (str, optional): Source ID for import.
        custom_fields (list, optional): List of custom fields.
        linked_testcases (list, optional): List of linked testcases.
        custom_tags (list, optional): List of custom tags.
        writeup_custom_fields (list, optional): List of custom fields for the writeup.

    Returns:
        dict: Created vulnerability details.

    Raises:
        RuntimeError: If writeup creation/resolution or finding creation fails.
    """
    # Normalise the affected assets to plain names.
    asset_names = []
    for asset in affected_assets:
        if isinstance(asset, dict) and "assetName" in asset:
            name = asset["assetName"]
        elif isinstance(asset, dict) and "name" in asset:
            name = asset["name"]
        else:
            name = asset
        # Fix: dropped the unused `asset_obj` binding and the commented-out
        # auto-creation dead code; the lookup call itself is kept so any
        # server-side effect of the request is preserved.
        # NOTE(review): confirm whether missing assets should be created here.
        self.get_asset_by_name(name)
        asset_names.append(name)

    # Ensure all assets are in project scope before attaching the finding.
    scope = self.get_project_scope(project_id)
    missing_in_scope = [n for n in asset_names if n not in scope]
    if missing_in_scope:
        self.update_project_scope(project_id, missing_in_scope)

    finding_payload = {
        "affected_assets": [{"assetName": n} for n in asset_names],
        "likelihood_of_exploitation": likelihood_of_exploitation,
        "steps_to_reproduce": steps_to_reproduce,
        "tags": tags or [],
        "is_zeroday": is_zeroday,
        "is_visible": is_visible,
        "import_to_library": import_to_library,
        "import_source": import_source,
        "import_source_id": import_source_id,
        "custom_fields": custom_fields or [],
        "linked_testcases": linked_testcases or [],
        "custom_tags": custom_tags or [],
    }
    if notes:
        finding_payload["notes"] = notes
    # Drop unset optional fields so they are omitted from the request body.
    finding_payload = {k: v for k, v in finding_payload.items() if v is not None}

    # Resolve the writeup to attach the finding to.
    resolved_writeup_id = writeup_id
    if not resolved_writeup_id:
        self.get_all_writeups()
        resolved_writeup_id = self.find_writeup_in_cache(title, "Main Vulnerabilities")
    if not resolved_writeup_id:
        writeup_fields = writeup_custom_fields[:] if writeup_custom_fields else []
        if import_source:
            writeup_fields.append({"key": "import_source", "value": import_source})
        self.create_writeup(
            title=title,
            description=description,
            remediation_recommendation=remediation_recommendation,
            attack_scenario=attack_scenario,
            custom_fields=writeup_fields
        )
        # Refresh the cache and search again.
        self.get_all_writeups(force_refresh=True)
        resolved_writeup_id = self.find_writeup_in_cache(
            title, "Main Vulnerabilities"
        )
        if not resolved_writeup_id:
            raise RuntimeError(
                "Writeup creation failed: missing reference_id"
            )
    result = self.create_finding_from_writeup(
        project_id=project_id,
        writeup_id=resolved_writeup_id,
        priority=priority,
        **finding_payload
    )
    return result
|
|
810
|
+
|
|
811
|
+
|
|
812
|
+
def create_vulnerability_old(
    self,
    project_id: str,
    title: str,
    affected_asset_name: str,
    priority: str,
    likelihood_of_exploitation: int,
    description: str,
    attack_scenario: str,
    remediation_recommendation: str,
    steps_to_reproduce: str,
    tags: Optional[list] = None,
    notes: Optional[list] = None,
    is_zeroday: bool = False,
    is_visible: bool = True,
    import_to_library: Optional[str] = None,
    import_source: Optional[str] = None,
    import_source_id: Optional[str] = None,
    custom_fields: Optional[list] = None,
    linked_testcases: Optional[list] = None,
    custom_tags: Optional[list] = None,
) -> Dict[str, Any]:
    """[DEPRECATED] Create a new security finding (vulnerability) in AttackForge.

    Single-asset legacy variant; prefer :meth:`create_vulnerability`.

    Args:
        project_id (str): The project ID.
        title (str): The title of the finding.
        affected_asset_name (str): The name of the affected asset.
        priority (str): The priority (e.g., "Critical").
        likelihood_of_exploitation (int): Likelihood of exploitation (e.g., 10).
        description (str): Description of the finding.
        attack_scenario (str): Attack scenario details.
        remediation_recommendation (str): Remediation recommendation.
        steps_to_reproduce (str): Steps to reproduce the finding.
        tags (list, optional): List of tags.
        notes (list, optional): List of notes.
        is_zeroday (bool, optional): Whether this is a zero-day finding.
        is_visible (bool, optional): Whether the finding is visible.
        import_to_library (str, optional): Library to import to.
        import_source (str, optional): Source of import.
        import_source_id (str, optional): Source ID for import.
        custom_fields (list, optional): List of custom fields.
        linked_testcases (list, optional): List of linked testcases.
        custom_tags (list, optional): List of custom tags.

    Returns:
        dict: Created vulnerability details.

    Raises:
        ValueError: If any required field is missing.
        RuntimeError: If vulnerability creation fails.
    """
    # Reject any missing mandatory argument up front.
    mandatory = {
        "project_id": project_id,
        "title": title,
        "affected_asset_name": affected_asset_name,
        "priority": priority,
        "likelihood_of_exploitation": likelihood_of_exploitation,
        "description": description,
        "attack_scenario": attack_scenario,
        "remediation_recommendation": remediation_recommendation,
        "steps_to_reproduce": steps_to_reproduce,
    }
    for field_name, field_value in mandatory.items():
        if field_value is None:
            raise ValueError(f"Missing required field: {field_name}")

    payload: Dict[str, Any] = {
        "projectId": project_id,
        "title": title,
        "affected_asset_name": affected_asset_name,
        "priority": priority,
        "likelihood_of_exploitation": likelihood_of_exploitation,
        "description": description,
        "attack_scenario": attack_scenario,
        "remediation_recommendation": remediation_recommendation,
        "steps_to_reproduce": steps_to_reproduce,
        "tags": tags or [],
        "is_zeroday": is_zeroday,
        "is_visible": is_visible,
        "import_to_library": import_to_library,
        "import_source": import_source,
        "import_source_id": import_source_id,
        "custom_fields": custom_fields or [],
        "linked_testcases": linked_testcases or [],
        "custom_tags": custom_tags or [],
    }
    if notes:
        payload["notes"] = notes
    # Strip unset optional fields so they are omitted from the request body.
    payload = {key: value for key, value in payload.items() if value is not None}
    response = self._request("post", "/api/ss/vulnerability", json_data=payload)
    if response.status_code not in (200, 201):
        raise RuntimeError(f"Vulnerability creation failed: {response.text}")
    return response.json()
|
|
908
|
+
|
|
909
|
+
class DummyResponse:
    """Stand-in for a real HTTP response, used in dry-run mode.

    Mimics the minimal response interface the client relies on: a
    ``status_code`` attribute, a ``text`` body, and a ``json()`` method.
    """

    def __init__(self) -> None:
        # Report success without having touched the network.
        self.status_code = 200
        self.text = "[DRY RUN] No real API call performed."

    def json(self) -> Dict[str, Any]:
        """Return an empty payload, mirroring a no-op API call."""
        return {}
|
|
916
|
+
|
|
917
|
+
|
|
918
|
+
def get_default_dates() -> Tuple[str, str]:
    """Return default project start/end timestamps.

    The start is the current UTC time and the end is 30 days later, both
    rendered as ISO-8601 strings with millisecond precision and a trailing
    ``Z`` in place of the ``+00:00`` offset.

    Returns:
        Tuple[str, str]: ``(start, end)`` timestamp strings.
    """
    def _as_zulu(moment: datetime) -> str:
        # isoformat() emits "+00:00" for UTC; the API expects "Z".
        return moment.isoformat(timespec="milliseconds").replace("+00:00", "Z")

    current = datetime.now(timezone.utc)
    return _as_zulu(current), _as_zulu(current + timedelta(days=30))
|