defectdojo-cli2 0.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- defectdojo_cli2/__init__.py +7 -0
- defectdojo_cli2/__main__.py +45 -0
- defectdojo_cli2/engagements.py +373 -0
- defectdojo_cli2/findings.py +745 -0
- defectdojo_cli2/tests.py +442 -0
- defectdojo_cli2/util.py +28 -0
- defectdojo_cli2-0.0.0.dist-info/AUTHORS +1 -0
- defectdojo_cli2-0.0.0.dist-info/LICENSE +21 -0
- defectdojo_cli2-0.0.0.dist-info/METADATA +20 -0
- defectdojo_cli2-0.0.0.dist-info/RECORD +14 -0
- defectdojo_cli2-0.0.0.dist-info/WHEEL +5 -0
- defectdojo_cli2-0.0.0.dist-info/entry_points.txt +2 -0
- defectdojo_cli2-0.0.0.dist-info/pbr.json +1 -0
- defectdojo_cli2-0.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,745 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
import json
|
|
3
|
+
import sys
|
|
4
|
+
import argparse
|
|
5
|
+
import requests
|
|
6
|
+
import re
|
|
7
|
+
from unittest.mock import PropertyMock
|
|
8
|
+
from tabulate import tabulate
|
|
9
|
+
from defectdojo_cli2.util import Util
|
|
10
|
+
from defectdojo_cli2.engagements import Engagements
|
|
11
|
+
from defectdojo_cli2.tests import Tests
|
|
12
|
+
|
|
13
|
+
class Findings(object):
|
|
14
|
+
    def parse_cli_args(self):
        """Dispatch the findings <sub_command> given on the command line.

        Reads sys.argv[2] as the sub_command name and invokes the matching
        private method (e.g. 'list' -> self._list). Exits with status 1 if
        the sub_command does not map to an existing method.
        """
        parser = argparse.ArgumentParser(
            description='Perform <sub_command> related to findings on DefectDojo',
            usage='''defectdojo findings <sub_command> [<args>]

You can use the following sub_commands:
   import      Import findings (scan results)
   upload      Same as import (deprecated, EOL december/2021)
   reimport    Re-import findings of a test
   list        List findings
   update      Update a finding
   close       Close a finding
''')
        parser.add_argument('sub_command', help='Sub_command to run')
        # Get sub_command: parse only argv[2]; the sub_command's own flags
        # are parsed later by the dispatched method itself
        args = parser.parse_args(sys.argv[2:3])
        if not hasattr(self, '_'+args.sub_command):
            print('Unrecognized sub_command')
            parser.print_help()
            exit(1)
        # Use dispatch pattern to invoke method with same name (that starts with _)
        getattr(self, '_'+args.sub_command)()
|
|
36
|
+
|
|
37
|
+
    # Backwards compatibility: 'upload' is a deprecated alias for 'import'
    # (EOL december/2021, per the sub_command help text)
    def _upload(self):
        """Deprecated alias for the 'import' sub_command."""
        self._import()
|
|
40
|
+
|
|
41
|
+
def import_(self, url, api_key, result_file, scanner, engagement_id, lead_id,
|
|
42
|
+
active=None, verified=None, scan_date=None, min_severity=None,
|
|
43
|
+
tag_test=None, test_type=None, env=None, auto_close=None,
|
|
44
|
+
skip_duplicates=None, version=None, build_id=None, branch_tag=None,
|
|
45
|
+
commit_hash=None, **kwargs):
|
|
46
|
+
# Prepare JSON data to be send
|
|
47
|
+
request_json = dict()
|
|
48
|
+
API_URL = url+'/api/v2'
|
|
49
|
+
IMPORT_SCAN_URL = API_URL+'/import-scan/'
|
|
50
|
+
if scan_date is not None:
|
|
51
|
+
request_json['scan_date'] = scan_date
|
|
52
|
+
if scanner is not None:
|
|
53
|
+
request_json['scan_type'] = scanner
|
|
54
|
+
if verified is not None:
|
|
55
|
+
request_json['verified'] = verified
|
|
56
|
+
if engagement_id is not None:
|
|
57
|
+
request_json['engagement'] = engagement_id
|
|
58
|
+
if lead_id is not None:
|
|
59
|
+
request_json['lead'] = lead_id
|
|
60
|
+
if active is not None:
|
|
61
|
+
request_json['active'] = active
|
|
62
|
+
if min_severity is not None:
|
|
63
|
+
request_json['minimum_severity'] = min_severity
|
|
64
|
+
if tag_test is not None:
|
|
65
|
+
request_json['tags'] = tag_test
|
|
66
|
+
if test_type is not None:
|
|
67
|
+
request_json['test_type'] = test_type
|
|
68
|
+
if env is not None:
|
|
69
|
+
request_json['environment'] = env
|
|
70
|
+
if auto_close is not None:
|
|
71
|
+
request_json['close_old_findings'] = True
|
|
72
|
+
if skip_duplicates is not None:
|
|
73
|
+
request_json['skip_duplicates'] = True
|
|
74
|
+
if version is not None:
|
|
75
|
+
request_json['version'] = version
|
|
76
|
+
if build_id is not None:
|
|
77
|
+
request_json['build_id'] = build_id
|
|
78
|
+
if branch_tag is not None:
|
|
79
|
+
request_json['branch_tag'] = branch_tag
|
|
80
|
+
if commit_hash is not None:
|
|
81
|
+
request_json['commit_hash'] = commit_hash
|
|
82
|
+
|
|
83
|
+
# Prepare file data to be send
|
|
84
|
+
files = dict()
|
|
85
|
+
files['file'] = open(result_file)
|
|
86
|
+
|
|
87
|
+
# Make request
|
|
88
|
+
response = Util().request_apiv2('POST', IMPORT_SCAN_URL, api_key,
|
|
89
|
+
files=files, data=request_json)
|
|
90
|
+
return response
|
|
91
|
+
|
|
92
|
+
def _import(self):
|
|
93
|
+
# Read user-supplied arguments
|
|
94
|
+
parser = argparse.ArgumentParser(description='Import findings (scan results) to DefectDojo',
|
|
95
|
+
usage='defectdojo findings import RESULT_FILE [<args>]')
|
|
96
|
+
optional = parser._action_groups.pop()
|
|
97
|
+
required = parser.add_argument_group('required arguments')
|
|
98
|
+
parser.add_argument(
|
|
99
|
+
'result_file',
|
|
100
|
+
help='File with the results to be imported'
|
|
101
|
+
)
|
|
102
|
+
required.add_argument(
|
|
103
|
+
'--scanner',
|
|
104
|
+
help='Type of scanner',
|
|
105
|
+
required=True
|
|
106
|
+
)
|
|
107
|
+
required.add_argument('--url', help='DefectDojo URL', required=True)
|
|
108
|
+
required.add_argument('--api_key', help='API v2 Key', required=True)
|
|
109
|
+
required.add_argument('--engagement_id', help='Engagement ID', required=True)
|
|
110
|
+
required.add_argument('--lead_id', help='ID of the user conducting the operation', required=True)
|
|
111
|
+
optional.add_argument('--test_type', help='Test type / title (default = scanner name)')
|
|
112
|
+
optional.add_argument('--env', help='Environment')
|
|
113
|
+
optional.add_argument('--scan_date', help='Date the scan was perfomed (default = TODAY)',
|
|
114
|
+
metavar='YYYY-MM-DD', default=datetime.now().strftime('%Y-%m-%d'))
|
|
115
|
+
optional.add_argument('--active', help='Mark vulnerabilities found as active (default)',
|
|
116
|
+
action='store_true', dest='active')
|
|
117
|
+
optional.add_argument('--inactive', help='Mark vulnerabilities found as inactive',
|
|
118
|
+
action='store_false', dest='active')
|
|
119
|
+
optional.add_argument('--verified', help='Mark vulnerabilities found as verified',
|
|
120
|
+
action='store_true', dest='verified')
|
|
121
|
+
optional.add_argument('--unverified', help='Mark vulnerabilities found as unverified (default)',
|
|
122
|
+
action='store_false', dest='verified')
|
|
123
|
+
optional.set_defaults(active=True, verified=False)
|
|
124
|
+
optional.add_argument('--min_severity', help='Ignore findings below this severity (default = "Info")',
|
|
125
|
+
choices=['Info', 'Low', 'Medium', 'High', 'Critical'], default='Info')
|
|
126
|
+
optional.add_argument('--tag_test', help='Test tag (can be used multiple times)', action='append')
|
|
127
|
+
optional.add_argument('--note',
|
|
128
|
+
help='Add the string passed to this flag as a'
|
|
129
|
+
'note to each finding imported'
|
|
130
|
+
'(can have a big impact on performance'
|
|
131
|
+
'depending on the amount of findings'
|
|
132
|
+
'imported)')
|
|
133
|
+
optional.add_argument('--auto_close',
|
|
134
|
+
help='Close all open findings from the same '
|
|
135
|
+
+'--test_type that are not listed on '
|
|
136
|
+
+'this import (default = False)',
|
|
137
|
+
action='store_true')
|
|
138
|
+
|
|
139
|
+
optional.add_argument(
|
|
140
|
+
'--skip_duplicates',
|
|
141
|
+
help='Dont import duplicates '
|
|
142
|
+
'(requires deduplication) (default = False)',
|
|
143
|
+
action='store_true'
|
|
144
|
+
)
|
|
145
|
+
|
|
146
|
+
optional.add_argument(
|
|
147
|
+
'--version',
|
|
148
|
+
help='Current version of the project'
|
|
149
|
+
)
|
|
150
|
+
|
|
151
|
+
optional.add_argument(
|
|
152
|
+
'--build_id',
|
|
153
|
+
help='Build ID'
|
|
154
|
+
)
|
|
155
|
+
|
|
156
|
+
optional.add_argument(
|
|
157
|
+
'--branch_tag',
|
|
158
|
+
help='Branch or tag scanned'
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
optional.add_argument(
|
|
162
|
+
'--commit_hash',
|
|
163
|
+
help='Commit HASH'
|
|
164
|
+
)
|
|
165
|
+
parser._action_groups.append(optional)
|
|
166
|
+
# Parse out arguments ignoring the first three (because we're inside a sub-command)
|
|
167
|
+
args = vars(parser.parse_args(sys.argv[3:]))
|
|
168
|
+
|
|
169
|
+
# Import results
|
|
170
|
+
response = self.import_(**args)
|
|
171
|
+
# Load import response as JSON
|
|
172
|
+
out_error = False
|
|
173
|
+
try:
|
|
174
|
+
import_out = json.loads(response.text)
|
|
175
|
+
except:
|
|
176
|
+
out_error = True
|
|
177
|
+
|
|
178
|
+
# If --note flag was passed
|
|
179
|
+
if args['note'] is not None:
|
|
180
|
+
# Get the findings that were imported
|
|
181
|
+
tmp_args = dict()
|
|
182
|
+
tmp_args['url'] = args['url']
|
|
183
|
+
tmp_args['api_key'] = args['api_key']
|
|
184
|
+
tmp_args['test_id'] = import_out['test'] # Get the test ID from the import output
|
|
185
|
+
tmp_response = self.list(**tmp_args)
|
|
186
|
+
imported_findings_out = json.loads(tmp_response.text)
|
|
187
|
+
# Create a list with all the imported findings IDs
|
|
188
|
+
imported_findings_ids = set()
|
|
189
|
+
for imported_finding in imported_findings_out['results']:
|
|
190
|
+
imported_findings_ids.add(imported_finding['id'])
|
|
191
|
+
# Add note to each imported finding
|
|
192
|
+
tmp_args = dict()
|
|
193
|
+
tmp_args['url'] = args['url']
|
|
194
|
+
tmp_args['api_key'] = args['api_key']
|
|
195
|
+
tmp_args['entry'] = args['note']
|
|
196
|
+
for imported_finding_id in imported_findings_ids:
|
|
197
|
+
tmp_args['finding_id'] = imported_finding_id
|
|
198
|
+
self.add_note(**tmp_args)
|
|
199
|
+
|
|
200
|
+
# Pretty print JSON response
|
|
201
|
+
if not out_error:
|
|
202
|
+
Util().default_output(response, sucess_status_code=201)
|
|
203
|
+
else:
|
|
204
|
+
print(response.text)
|
|
205
|
+
|
|
206
|
+
def reimport(self, url, api_key, result_file, scanner, scan_date, test_id,
|
|
207
|
+
active=None, verified=None, min_severity=None, auto_close=None,
|
|
208
|
+
version=None, build_id=None, branch_tag=None, commit_hash=None,
|
|
209
|
+
**kwargs):
|
|
210
|
+
# Prepare JSON data to be send
|
|
211
|
+
request_json = dict()
|
|
212
|
+
API_URL = url+'/api/v2'
|
|
213
|
+
REIMPORT_SCAN_URL = API_URL+'/reimport-scan/'
|
|
214
|
+
request_json['scan_type'] = scanner
|
|
215
|
+
request_json['scan_date'] = scan_date
|
|
216
|
+
request_json['test'] = test_id
|
|
217
|
+
|
|
218
|
+
if active is not None:
|
|
219
|
+
request_json['active'] = active
|
|
220
|
+
if verified is not None:
|
|
221
|
+
request_json['verified'] = verified
|
|
222
|
+
if min_severity is not None:
|
|
223
|
+
request_json['minimum_severity'] = min_severity
|
|
224
|
+
if auto_close is not None:
|
|
225
|
+
request_json['close_old_findings'] = True
|
|
226
|
+
if version is not None:
|
|
227
|
+
request_json['version'] = version
|
|
228
|
+
if build_id is not None:
|
|
229
|
+
request_json['build_id'] = build_id
|
|
230
|
+
if branch_tag is not None:
|
|
231
|
+
request_json['branch_tag'] = branch_tag
|
|
232
|
+
if commit_hash is not None:
|
|
233
|
+
request_json['commit_hash'] = commit_hash
|
|
234
|
+
|
|
235
|
+
# Prepare file data to be send
|
|
236
|
+
files = dict()
|
|
237
|
+
files['file'] = open(result_file)
|
|
238
|
+
|
|
239
|
+
# Make request
|
|
240
|
+
response = Util().request_apiv2(
|
|
241
|
+
'POST', REIMPORT_SCAN_URL, api_key, files=files, data=request_json
|
|
242
|
+
)
|
|
243
|
+
return response
|
|
244
|
+
|
|
245
|
+
def _reimport(self):
|
|
246
|
+
# Read user-supplied arguments
|
|
247
|
+
parser = argparse.ArgumentParser(description='Re-import findings (scan results) to DefectDojo',
|
|
248
|
+
usage='defectdojo findings reimport RESULT_FILE [<args>]')
|
|
249
|
+
optional = parser._action_groups.pop()
|
|
250
|
+
required = parser.add_argument_group('required arguments')
|
|
251
|
+
|
|
252
|
+
parser.add_argument(
|
|
253
|
+
'result_file',
|
|
254
|
+
help='File with the results to be imported'
|
|
255
|
+
)
|
|
256
|
+
|
|
257
|
+
required.add_argument(
|
|
258
|
+
'--scanner',
|
|
259
|
+
help='Type of scanner',
|
|
260
|
+
required=True
|
|
261
|
+
)
|
|
262
|
+
|
|
263
|
+
required.add_argument(
|
|
264
|
+
'--url',
|
|
265
|
+
help='DefectDojo URL',
|
|
266
|
+
required=True
|
|
267
|
+
)
|
|
268
|
+
|
|
269
|
+
required.add_argument(
|
|
270
|
+
'--api_key',
|
|
271
|
+
help='API v2 Key',
|
|
272
|
+
required=True
|
|
273
|
+
)
|
|
274
|
+
|
|
275
|
+
required.add_argument(
|
|
276
|
+
'--test_id',
|
|
277
|
+
help='Test to reimport',
|
|
278
|
+
required=True
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
optional.add_argument(
|
|
282
|
+
'--scan_date',
|
|
283
|
+
help='Date the scan was perfomed (default = TODAY)',
|
|
284
|
+
metavar='YYYY-MM-DD',
|
|
285
|
+
default=datetime.now().strftime('%Y-%m-%d')
|
|
286
|
+
)
|
|
287
|
+
|
|
288
|
+
optional.add_argument(
|
|
289
|
+
'--active',
|
|
290
|
+
help='Mark vulnerabilities found as active (default)',
|
|
291
|
+
action='store_true',
|
|
292
|
+
dest='active'
|
|
293
|
+
)
|
|
294
|
+
|
|
295
|
+
optional.add_argument(
|
|
296
|
+
'--inactive',
|
|
297
|
+
help='Mark vulnerabilities found as inactive',
|
|
298
|
+
action='store_false',
|
|
299
|
+
dest='active'
|
|
300
|
+
)
|
|
301
|
+
|
|
302
|
+
optional.add_argument(
|
|
303
|
+
'--verified',
|
|
304
|
+
help='Mark vulnerabilities found as verified',
|
|
305
|
+
action='store_true',
|
|
306
|
+
dest='verified'
|
|
307
|
+
)
|
|
308
|
+
|
|
309
|
+
optional.add_argument(
|
|
310
|
+
'--unverified',
|
|
311
|
+
help='Mark vulnerabilities found as unverified (default)',
|
|
312
|
+
action='store_false',
|
|
313
|
+
dest='verified'
|
|
314
|
+
)
|
|
315
|
+
|
|
316
|
+
optional.set_defaults(active=True, verified=False)
|
|
317
|
+
|
|
318
|
+
optional.add_argument(
|
|
319
|
+
'--min_severity',
|
|
320
|
+
help='Ignore findings below this severity (default = "Low")',
|
|
321
|
+
choices=['Informational', 'Low', 'Medium', 'High', 'Critical'],
|
|
322
|
+
default='Low'
|
|
323
|
+
)
|
|
324
|
+
|
|
325
|
+
optional.add_argument(
|
|
326
|
+
'--auto_close',
|
|
327
|
+
help='Close all open findings from the same --test_type that are '
|
|
328
|
+
'not listed on this import (default = False)',
|
|
329
|
+
action='store_true'
|
|
330
|
+
)
|
|
331
|
+
|
|
332
|
+
optional.add_argument(
|
|
333
|
+
'--version',
|
|
334
|
+
help='Current version of the project'
|
|
335
|
+
)
|
|
336
|
+
|
|
337
|
+
optional.add_argument(
|
|
338
|
+
'--build_id',
|
|
339
|
+
help='Build ID'
|
|
340
|
+
)
|
|
341
|
+
|
|
342
|
+
optional.add_argument(
|
|
343
|
+
'--branch_tag',
|
|
344
|
+
help='Branch or tag scanned'
|
|
345
|
+
)
|
|
346
|
+
|
|
347
|
+
optional.add_argument(
|
|
348
|
+
'--commit_hash',
|
|
349
|
+
help='Commit HASH'
|
|
350
|
+
)
|
|
351
|
+
|
|
352
|
+
parser._action_groups.append(optional)
|
|
353
|
+
|
|
354
|
+
# Parse out arguments ignoring the first three (because we're inside a sub-command)
|
|
355
|
+
args = vars(parser.parse_args(sys.argv[3:]))
|
|
356
|
+
# Re-import results
|
|
357
|
+
response = self.reimport(**args)
|
|
358
|
+
# Load re-import response as JSON
|
|
359
|
+
out_error = False
|
|
360
|
+
try:
|
|
361
|
+
import_out = json.loads(response.text)
|
|
362
|
+
except:
|
|
363
|
+
out_error = True
|
|
364
|
+
|
|
365
|
+
# Pretty print JSON response
|
|
366
|
+
if not out_error:
|
|
367
|
+
Util().default_output(response, sucess_status_code=201)
|
|
368
|
+
else:
|
|
369
|
+
print(response.text)
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
    def list(self, url, api_key, finding_id=None, test_id=None, product_id=None,
             engagement_id=None, test_type=None, active=None, closed=None,
             valid=None, scope=None, limit=None, tag_test=None, tags_operator=None, **kwargs):
        """Fetch findings from GET /api/v2/findings/ with optional filters.

        Returns the requests.Response. When `limit` is None, a first 1-item
        request is made to learn the total count, which is then used as the
        limit (i.e. fetch everything). When more than one test_type resolves,
        delegates to self.list_multiple_test_types.
        """
        # Create parameters to be requested
        request_params = dict()
        API_URL = url+'/api/v2'
        FINDINGS_URL = API_URL+'/findings/'
        if finding_id is not None:
            request_params['id'] = finding_id
        if test_id is not None:
            request_params['test'] = test_id
        if product_id is not None:
            request_params['test__engagement__product'] = product_id
        if engagement_id is not None:
            request_params['test__engagement'] = engagement_id
        # The numeric values 2/3 below are presumably DefectDojo's filter
        # form choice codes for yes/no — TODO confirm against the target
        # DefectDojo version's API
        if active is not None:
            if active is True:
                request_params['active'] = 2
            elif active is False:
                request_params['active'] = 3
        if closed:
            request_params['is_Mitigated'] = True
        if valid is not None:
            if valid is True:
                request_params['false_p'] = 3
            elif valid is False:
                request_params['false_p'] = 2
        if scope is not None:
            if scope is True:
                request_params['out_of_scope'] = 3
            elif scope is False:
                request_params['out_of_scope'] = 2
        if limit is not None:
            request_params['limit'] = limit
        else:
            # Make a request to API getting only one finding to retrieve the total amount of findings
            # NOTE(review): temp_params carries API param names ('id', 'test',
            # ...), not this method's argument names, so in the recursive call
            # they land in **kwargs and are ignored — the count is therefore
            # unfiltered. Harmless (it only over-sizes the limit) but worth
            # confirming.
            temp_params = request_params.copy()
            temp_params['url'] = url
            temp_params['api_key'] = api_key
            temp_params['limit'] = 1
            temp_response = self.list(**temp_params)
            limit = int(json.loads(temp_response.text)['count'])
            request_params['limit'] = limit
        if tag_test:
            # First get all test types with the tags we're looking for
            test_type_list = Tests().get_test_type_by_tags(url, api_key, tag_test, tags_operator, engagement_id)
            # Add them to request parameters
            # (so the tags aren't actually passed to request, only their test_types)
            if test_type is not None:
                # If a test_type was passed by the user, append it to the list
                test_type_list = test_type_list + test_type
            test_type = test_type_list
        if test_type is not None:
            # Transform test_type names to IDs (entries may already be IDs
            # when they came from get_test_type_by_tags)
            test_type_ids = set()
            for tt in test_type:
                if type(tt) is str:
                    temp_params = dict()
                    temp_params['name'] = tt
                    # Make a get request to /test_types passing the test_type as parameter
                    temp_response = Util().request_apiv2('GET', API_URL+'/test_types/', api_key, params=temp_params)
                    # Tranform the above response in json and get the id
                    test_type_ids.add(json.loads(temp_response.text)['results'][0]['id'])
                else:
                    test_type_ids.add(tt)
            # If there's only one test_type
            if (len(test_type_ids) == 1):
                # Add to request_params
                request_params['test__test_type'] = list(test_type_ids)[0]
            else:
                # Use the appropriate method (one request per test_type,
                # merged into a single response)
                return self.list_multiple_test_types(url, api_key, test_type_ids, **request_params)

        # Make request
        response = Util().request_apiv2('GET', FINDINGS_URL, api_key, params=request_params)
        return response
|
|
448
|
+
|
|
449
|
+
def _list(self):
|
|
450
|
+
# Read user-supplied arguments
|
|
451
|
+
parser = argparse.ArgumentParser(description='List findings stored on DefectDojo',
|
|
452
|
+
usage='defectdojo findings list [<args>]')
|
|
453
|
+
optional = parser._action_groups.pop()
|
|
454
|
+
required = parser.add_argument_group('required arguments')
|
|
455
|
+
required.add_argument('--url', help='DefectDojo URL', required=True)
|
|
456
|
+
required.add_argument('--api_key', help='API v2 Key', required=True)
|
|
457
|
+
optional.add_argument('--id', help='Get finding with this id')
|
|
458
|
+
optional.add_argument('--test_id', help='Filter by test')
|
|
459
|
+
optional.add_argument('--product_id', help='Filter by product')
|
|
460
|
+
optional.add_argument('--engagement_id', help='Filter by engagement')
|
|
461
|
+
optional.add_argument(
|
|
462
|
+
'--test_type',
|
|
463
|
+
help='Filter by test type (can be used multiple times)',
|
|
464
|
+
action='append'
|
|
465
|
+
)
|
|
466
|
+
optional.add_argument('--active', help='List only actives findings',
|
|
467
|
+
action='store_true', dest='active')
|
|
468
|
+
optional.add_argument('--inactive', help='List only inactives findings',
|
|
469
|
+
action='store_false', dest='active')
|
|
470
|
+
optional.add_argument('--closed', help='List only closed/mitigated fidings',
|
|
471
|
+
action='store_true')
|
|
472
|
+
optional.add_argument('--valid', help='List only valid findings (true-positives)',
|
|
473
|
+
action='store_true', dest='valid')
|
|
474
|
+
optional.add_argument('--false_positives', help='List only false-positives findings',
|
|
475
|
+
action='store_false', dest='valid')
|
|
476
|
+
optional.add_argument('--in_scope', help='List only findings in-scope',
|
|
477
|
+
action='store_true', dest='scope')
|
|
478
|
+
optional.add_argument('--out_of_scope', help='List only findings out-of-scope',
|
|
479
|
+
action='store_false', dest='scope')
|
|
480
|
+
optional.add_argument('--json', help='Print output in JSON format', action='store_true', default=False)
|
|
481
|
+
optional.add_argument('--limit',
|
|
482
|
+
help='Number of results to return (by default it gets all the findings)')
|
|
483
|
+
optional.add_argument('--offset', help='The initial index from which to return the results '
|
|
484
|
+
+'(not needed if the --limit flag is not set)')
|
|
485
|
+
optional.add_argument(
|
|
486
|
+
'--fail_if_found',
|
|
487
|
+
help='Returns a non-zero exit code if any findings with the passed '
|
|
488
|
+
'severity (or higher) are returned (default = NULL)',
|
|
489
|
+
default='NULL',
|
|
490
|
+
choices=['NULL', 'Info', 'Low', 'Medium', 'High', 'Critical']
|
|
491
|
+
)
|
|
492
|
+
optional.add_argument(
|
|
493
|
+
'--tag_test',
|
|
494
|
+
help='Test tag (can be used multiple times). The API call to '
|
|
495
|
+
'filter by tags is bugged, so what this does under the '
|
|
496
|
+
'hood is get all test_types from tests with theses tags and '
|
|
497
|
+
'filter by them. This method is not failproof, so if you are '
|
|
498
|
+
'having issues with it try fine-tuning using others flags '
|
|
499
|
+
'(e.g. passing an engagement id along with tags)',
|
|
500
|
+
action='append'
|
|
501
|
+
)
|
|
502
|
+
optional.add_argument(
|
|
503
|
+
'--tags_operator',
|
|
504
|
+
help='Determine the operation to perform when working with multiple tags (default = "union")',
|
|
505
|
+
default='union',
|
|
506
|
+
choices=['union', 'intersect']
|
|
507
|
+
)
|
|
508
|
+
optional.set_defaults(active=None, valid=None, scope=None)
|
|
509
|
+
parser._action_groups.append(optional)
|
|
510
|
+
# Parse out arguments ignoring the first three (because we're inside a sub-command)
|
|
511
|
+
args = vars(parser.parse_args(sys.argv[3:]))
|
|
512
|
+
|
|
513
|
+
# Adjust args
|
|
514
|
+
if args['id'] is not None:
|
|
515
|
+
# Rename key from 'id' to 'finding_id' to match the argument of self.list
|
|
516
|
+
args['finding_id'] = args.pop('id')
|
|
517
|
+
|
|
518
|
+
# Get findings
|
|
519
|
+
response = self.list(**args)
|
|
520
|
+
|
|
521
|
+
# Print output
|
|
522
|
+
json_out = json.loads(response.text)
|
|
523
|
+
if response.status_code == 200: # Sucess
|
|
524
|
+
|
|
525
|
+
if args['json'] is True: # If --json flag was passed
|
|
526
|
+
# Pretty print output in json
|
|
527
|
+
pretty_json_out = json.dumps(json_out, indent=4)
|
|
528
|
+
print(pretty_json_out)
|
|
529
|
+
|
|
530
|
+
else: # Print output in a more human readable way
|
|
531
|
+
# Print findings amount
|
|
532
|
+
findings_amount = json_out['count']
|
|
533
|
+
print('\nFindings amount: '+str(json_out['count']))
|
|
534
|
+
if findings_amount > 0:
|
|
535
|
+
|
|
536
|
+
# Print components and its version (usefull for Software Composition Analysis)
|
|
537
|
+
components = set()
|
|
538
|
+
for finding in json_out['results']:
|
|
539
|
+
if finding['component_name'] is not None:
|
|
540
|
+
if finding['component_version'] is not None:
|
|
541
|
+
components.add(' ' + finding['component_name']+' v'+finding['component_version'])
|
|
542
|
+
else:
|
|
543
|
+
components.add(' ' + finding['component_name'])
|
|
544
|
+
if components:
|
|
545
|
+
print('\nVulnerable components:')
|
|
546
|
+
for component in sorted(components):
|
|
547
|
+
print(component)
|
|
548
|
+
|
|
549
|
+
# Print link to the list of findings on DefectDojo
|
|
550
|
+
if args['product_id'] is not None: # If a product id was passed
|
|
551
|
+
# Print link specific for that product
|
|
552
|
+
findings_list_url = response.request.url.replace('api/v2/findings/', 'product/'+args['product_id']+'/finding/all') # Mount URL using the previous API call as base
|
|
553
|
+
findings_list_url = re.sub('test__engagement__product=\d+&?', '', findings_list_url)
|
|
554
|
+
findings_list_url = re.sub('limit=\d+&?', '', findings_list_url)
|
|
555
|
+
findings_list_url = re.sub('%0A', '', findings_list_url)
|
|
556
|
+
print('\n\nYou can also view this list on DefectDojo:\n'+findings_list_url) # Print URL
|
|
557
|
+
else:
|
|
558
|
+
# Print general link
|
|
559
|
+
findings_list_url = response.request.url.replace('api/v2/findings/', 'finding') # Mount URL using the previous API call as base
|
|
560
|
+
print('\n\nYou can also view this list on DefectDojo:\n'+findings_list_url) # Print URL
|
|
561
|
+
|
|
562
|
+
# Print findings using tabulate (https://pypi.org/project/tabulate)
|
|
563
|
+
table = dict()
|
|
564
|
+
table['Severity'] = list()
|
|
565
|
+
table['Title'] = list()
|
|
566
|
+
table['URL'] = list()
|
|
567
|
+
for finding in json_out['results']:
|
|
568
|
+
table['Severity'].append(finding['severity'])
|
|
569
|
+
if len(finding['title']) <= 70: # Truncate title bigger then 70 chars
|
|
570
|
+
table['Title'].append(finding['title'])
|
|
571
|
+
else:
|
|
572
|
+
table['Title'].append(finding['title'][:70]+'...')
|
|
573
|
+
table['URL'].append(args['url']+'/finding/'+str(finding['id']))
|
|
574
|
+
print(tabulate(table, headers='keys', tablefmt='fancy_grid'))
|
|
575
|
+
|
|
576
|
+
# Exit
|
|
577
|
+
if args['fail_if_found'] != 'NULL': # If --fail_if_found flag was passed
|
|
578
|
+
|
|
579
|
+
findings_sev = set(table['Severity'])
|
|
580
|
+
# Get maximum severity from listed findings
|
|
581
|
+
if 'Info' in findings_sev:
|
|
582
|
+
sev_max = 1
|
|
583
|
+
if 'Low' in findings_sev:
|
|
584
|
+
sev_max = 2
|
|
585
|
+
if 'Medium' in findings_sev:
|
|
586
|
+
sev_max = 3
|
|
587
|
+
if 'High' in findings_sev:
|
|
588
|
+
sev_max = 4
|
|
589
|
+
if 'Critical' in findings_sev:
|
|
590
|
+
sev_max = 5
|
|
591
|
+
|
|
592
|
+
# Parse fail_if_found flag
|
|
593
|
+
if args['fail_if_found'] == 'Info':
|
|
594
|
+
fail_if_found = 1
|
|
595
|
+
if args['fail_if_found'] == 'Low':
|
|
596
|
+
fail_if_found = 2
|
|
597
|
+
if args['fail_if_found'] == 'Medium':
|
|
598
|
+
fail_if_found = 3
|
|
599
|
+
if args['fail_if_found'] == 'High':
|
|
600
|
+
fail_if_found = 4
|
|
601
|
+
if args['fail_if_found'] == 'Critical':
|
|
602
|
+
fail_if_found = 5
|
|
603
|
+
|
|
604
|
+
if sev_max >= fail_if_found:
|
|
605
|
+
exit(1)
|
|
606
|
+
else:
|
|
607
|
+
exit(0)
|
|
608
|
+
|
|
609
|
+
else:
|
|
610
|
+
exit(0)
|
|
611
|
+
else: # Failure
|
|
612
|
+
# Pretty print output in json
|
|
613
|
+
pretty_json_out = json.dumps(json_out, indent=4)
|
|
614
|
+
print(pretty_json_out)
|
|
615
|
+
exit(1)
|
|
616
|
+
|
|
617
|
+
def update(self, url, api_key, finding_id, active=None, mitigated=None, **kwargs):
|
|
618
|
+
# Prepare JSON data to be send
|
|
619
|
+
request_json = dict()
|
|
620
|
+
API_URL = url+'/api/v2'
|
|
621
|
+
FINDINGS_URL = API_URL+'/findings/'
|
|
622
|
+
FINDINGS_ID_URL = FINDINGS_URL+str(finding_id)+'/'
|
|
623
|
+
if active is not None:
|
|
624
|
+
request_json['active'] = active
|
|
625
|
+
if mitigated is not None:
|
|
626
|
+
request_json['is_Mitigated'] = mitigated
|
|
627
|
+
request_json = json.dumps(request_json)
|
|
628
|
+
|
|
629
|
+
# Make the request
|
|
630
|
+
response = Util().request_apiv2('PATCH', FINDINGS_ID_URL, api_key, data=request_json)
|
|
631
|
+
return response
|
|
632
|
+
|
|
633
|
+
def _update(self):
|
|
634
|
+
# Read user-supplied arguments
|
|
635
|
+
parser = argparse.ArgumentParser(description='Update a finding on DefectDojo',
|
|
636
|
+
usage='defectdojo finding update FINDING_ID [<args>]')
|
|
637
|
+
optional = parser._action_groups.pop()
|
|
638
|
+
required = parser.add_argument_group('required arguments')
|
|
639
|
+
parser.add_argument('finding_id', help='ID of the finding to be updated')
|
|
640
|
+
required.add_argument('--url', help='DefectDojo URL', required=True)
|
|
641
|
+
required.add_argument('--api_key', help='API v2 Key', required=True)
|
|
642
|
+
optional.add_argument('--active', help='Set finding as active (true) or inactive (false)',
|
|
643
|
+
choices=['true', 'false'])
|
|
644
|
+
optional.add_argument('--mitigated', help='Indicates if the finding is mitigated (true) or not (false)',
|
|
645
|
+
choices=['true', 'false'])
|
|
646
|
+
parser._action_groups.append(optional)
|
|
647
|
+
# Parse out arguments ignoring the first three (because we're inside a sub_command)
|
|
648
|
+
args = vars(parser.parse_args(sys.argv[3:]))
|
|
649
|
+
|
|
650
|
+
# Adjust args
|
|
651
|
+
if args['active'] is not None:
|
|
652
|
+
if args['active'] == 'true':
|
|
653
|
+
args['active'] = True
|
|
654
|
+
else:
|
|
655
|
+
args['active'] = False
|
|
656
|
+
if args['mitigated'] is not None:
|
|
657
|
+
if args['mitigated'] == 'true':
|
|
658
|
+
args['mitigated'] = True
|
|
659
|
+
else:
|
|
660
|
+
args['mitigated'] = False
|
|
661
|
+
|
|
662
|
+
# Update finding
|
|
663
|
+
response = self.update(**args)
|
|
664
|
+
|
|
665
|
+
# Pretty print JSON response
|
|
666
|
+
Util().default_output(response, sucess_status_code=200)
|
|
667
|
+
|
|
668
|
+
def close(self, url, api_key, finding_id, **kwargs):
|
|
669
|
+
# Prepare parameters
|
|
670
|
+
request_params = dict()
|
|
671
|
+
request_params['url'] = url
|
|
672
|
+
request_params['api_key'] = api_key
|
|
673
|
+
request_params['finding_id'] = finding_id
|
|
674
|
+
request_params['active'] = False
|
|
675
|
+
request_params['mitigated'] = True
|
|
676
|
+
|
|
677
|
+
# Call the update method with active=False and mitigated=True
|
|
678
|
+
response = self.update(**request_params)
|
|
679
|
+
return response
|
|
680
|
+
|
|
681
|
+
    def _close(self):
        """Handle the 'close' sub_command: parse CLI args, close the finding
        via self.close and pretty-print the API response."""
        # Read user-supplied arguments
        parser = argparse.ArgumentParser(description='Close a finding on DefectDojo',
                                         usage='defectdojo finding close FINDING_ID [<args>]')
        required = parser.add_argument_group('required arguments')
        parser.add_argument('finding_id', help='ID of the finding to be closed')
        required.add_argument('--url', help='DefectDojo URL', required=True)
        required.add_argument('--api_key', help='API v2 Key', required=True)
        # Parse out arguments ignoring the first three (because we're inside a sub_command)
        args = vars(parser.parse_args(sys.argv[3:]))

        # Close finding
        response = self.close(**args)

        # Pretty print JSON response
        Util().default_output(response, sucess_status_code=200)
|
|
697
|
+
|
|
698
|
+
def add_note(self, url, api_key, finding_id, entry, private=None, note_type=None, **kwargs):
|
|
699
|
+
# Prepare parameters
|
|
700
|
+
API_URL = url+'/api/v2/'
|
|
701
|
+
FINDINGS_URL = API_URL+'findings/'
|
|
702
|
+
FINDINGS_ID_URL = FINDINGS_URL+str(finding_id)+'/'
|
|
703
|
+
FINDINGS_ID_NOTES_URL = FINDINGS_ID_URL+'notes/'
|
|
704
|
+
|
|
705
|
+
# Prepare JSON data to be send
|
|
706
|
+
request_json = dict()
|
|
707
|
+
request_json['entry'] = entry
|
|
708
|
+
if private is not None:
|
|
709
|
+
request_json['private'] = private
|
|
710
|
+
if note_type is not None:
|
|
711
|
+
request_json['note_type'] = note_type
|
|
712
|
+
request_json = json.dumps(request_json)
|
|
713
|
+
|
|
714
|
+
# Make the request
|
|
715
|
+
response = Util().request_apiv2('POST', FINDINGS_ID_NOTES_URL, api_key, data=request_json)
|
|
716
|
+
return response
|
|
717
|
+
|
|
718
|
+
def list_multiple_test_types(self, url, api_key, test_types, **kwargs):
|
|
719
|
+
# Create parameters to be requested
|
|
720
|
+
request_params = kwargs
|
|
721
|
+
API_URL = url+'/api/v2'
|
|
722
|
+
FINDINGS_URL = API_URL+'/findings/'
|
|
723
|
+
|
|
724
|
+
# Get list of json responses
|
|
725
|
+
json_out_list = list()
|
|
726
|
+
for test_type in test_types:
|
|
727
|
+
request_params['test__test_type'] = test_type
|
|
728
|
+
response = Util().request_apiv2('GET', FINDINGS_URL, api_key, params=request_params)
|
|
729
|
+
json_out_list.append(json.loads(response.text))
|
|
730
|
+
|
|
731
|
+
# Merge responses
|
|
732
|
+
json_out_result = dict()
|
|
733
|
+
json_out_result['count'] = 0
|
|
734
|
+
json_out_result['results'] = list()
|
|
735
|
+
for json_out in json_out_list:
|
|
736
|
+
for finding in json_out['results']:
|
|
737
|
+
json_out_result['count'] += 1
|
|
738
|
+
json_out_result['results'].append(finding)
|
|
739
|
+
|
|
740
|
+
# Make a request passing the list of test_types so that the url at the tool output works properly
|
|
741
|
+
request_params['test__test_type'] = test_types
|
|
742
|
+
response = Util().request_apiv2('GET', FINDINGS_URL, api_key, params=request_params)
|
|
743
|
+
# Replace the response body with the one we created
|
|
744
|
+
type(response).text = PropertyMock(return_value=json.dumps(json_out_result))
|
|
745
|
+
return response
|