secator 0.7.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff compares the contents of package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in those public registries.
Potentially problematic release.
This version of secator might be problematic.
- secator/celery.py +3 -3
- secator/cli.py +106 -76
- secator/config.py +88 -58
- secator/configs/workflows/subdomain_recon.yaml +2 -2
- secator/configs/workflows/url_dirsearch.yaml +1 -1
- secator/decorators.py +1 -0
- secator/definitions.py +1 -1
- secator/installer.py +277 -60
- secator/output_types/error.py +3 -3
- secator/output_types/exploit.py +11 -7
- secator/output_types/info.py +2 -2
- secator/output_types/ip.py +1 -1
- secator/output_types/port.py +3 -3
- secator/output_types/record.py +4 -4
- secator/output_types/stat.py +2 -2
- secator/output_types/subdomain.py +1 -1
- secator/output_types/tag.py +3 -3
- secator/output_types/target.py +2 -2
- secator/output_types/url.py +11 -11
- secator/output_types/user_account.py +6 -6
- secator/output_types/vulnerability.py +5 -4
- secator/output_types/warning.py +2 -2
- secator/report.py +1 -0
- secator/runners/_base.py +17 -13
- secator/runners/command.py +44 -7
- secator/tasks/_categories.py +145 -43
- secator/tasks/bbot.py +2 -0
- secator/tasks/bup.py +1 -0
- secator/tasks/dirsearch.py +2 -2
- secator/tasks/dnsxbrute.py +2 -1
- secator/tasks/feroxbuster.py +2 -3
- secator/tasks/fping.py +1 -1
- secator/tasks/grype.py +2 -4
- secator/tasks/h8mail.py +1 -1
- secator/tasks/katana.py +1 -1
- secator/tasks/maigret.py +1 -1
- secator/tasks/msfconsole.py +18 -3
- secator/tasks/naabu.py +15 -1
- secator/tasks/nmap.py +32 -20
- secator/tasks/nuclei.py +4 -1
- secator/tasks/searchsploit.py +9 -2
- secator/tasks/wpscan.py +12 -1
- secator/template.py +1 -1
- secator/utils.py +151 -62
- {secator-0.7.0.dist-info → secator-0.8.0.dist-info}/METADATA +50 -45
- {secator-0.7.0.dist-info → secator-0.8.0.dist-info}/RECORD +49 -49
- {secator-0.7.0.dist-info → secator-0.8.0.dist-info}/WHEEL +1 -1
- {secator-0.7.0.dist-info → secator-0.8.0.dist-info}/entry_points.txt +0 -0
- {secator-0.7.0.dist-info → secator-0.8.0.dist-info}/licenses/LICENSE +0 -0
secator/output_types/url.py
CHANGED
@@ -4,7 +4,7 @@ from dataclasses import dataclass, field
 from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, STATUS_CODE,
                                  TECH, TIME, TITLE, URL, WEBSERVER)
 from secator.output_types import OutputType
-from secator.utils import rich_to_ansi, trim_string
+from secator.utils import rich_to_ansi, trim_string, rich_escape as _s
 from secator.config import CONFIG

@@ -56,27 +56,27 @@ class Url(OutputType):
         return self.url

     def __repr__(self):
-        s = f'🔗 [white]{self.url}'
+        s = f'🔗 [white]{_s(self.url)}'
         if self.method and self.method != 'GET':
-            s +=
+            s += rf' \[[turquoise4]{self.method}[/]]'
         if self.status_code and self.status_code != 0:
             if self.status_code < 400:
-                s +=
+                s += rf' \[[green]{self.status_code}[/]]'
             else:
-                s +=
+                s += rf' \[[red]{self.status_code}[/]]'
         if self.title:
-            s +=
+            s += rf' \[[green]{trim_string(self.title)}[/]]'
         if self.webserver:
-            s +=
+            s += rf' \[[magenta]{_s(self.webserver)}[/]]'
         if self.tech:
-            techs_str = ', '.join([f'[magenta]{tech}[/]' for tech in self.tech])
+            techs_str = ', '.join([f'[magenta]{_s(tech)}[/]' for tech in self.tech])
             s += f' [{techs_str}]'
         if self.content_type:
-            s +=
+            s += rf' \[[magenta]{_s(self.content_type)}[/]]'
         if self.content_length:
             cl = str(self.content_length)
             cl += '[bold red]+[/]' if self.content_length == CONFIG.http.response_max_size_bytes else ''
-            s +=
+            s += rf' \[[magenta]{cl}[/]]'
         if self.screenshot_path:
-            s +=
+            s += rf' \[[magenta]{_s(self.screenshot_path)}[/]]'
         return rich_to_ansi(s)
secator/output_types/user_account.py
CHANGED

@@ -3,7 +3,7 @@ from dataclasses import dataclass, field

 from secator.definitions import SITE_NAME, URL, USERNAME
 from secator.output_types import OutputType
-from secator.utils import rich_to_ansi
+from secator.utils import rich_to_ansi, rich_escape as _s


 @dataclass

@@ -29,13 +29,13 @@ class UserAccount(OutputType):
         return self.url

     def __repr__(self) -> str:
-        s = f'👤 [green]{self.username}[/]'
+        s = f'👤 [green]{_s(self.username)}[/]'
         if self.email:
-            s +=
+            s += rf' \[[bold yellow]{_s(self.email)}[/]]'
         if self.site_name:
-            s +=
+            s += rf' \[[bold blue]{self.site_name}[/]]'
         if self.url:
-            s +=
+            s += rf' \[[white]{_s(self.url)}[/]]'
         if self.extra_data:
-            s += ' \[[bold yellow]' + ', '.join(f'{k}:{v}' for k, v in self.extra_data.items()) + '[/]]'
+            s += r' \[[bold yellow]' + _s(', '.join(f'{k}:{v}' for k, v in self.extra_data.items()) + '[/]]')
         return rich_to_ansi(s)
secator/output_types/vulnerability.py
CHANGED

@@ -5,7 +5,7 @@ from typing import List
 from secator.definitions import (CONFIDENCE, CVSS_SCORE, EXTRA_DATA, ID,
                                  MATCHED_AT, NAME, REFERENCE, SEVERITY, TAGS)
 from secator.output_types import OutputType
-from secator.utils import rich_to_ansi
+from secator.utils import rich_to_ansi, rich_escape as _s


 @dataclass

@@ -70,6 +70,7 @@ class Vulnerability(OutputType):
             data = ','.join(data['data'])
         elif isinstance(data, dict):
             data = ', '.join([f'{k}:{v}' for k, v in data.items()])
+        data = _s(data)
         tags = self.tags
         colors = {
             'critical': 'bold red',

@@ -80,12 +81,12 @@ class Vulnerability(OutputType):
             'unknown': 'dim magenta'
         }
         c = colors.get(self.severity, 'dim magenta')
-        s =
+        s = rf'🚨 \[[green]{_s(self.name)} [link={_s(self.reference)}]🡕[/link][/]] \[[{c}]{self.severity}[/]] {_s(self.matched_at)}'  # noqa: E501
         if tags:
             tags_str = ','.join(tags)
-            s +=
+            s += rf' \[[cyan]{_s(tags_str)}[/]]'
         if data:
-            s +=
+            s += rf' \[[yellow]{str(data)}[/]]'
         if self.confidence == 'low':
             s = f'[dim]{s}[/]'
         return rich_to_ansi(s)
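The Vulnerability.__repr__ rewrite above combines a severity-to-style lookup with a fallback, a clickable [link=...] reference, and a [dim] wrapper for low-confidence findings. A minimal standalone sketch of that rendering logic (the color table is trimmed, and the 'high' entry is an assumption since only 'critical' and 'unknown' appear in the hunk shown):

from rich.console import Console

console = Console()

COLORS = {
    'critical': 'bold red',
    'high': 'red',  # assumed entry, not shown in the hunk above
    'unknown': 'dim magenta',
}

def render_vuln(name, severity, reference, confidence):
    c = COLORS.get(severity, 'dim magenta')
    # '\[' prints a literal bracket; '[link=...]' makes the arrow clickable in
    # terminals that support hyperlinks.
    s = rf'🚨 \[[green]{name} [link={reference}]🡕[/link][/]] \[[{c}]{severity}[/]]'
    if confidence == 'low':
        s = f'[dim]{s}[/]'  # dim the whole line for low-confidence findings
    console.print(s)

render_vuln('CVE-2021-41773', 'critical', 'https://vulnerability.circl.lu/cve/CVE-2021-41773', 'high')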
secator/output_types/warning.py
CHANGED
@@ -1,7 +1,7 @@
 from dataclasses import dataclass, field
 import time
 from secator.output_types import OutputType
-from secator.utils import rich_to_ansi
+from secator.utils import rich_to_ansi, rich_escape as _s


 @dataclass

@@ -20,5 +20,5 @@ class Warning(OutputType):
     _sort_by = ('_timestamp',)

     def __repr__(self):
-        s =
+        s = rf"\[[yellow]WRN[/]] {_s(self.message)}"
         return rich_to_ansi(s)
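The change shared by url.py, user_account.py, vulnerability.py and warning.py is the new rich_escape as _s import: every user-controlled field is escaped before being embedded in Rich markup, so a scraped title or banner containing something like [red]...[/] cannot inject styling or break rendering. A minimal sketch of the pattern, using rich.markup.escape as a stand-in for secator's rich_escape (whose implementation is not shown in this diff):

from rich.console import Console
from rich.markup import escape as _s  # stand-in for secator.utils.rich_escape

console = Console()

def render_url(url, webserver):
    # Escape untrusted values so any markup inside them is shown literally;
    # the leading '\[' keeps the decorative brackets out of Rich's tag parser.
    s = f'🔗 [white]{_s(url)}[/]'
    s += rf' \[[magenta]{_s(webserver)}[/]]'
    console.print(s)

render_url('https://example.com/[red]injected[/]', 'nginx/1.25')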
secator/report.py
CHANGED
secator/runners/_base.py
CHANGED
@@ -75,7 +75,9 @@ class Runner:
         if not isinstance(inputs, list):
             inputs = [inputs]
         self.inputs = inputs
-        self.
+        self.uuids = []
+        self.output = ''
+        self.results = []
         self.workspace_name = context.get('workspace_name', 'default')
         self.run_opts = run_opts.copy()
         self.sync = run_opts.get('sync', True)

@@ -86,14 +88,12 @@ class Runner:
         self.last_updated_progress = None
         self.end_time = None
         self._hooks = hooks
-        self.output = ''
         self.progress = 0
         self.context = context
         self.delay = run_opts.get('delay', False)
         self.celery_result = None
         self.celery_ids = []
         self.celery_ids_map = {}
-        self.uuids = []
         self.caller = self.run_opts.get('caller', None)
         self.threads = []

@@ -163,6 +163,10 @@ class Runner:
         self.unique_name = self.name.replace('/', '_')
         self.unique_name = f'{self.unique_name}_{self.chunk}' if self.chunk else self.unique_name

+        # Process prior results
+        for result in results:
+            list(self._process_item(result, print=False))
+
         # Input post-process
         self.run_hooks('before_init')

@@ -387,14 +391,14 @@ class Runner:
         if item_out:
             item_repr = repr(item)
             if isinstance(item, OutputType) and self.print_remote_info:
-                item_repr += rich_to_ansi(
+                item_repr += rich_to_ansi(rf' \[[dim]{item._source}[/]]')
             self._print(item_repr, out=item_out)

         # Item is a line
         elif isinstance(item, str):
             self.debug(item, sub='line', allow_no_process=False, verbose=True)
             if self.print_line or force:
-                self._print(item, out=sys.stderr, end='\n')
+                self._print(item, out=sys.stderr, end='\n', rich=False)

     def debug(self, *args, **kwargs):
         """Print debug with runner class name, only if self.no_process is True.

@@ -557,7 +561,7 @@ class Runner:
         name = f'{self.__class__.__name__}.{validator_type}'
         fun = self.get_func_path(validator)
         if not validator(self, *args):
-            self.debug('', obj={name + ' [dim yellow]->[/] ' + fun: 'failed'}, id=_id, sub='validators')
+            self.debug('', obj={name + ' [dim yellow]->[/] ' + fun: '[dim red]failed[/]'}, id=_id, verbose=True, sub='validators')  # noqa: E501
             doc = validator.__doc__
             if error:
                 message = 'Validator failed'

@@ -570,7 +574,7 @@ class Runner:
             )
             self.add_result(error, print=True)
             return False
-        self.debug('', obj={name + ' [dim yellow]->[/] ' + fun: 'success'}, id=_id, sub='validators')
+        self.debug('', obj={name + ' [dim yellow]->[/] ' + fun: '[dim green]success[/]'}, id=_id, verbose=True, sub='validators')  # noqa: E501
         return True

     def register_hooks(self, hooks):

@@ -776,20 +780,20 @@ class Runner:
             count_map[name] = count
         return count_map

-    def _process_item(self, item):
+    def _process_item(self, item, print=True):
         """Process an item yielded by the derived runner.

         Args:
             item (dict | str): Input item.
+            print (bool): Print item in console.

         Yields:
             OutputType: Output type.
         """
-
         # Item is a string, just print it
         if isinstance(item, str):
             self.output += item + '\n'
-            self._print_item(item) if item else ''
+            self._print_item(item) if item and print else ''
             return

         # Abort further processing if no_process is set

@@ -836,14 +840,14 @@ class Runner:
         elif isinstance(item, Info) and item.task_id and item.task_id not in self.celery_ids:
             self.celery_ids.append(item.task_id)

-        #
-
+        # If finding, run on_item hooks
+        elif isinstance(item, tuple(FINDING_TYPES)):
             item = self.run_hooks('on_item', item)
             if not item:
                 return

         # Add item to results
-        self.add_result(item, print=
+        self.add_result(item, print=print)

         # Yield item
         yield item
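Taken together, the _base.py changes move the uuids/output/results initialization earlier and thread a print flag through _process_item, so results handed over from previous runners can be replayed through the same pipeline without being echoed a second time. A simplified sketch of that pattern (a toy class with trimmed-down helpers, not the real Runner):

class MiniRunner:
    def __init__(self, prior_results):
        self.results = []
        self.output = ''
        # Replay prior results through the item pipeline without printing them again
        for result in prior_results:
            list(self._process_item(result, print=False))

    def _process_item(self, item, print=True):
        # 'print' mirrors the keyword added to Runner._process_item in this release
        if isinstance(item, str):
            self.output += item + '\n'
            if item and print:
                self._print_item(item)
            return
        self.add_result(item, print=print)
        yield item

    def add_result(self, item, print=True):
        self.results.append(item)
        if print:
            self._print_item(item)

    def _print_item(self, item):
        print(repr(item))


runner = MiniRunner(prior_results=['previous line', {'type': 'url', 'url': 'http://example.com'}])
print(len(runner.results))  # 1: the dict was stored, nothing was echoed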
secator/runners/command.py
CHANGED
@@ -19,7 +19,7 @@ from secator.config import CONFIG
 from secator.output_types import Info, Error, Target, Stat
 from secator.runners import Runner
 from secator.template import TemplateLoader
-from secator.utils import debug
+from secator.utils import debug, rich_escape as _s


 logger = logging.getLogger(__name__)

@@ -79,6 +79,8 @@ class Command(Runner):
     version_flag = None

     # Install
+    install_pre = None
+    install_post = None
     install_cmd = None
     install_github_handle = None

@@ -359,6 +361,24 @@ class Command(Runner):
         # Prepare cmds
         command = self.cmd if self.shell else shlex.split(self.cmd)

+        # Check command is installed and auto-install
+        if not self.no_process and not self.is_installed():
+            if CONFIG.security.auto_install_commands:
+                from secator.installer import ToolInstaller
+                yield Info(
+                    message=f'Command {self.name} is missing but auto-installing since security.autoinstall_commands is set',  # noqa: E501
+                    _source=self.unique_name,
+                    _uuid=str(uuid.uuid4())
+                )
+                status = ToolInstaller.install(self.__class__)
+                if not status.is_ok():
+                    yield Error(
+                        message=f'Failed installing {self.name}',
+                        _source=self.unique_name,
+                        _uuid=str(uuid.uuid4())
+                    )
+                    return
+
         # Output and results
         self.return_code = 0
         self.killed = False

@@ -404,6 +424,19 @@ class Command(Runner):
         finally:
             yield from self._wait_for_end()

+    def is_installed(self):
+        """Check if a command is installed by using `which`.
+
+        Args:
+            command (str): The command to check.
+
+        Returns:
+            bool: True if the command is installed, False otherwise.
+        """
+        result = subprocess.Popen(["which", self.cmd.split(' ')[0]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        result.communicate()
+        return result.returncode == 0
+
     def process_line(self, line):
         """Process a single line of output emitted on stdout / stderr and yield results."""

@@ -448,13 +481,11 @@ class Command(Runner):
         if self.sync and not self.has_children:
             if self.caller and self.description:
                 self._print(f'\n[bold gold3]:wrench: {self.description} [dim cyan]({self.config.name})[/][/] ...', rich=True)
-            elif self.print_cmd:
-                self._print('')

     def print_command(self):
         """Print command."""
         if self.print_cmd:
-            cmd_str = self.cmd
+            cmd_str = _s(self.cmd)
             if self.sync and self.chunk and self.chunk_count:
                 cmd_str += f' [dim gray11]({self.chunk}/{self.chunk_count})[/]'
             self._print(cmd_str, color='bold cyan', rich=True)

@@ -473,7 +504,7 @@ class Command(Runner):
         if self.config.name in str(exc):
             message = 'Executable not found.'
             if self.install_cmd:
-                message += f' Install it with
+                message += f' Install it with [bold green4]secator install tools {self.config.name}[/].'
             error = Error(message=message)
         else:
             error = Error.from_exception(exc)

@@ -578,9 +609,10 @@ class Command(Runner):
             return -1, error

         # If not, prompt the user for a password
-        self._print('[bold red]Please enter sudo password to continue.[/]')
+        self._print('[bold red]Please enter sudo password to continue.[/]', rich=True)
         for _ in range(3):
-
+            user = getpass.getuser()
+            self._print(rf'\[sudo] password for {user}: ▌', rich=True)
             sudo_password = getpass.getpass()
             result = subprocess.run(
                 ['sudo', '-S', '-p', '', 'true'],

@@ -665,6 +697,11 @@ class Command(Runner):
             debug('skipped (falsy)', obj={'name': opt_name, 'value': opt_val}, obj_after=False, sub='command.options', verbose=True)  # noqa: E501
             continue

+        # Apply process function on opt value
+        if 'process' in opt_conf:
+            func = opt_conf['process']
+            opt_val = func(opt_val)
+
         # Convert opt value to expected command opt value
         mapped_opt_val = opt_value_map.get(opt_name)
         if mapped_opt_val:
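Among the command.py changes, the small 'process' hook is easy to miss: an option definition can now carry a callable that transforms the raw value just before it is rendered onto the command line, which is what lets the WORDLIST option in the following _categories.py diff resolve an alias via process_wordlist. A sketch of the mechanism with made-up option names and a hypothetical resolver:

def expand_wordlist(value):
    # Hypothetical stand-in for secator's process_wordlist: resolve an alias to a path
    aliases = {'http': '/usr/share/seclists/Discovery/Web-Content/common.txt'}
    return aliases.get(value, value)

OPTS = {
    'wordlist': {'type': str, 'default': 'http', 'process': expand_wordlist},
    'threads': {'type': int, 'default': 50},  # no 'process' key: value passes through unchanged
}

def build_opt(opt_name, opt_val):
    opt_conf = OPTS[opt_name]
    if 'process' in opt_conf:  # apply the process function on the opt value
        opt_val = opt_conf['process'](opt_val)
    return f'--{opt_name} {opt_val}'

print(build_opt('wordlist', 'http'))
# --wordlist /usr/share/seclists/Discovery/Web-Content/common.txt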
secator/tasks/_categories.py
CHANGED
@@ -1,5 +1,8 @@
 import json
 import os
+import re
+
+from functools import cache

 import requests
 from bs4 import BeautifulSoup

@@ -13,7 +16,7 @@ from secator.definitions import (CIDR_RANGE, CVSS_SCORE, DELAY, DEPTH, DESCRIPTI
 from secator.output_types import Ip, Port, Subdomain, Tag, Url, UserAccount, Vulnerability
 from secator.config import CONFIG
 from secator.runners import Command
-from secator.utils import debug
+from secator.utils import debug, process_wordlist


 OPTS = {

@@ -36,7 +39,7 @@ OPTS = {
     THREADS: {'type': int, 'help': 'Number of threads to run', 'default': 50},
     TIMEOUT: {'type': int, 'help': 'Request timeout'},
     USER_AGENT: {'type': str, 'short': 'ua', 'help': 'User agent, e.g "Mozilla Firefox 1.0"'},
-    WORDLIST: {'type': str, 'short': 'w', 'default':
+    WORDLIST: {'type': str, 'short': 'w', 'default': 'http', 'process': process_wordlist, 'help': 'Wordlist to use'}
 }

 OPTS_HTTP = [

@@ -124,6 +127,7 @@ class Vuln(Command):
         if os.path.exists(cve_path):
             with open(cve_path, 'r') as f:
                 return json.load(f)
+        debug(f'CVE {cve_id} not found in cache', sub='cve')
         return None

     # @staticmethod

@@ -179,12 +183,98 @@ class Vuln(Command):
         return tup1 == tup2

     @staticmethod
-    def
-        """
+    def get_cpe_fs(cpe):
+        """"Return formatted string for given CPE.
+
+        Args:
+            cpe (string): Input CPE
+
+        Returns:
+            string: CPE formatted string.
+        """
+        try:
+            return CPE(cpe).as_fs()
+        except NotImplementedError:
+            return None
+
+    @cache
+    @staticmethod
+    def lookup_cve_from_vulners_exploit(exploit_id, *cpes):
+        """Search for a CVE corresponding to an exploit by extracting the CVE id from the exploit HTML page.
+
+        Args:
+            exploit_id (str): Exploit ID.
+            cpes (tuple[str], Optional): CPEs to match for.
+
+        Returns:
+            dict: vulnerability data.
+        """
+        if CONFIG.runners.skip_exploit_search:
+            debug(f'Skip remote query for {exploit_id} since config.runners.skip_exploit_search is set.', sub='cve')
+            return None
+        if CONFIG.offline_mode:
+            debug(f'Skip remote query for {exploit_id} since config.offline_mode is set.', sub='cve')
+            return None
+        try:
+            resp = requests.get(f'https://vulners.com/githubexploit/{exploit_id}', timeout=5)
+            resp.raise_for_status()
+            soup = BeautifulSoup(resp.text, 'lxml')
+            title = soup.title.get_text(strip=True)
+            h1 = [h1.get_text(strip=True) for h1 in soup.find_all('h1')]
+            if '404' in h1:
+                raise requests.RequestException("404 [not found or rate limited]")
+            code = [code.get_text(strip=True) for code in soup.find_all('code')]
+            elems = [title] + h1 + code
+            content = '\n'.join(elems)
+            cve_regex = re.compile(r'(CVE(?:-|_)\d{4}(?:-|_)\d{4,7})', re.IGNORECASE)
+            matches = cve_regex.findall(str(content))
+            if not matches:
+                debug(f'{exploit_id}: No CVE found in https://vulners.com/githubexploit/{exploit_id}.', sub='cve')
+                return None
+            cve_id = matches[0].replace('_', '-').upper()
+            cve_data = Vuln.lookup_cve(cve_id, *cpes)
+            if cve_data:
+                return cve_data
+
+        except requests.RequestException as e:
+            debug(f'Failed remote query for {exploit_id} ({str(e)}).', sub='cve')
+            return None
+
+    @cache
+    @staticmethod
+    def lookup_cve_from_cve_circle(cve_id):
+        """Get CVE data from vulnerability.circl.lu.
+
+        Args:
+            cve_id (str): CVE id.
+
+        Returns:
+            dict | None: CVE data, None if no response or empty response.
+        """
+        try:
+            resp = requests.get(f'https://vulnerability.circl.lu/api/cve/{cve_id}', timeout=5)
+            resp.raise_for_status()
+            cve_info = resp.json()
+            if not cve_info:
+                debug(f'Empty response from https://vulnerability.circl.lu/api/cve/{cve_id}', sub='cve')
+                return None
+            cve_path = f'{CONFIG.dirs.data}/cves/{cve_id}.json'
+            with open(cve_path, 'w') as f:
+                f.write(json.dumps(cve_info, indent=2))
+            debug(f'Downloaded {cve_id} to {cve_path}', sub='cve')
+            return cve_info
+        except requests.RequestException as e:
+            debug(f'Failed remote query for {cve_id} ({str(e)}).', sub='cve')
+            return None
+
+    @cache
+    @staticmethod
+    def lookup_cve(cve_id, *cpes):
+        """Search for a CVE info and return vulnerability data.

         Args:
             cve_id (str): CVE ID in the form CVE-*
-            cpes (str, Optional): CPEs to match for.
+            cpes (tuple[str], Optional): CPEs to match for.

         Returns:
             dict: vulnerability data.

@@ -199,76 +289,87 @@ class Vuln(Command):
         if CONFIG.offline_mode:
             debug(f'Skip remote query for {cve_id} since config.offline_mode is set.', sub='cve')
             return None
-
-
-            resp.raise_for_status()
-            cve_info = resp.json()
-            if not cve_info:
-                debug(f'Empty response from https://cve.circl.lu/api/cve/{cve_id}.', sub='cve')
-                return None
-        except requests.RequestException as e:
-            debug(f'Failed remote query for {cve_id} ({str(e)}).', sub='cve')
+        cve_info = Vuln.lookup_cve_from_cve_circle(cve_id)
+        if not cve_info:
             return None

+        # Convert cve info to easy format
+        cve_id = cve_info['cveMetadata']['cveId']
+        cna = cve_info['containers']['cna']
+        metrics = cna.get('metrics', [])
+        cvss_score = 0
+        for metric in metrics:
+            for name, value in metric.items():
+                if 'cvss' in name:
+                    cvss_score = metric[name]['baseScore']
+        description = cna.get('descriptions', [{}])[0].get('value')
+        cwe_id = cna.get('problemTypes', [{}])[0].get('descriptions', [{}])[0].get('cweId')
+        cpes_affected = []
+        for product in cna['affected']:
+            cpes_affected.extend(product.get('cpes', []))
+        references = [u['url'] for u in cna['references']]
+        cve_info = {
+            'id': cve_id,
+            'cwe_id': cwe_id,
+            'cvss_score': cvss_score,
+            'description': description,
+            'cpes': cpes_affected,
+            'references': references
+        }
+
         # Match the CPE string against the affected products CPE FS strings from the CVE data if a CPE was passed.
         # This allow to limit the number of False positives (high) that we get from nmap NSE vuln scripts like vulscan
         # and ensure we keep only right matches.
         # The check is not executed if no CPE was passed (sometimes nmap cannot properly detect a CPE) or if the CPE
         # version cannot be determined.
         cpe_match = False
-        tags = []
+        tags = [cve_id]
         if cpes:
             for cpe in cpes:
-
-
+                cpe_fs = Vuln.get_cpe_fs(cpe)
+                if not cpe_fs:
+                    debug(f'{cve_id}: Failed to parse CPE {cpe} with CPE parser', sub='cve.match', verbose=True)
+                    tags.append('cpe-invalid')
+                    continue
                 # cpe_version = cpe_obj.get_version()[0]
-
-
-
-
+                for cpe_affected in cpes_affected:
+                    cpe_affected_fs = Vuln.get_cpe_fs(cpe_affected)
+                    if not cpe_affected_fs:
+                        debug(f'{cve_id}: Failed to parse CPE {cpe} (from online data) with CPE parser', sub='cve.match', verbose=True)
+                        continue
+                    debug(f'{cve_id}: Testing {cpe_fs} against {cpe_affected_fs}', sub='cve.match', verbose=True)
+                    cpe_match = Vuln.match_cpes(cpe_fs, cpe_affected_fs)
+                    if cpe_match:
                         debug(f'{cve_id}: CPE match found for {cpe}.', sub='cve')
-                        cpe_match = True
                         tags.append('cpe-match')
                         break
+
             if not cpe_match:
                 debug(f'{cve_id}: no CPE match found for {cpe}.', sub='cve')

         # Parse CVE id and CVSS
         name = id = cve_info['id']
-        cvss = cve_info.get('cvss') or 0
         # exploit_ids = cve_info.get('refmap', {}).get('exploit-db', [])
         # osvdb_ids = cve_info.get('refmap', {}).get('osvdb', [])

         # Get description
-        description = cve_info
+        description = cve_info['description']
         if description is not None:
             description = description.replace(id, '').strip()

         # Get references
         references = cve_info.get(REFERENCES, [])
-        cve_ref_url = f'https://
+        cve_ref_url = f'https://vulnerability.circl.lu/cve/{id}'
         references.append(cve_ref_url)

         # Get CWE ID
-
-        if
-            tags.append(
-
-        # Parse capecs for a better vuln name / type
-        capecs = cve_info.get('capec', [])
-        if capecs and len(capecs) > 0:
-            name = capecs[0]['name']
-
-        # Parse ovals for a better vuln name / type
-        ovals = cve_info.get('oval', [])
-        if ovals:
-            if description == 'none':
-                description = ovals[0]['title']
-            family = ovals[0]['family']
-            tags.append(family)
+        cwe_id = cve_info['cwe_id']
+        if cwe_id is not None:
+            tags.append(cwe_id)

         # Set vulnerability severity based on CVSS score
         severity = None
+        cvss = cve_info['cvss_score']
         if cvss:
             severity = Vuln.cvss_to_severity(cvss)

@@ -276,15 +377,16 @@ class Vuln(Command):
         vuln = {
             ID: id,
             NAME: name,
-            PROVIDER: '
+            PROVIDER: 'vulnerability.circl.lu',
             SEVERITY: severity,
             CVSS_SCORE: cvss,
             TAGS: tags,
-            REFERENCES: [f'https://
+            REFERENCES: [f'https://vulnerability.circl.lu/cve/{id}'] + references,
             DESCRIPTION: description,
         }
         return vuln

+    @cache
     @staticmethod
     def lookup_ghsa(ghsa_id):
         """Search for a GHSA on Github and and return associated CVE vulnerability data.
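The new lookup_cve_from_vulners_exploit helper boils down to scraping the exploit page and pulling the first CVE-looking token out of its text, then normalising separators and case before handing it to lookup_cve, which is now wrapped in functools.cache so repeated lookups for the same id within a run are served from memory. The extraction step in isolation, with a made-up input string:

import re

# Same pattern as the one added above; matches CVE ids written with '-' or '_' separators
cve_regex = re.compile(r'(CVE(?:-|_)\d{4}(?:-|_)\d{4,7})', re.IGNORECASE)

page_text = 'Exploit for cve_2023_4863 (heap overflow in libwebp)'  # made-up sample text
matches = cve_regex.findall(page_text)
if matches:
    cve_id = matches[0].replace('_', '-').upper()
    print(cve_id)  # CVE-2023-4863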
secator/tasks/bbot.py
CHANGED
@@ -151,10 +151,12 @@ def output_discriminator(self, item):

 @task()
 class bbot(Command):
+    """Multipurpose scanner."""
     cmd = 'bbot -y --allow-deadly --force'
     json_flag = '--json'
     input_flag = '-t'
     file_flag = None
+    version_flag = '--help'
     opts = {
         'modules': {'type': str, 'short': 'm', 'default': '', 'help': ','.join(BBOT_MODULES)},
         'presets': {'type': str, 'short': 'ps', 'default': 'kitchen-sink', 'help': ','.join(BBOT_PRESETS), 'shlex': False},
secator/tasks/bup.py
CHANGED
secator/tasks/dirsearch.py
CHANGED
@@ -20,7 +20,7 @@ class dirsearch(HttpFuzzer):
     cmd = 'dirsearch'
     input_flag = '-u'
     file_flag = '-l'
-    json_flag = '
+    json_flag = '-O json'
     opt_prefix = '--'
     encoding = 'ansi'
     opt_key_map = {

@@ -52,7 +52,7 @@ class dirsearch(HttpFuzzer):
             STATUS_CODE: 'status'
         }
     }
-    install_cmd = 'pipx install dirsearch'
+    install_cmd = 'pipx install --force git+https://github.com/maurosoria/dirsearch'
     proxychains = True
     proxy_socks5 = True
     proxy_http = True