secator 0.1.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of secator might be problematic. Click here for more details.

Files changed (99) hide show
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +421 -0
  4. secator/cli.py +927 -0
  5. secator/config.py +137 -0
  6. secator/configs/__init__.py +0 -0
  7. secator/configs/profiles/__init__.py +0 -0
  8. secator/configs/profiles/aggressive.yaml +7 -0
  9. secator/configs/profiles/default.yaml +9 -0
  10. secator/configs/profiles/stealth.yaml +7 -0
  11. secator/configs/scans/__init__.py +0 -0
  12. secator/configs/scans/domain.yaml +18 -0
  13. secator/configs/scans/host.yaml +14 -0
  14. secator/configs/scans/network.yaml +17 -0
  15. secator/configs/scans/subdomain.yaml +8 -0
  16. secator/configs/scans/url.yaml +12 -0
  17. secator/configs/workflows/__init__.py +0 -0
  18. secator/configs/workflows/cidr_recon.yaml +28 -0
  19. secator/configs/workflows/code_scan.yaml +11 -0
  20. secator/configs/workflows/host_recon.yaml +41 -0
  21. secator/configs/workflows/port_scan.yaml +34 -0
  22. secator/configs/workflows/subdomain_recon.yaml +33 -0
  23. secator/configs/workflows/url_crawl.yaml +29 -0
  24. secator/configs/workflows/url_dirsearch.yaml +29 -0
  25. secator/configs/workflows/url_fuzz.yaml +35 -0
  26. secator/configs/workflows/url_nuclei.yaml +11 -0
  27. secator/configs/workflows/url_vuln.yaml +55 -0
  28. secator/configs/workflows/user_hunt.yaml +10 -0
  29. secator/configs/workflows/wordpress.yaml +14 -0
  30. secator/decorators.py +346 -0
  31. secator/definitions.py +183 -0
  32. secator/exporters/__init__.py +12 -0
  33. secator/exporters/_base.py +3 -0
  34. secator/exporters/csv.py +29 -0
  35. secator/exporters/gdrive.py +118 -0
  36. secator/exporters/json.py +14 -0
  37. secator/exporters/table.py +7 -0
  38. secator/exporters/txt.py +24 -0
  39. secator/hooks/__init__.py +0 -0
  40. secator/hooks/mongodb.py +212 -0
  41. secator/output_types/__init__.py +24 -0
  42. secator/output_types/_base.py +95 -0
  43. secator/output_types/exploit.py +50 -0
  44. secator/output_types/ip.py +33 -0
  45. secator/output_types/port.py +45 -0
  46. secator/output_types/progress.py +35 -0
  47. secator/output_types/record.py +34 -0
  48. secator/output_types/subdomain.py +42 -0
  49. secator/output_types/tag.py +46 -0
  50. secator/output_types/target.py +30 -0
  51. secator/output_types/url.py +76 -0
  52. secator/output_types/user_account.py +41 -0
  53. secator/output_types/vulnerability.py +97 -0
  54. secator/report.py +95 -0
  55. secator/rich.py +123 -0
  56. secator/runners/__init__.py +12 -0
  57. secator/runners/_base.py +873 -0
  58. secator/runners/_helpers.py +154 -0
  59. secator/runners/command.py +674 -0
  60. secator/runners/scan.py +67 -0
  61. secator/runners/task.py +107 -0
  62. secator/runners/workflow.py +137 -0
  63. secator/serializers/__init__.py +8 -0
  64. secator/serializers/dataclass.py +33 -0
  65. secator/serializers/json.py +15 -0
  66. secator/serializers/regex.py +17 -0
  67. secator/tasks/__init__.py +10 -0
  68. secator/tasks/_categories.py +304 -0
  69. secator/tasks/cariddi.py +102 -0
  70. secator/tasks/dalfox.py +66 -0
  71. secator/tasks/dirsearch.py +88 -0
  72. secator/tasks/dnsx.py +56 -0
  73. secator/tasks/dnsxbrute.py +34 -0
  74. secator/tasks/feroxbuster.py +89 -0
  75. secator/tasks/ffuf.py +85 -0
  76. secator/tasks/fping.py +44 -0
  77. secator/tasks/gau.py +43 -0
  78. secator/tasks/gf.py +34 -0
  79. secator/tasks/gospider.py +71 -0
  80. secator/tasks/grype.py +78 -0
  81. secator/tasks/h8mail.py +80 -0
  82. secator/tasks/httpx.py +104 -0
  83. secator/tasks/katana.py +128 -0
  84. secator/tasks/maigret.py +78 -0
  85. secator/tasks/mapcidr.py +32 -0
  86. secator/tasks/msfconsole.py +176 -0
  87. secator/tasks/naabu.py +52 -0
  88. secator/tasks/nmap.py +341 -0
  89. secator/tasks/nuclei.py +97 -0
  90. secator/tasks/searchsploit.py +53 -0
  91. secator/tasks/subfinder.py +40 -0
  92. secator/tasks/wpscan.py +177 -0
  93. secator/utils.py +404 -0
  94. secator/utils_test.py +183 -0
  95. secator-0.1.0.dist-info/METADATA +379 -0
  96. secator-0.1.0.dist-info/RECORD +99 -0
  97. secator-0.1.0.dist-info/WHEEL +5 -0
  98. secator-0.1.0.dist-info/entry_points.txt +2 -0
  99. secator-0.1.0.dist-info/licenses/LICENSE +60 -0
@@ -0,0 +1,67 @@
1
+ import logging
2
+
3
+ from secator.config import ConfigLoader
4
+ from secator.exporters import CsvExporter, JsonExporter
5
+ from secator.runners._base import Runner
6
+ from secator.runners._helpers import run_extractors
7
+ from secator.runners.workflow import Workflow
8
+ from secator.rich import console
9
+ from secator.output_types import Target
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
class Scan(Runner):

    # Exporters used when none are explicitly configured.
    default_exporters = [
        JsonExporter,
        CsvExporter
    ]

    @classmethod
    def delay(cls, *args, **kwargs):
        """Run the scan remotely on a Celery worker and return the async result."""
        from secator.celery import run_scan
        return run_scan.delay(args=args, kwargs=kwargs)

    def yielder(self):
        """Run scan.

        Yields:
            dict: Item yielded from individual workflow tasks.
        """
        # Emit every input target as a Target item first.
        for target in self.targets:
            yield Target(name=target, _source=self.config.name, _type='target', _context=self.context)

        # Run each workflow declared in the scan config, in order.
        for name, workflow_opts in self.config.workflows.items():

            # Expand targets from previous results and extract workflow opts.
            targets, workflow_opts = run_extractors(self.results, workflow_opts or {}, self.targets)
            if not targets:
                console.log(f'No targets were specified for workflow {name}. Skipping.')
                continue

            # Build run options for the child workflow, output formatting included.
            opts = self.run_opts.copy()
            opts['reports_folder'] = self.reports_folder
            opts.update({
                'json': opts.get('json', False),
                'print_item': False,
                'print_start': True,
                'print_run_summary': True,
                'print_progress': self.sync
            })

            # Instantiate the child workflow and stream its results.
            child = Workflow(
                ConfigLoader(name=f'workflows/{name}'),
                targets,
                results=[],
                run_opts=opts,
                hooks=self._hooks,
                context=self.context.copy())
            yield from child
@@ -0,0 +1,107 @@
1
+ from secator.definitions import DEBUG
2
+ from secator.output_types import Target
3
+ from secator.runners import Runner
4
+ from secator.utils import discover_tasks
5
+
6
+
7
class Task(Runner):

    # Tasks do not write report files by default (scans / workflows do).
    default_exporters = []
    # Hooks are managed by the wrapped concrete task class, not by this runner.
    enable_hooks = False

    @classmethod
    def delay(cls, *args, **kwargs):
        """Run the task remotely on a Celery worker.

        FIX: the decorator was missing even though the first parameter was
        named `cls`; added @classmethod for consistency with Scan.delay and
        Workflow.delay.

        Returns:
            celery.result.AsyncResult: Celery async result.
        """
        from secator.celery import run_task
        return run_task.apply_async(kwargs={'args': args, 'kwargs': kwargs}, queue='celery')

    def yielder(self):
        """Run task.

        Yields:
            Output items produced by the underlying task run.
        """
        # Resolve the concrete task class from the config name.
        task_cls = Task.get_task_class(self.config.name)

        # Run opts
        run_opts = self.run_opts.copy()
        run_opts.pop('output', None)
        dry_run = run_opts.get('show', False)
        if dry_run:
            self.print_item_count = False

        # Output formatting opts
        fmt_opts = {
            'json': run_opts.get('json', False),
            'print_cmd': True,
            'print_cmd_prefix': not self.sync,
            'print_input_file': DEBUG > 0,
            'print_item': True,
            'print_item_count': not self.sync and not dry_run,
            'print_line': self.sync and not self.output_quiet,
        }
        run_opts.update(fmt_opts)

        # Expose the concrete task's output types on this runner.
        self.output_types = task_cls.output_types

        # Forward hooks, context and reports folder to the concrete task.
        run_opts['hooks'] = {task_cls: self.hooks}
        run_opts['context'] = self.context
        run_opts['reports_folder'] = self.reports_folder

        # Run the task either in-process (sync) or on a Celery worker.
        if self.sync:
            task = task_cls(self.targets, **run_opts)
            if dry_run:  # dry run: the command is printed, nothing is executed
                return
        else:
            result = task_cls.delay(self.targets, **run_opts)
            task = self.process_live_tasks(
                result,
                description=False,
                results_only=True,
                print_remote_status=self.print_remote_status)

        # Yield task results
        yield from task

        # Yield the input targets as Target items.
        for target in self.targets:
            yield Target(name=target, _source=self.config.name, _type='target', _context=self.context)

    @staticmethod
    def get_task_class(name):
        """Get task class from a name.

        Args:
            name (str): Task name, optionally suffixed with '/<alias>'.

        Returns:
            type: Matching task class.

        Raises:
            ValueError: If no discovered task class matches the name.
        """
        if '/' in name:
            name = name.split('/')[0]
        for task_cls in discover_tasks():
            if task_cls.__name__ == name:
                return task_cls
        raise ValueError(f'Task {name} not found. Aborting.')

    @staticmethod
    def get_tasks_from_conf(config):
        """Get flat list of task names from a config. Ignore hierarchy and keywords.

        TODO: Add hierarchy tree / add make flow diagrams.

        Args:
            config (dict): Tasks config, possibly nested under '_group' / '_chain'.

        Returns:
            list: Flat list of task names.
        """
        tasks = []
        for name, opts in config.items():
            # '_group' and '_chain' nodes hold nested task configs: recurse.
            # (The original had two identical branches for these keys.)
            if name in ('_group', '_chain'):
                tasks.extend(Task.get_tasks_from_conf(opts))
            else:
                if '/' in name:
                    name = name.split('/')[0]
                tasks.append(name)
        return tasks
@@ -0,0 +1,137 @@
1
+ from secator.definitions import DEBUG
2
+ from secator.exporters import CsvExporter, JsonExporter
3
+ from secator.output_types import Target
4
+ from secator.runners._base import Runner
5
+ from secator.runners.task import Task
6
+ from secator.utils import merge_opts
7
+
8
+
9
class Workflow(Runner):

    # Exporters used when none are explicitly configured.
    default_exporters = [
        JsonExporter,
        CsvExporter
    ]

    @classmethod
    def delay(cls, *args, **kwargs):
        """Run the workflow remotely on a Celery worker and return the async result."""
        from secator.celery import run_workflow
        return run_workflow.delay(args=args, kwargs=kwargs)

    def yielder(self):
        """Run workflow.

        Yields:
            Output items produced by the workflow's tasks.
        """
        # Emit the input targets as Target items first.
        for target in self.targets:
            yield Target(name=target, _source=self.config.name, _type='target', _context=self.context)

        # Task opts
        task_run_opts = self.run_opts.copy()
        task_fmt_opts = {
            'json': task_run_opts.get('json', False),
            'print_cmd': True,
            'print_cmd_prefix': not self.sync,
            'print_description': self.sync,
            'print_input_file': DEBUG > 0,
            'print_item': True,
            'print_item_count': True,
            'print_line': not self.sync,
            'print_progress': self.sync,
        }

        # Construct run opts
        task_run_opts['hooks'] = self._hooks.get(Task, {})
        task_run_opts['reports_folder'] = self.reports_folder
        task_run_opts.update(task_fmt_opts)

        # Build Celery workflow
        workflow = self.build_celery_workflow(run_opts=task_run_opts, results=self.results)

        # Run Celery workflow and get results
        if self.sync:
            results = workflow.apply().get()
        else:
            result = workflow()
            self.result = result
            results = self.process_live_tasks(result, results_only=True, print_remote_status=self.print_remote_status)

        # Get workflow results
        yield from results

    def build_celery_workflow(self, run_opts=None, results=None):
        """Build Celery workflow.

        Args:
            run_opts (dict, Optional): Run options forwarded to each task.
            results (list, Optional): Previous results to seed the chain with.

        Returns:
            celery.chain: Celery task chain.
        """
        # FIX: mutable default arguments ({} / []) replaced with None sentinels.
        # Also fixed the malformed docstring (""""Build ...).
        run_opts = run_opts if run_opts is not None else {}
        results = results if results is not None else []
        from celery import chain
        from secator.celery import forward_results
        sigs = self.get_tasks(
            self.config.tasks.toDict(),
            self.targets,
            self.config.options,
            run_opts)
        # Wrap the chain with forward_results so prior results flow in and
        # the final results are collected on the 'io' queue.
        sigs = [forward_results.si(results).set(queue='io')] + sigs + [forward_results.s().set(queue='io')]
        workflow = chain(*sigs)
        return workflow

    def get_tasks(self, obj, targets, workflow_opts, run_opts):
        """Get tasks recursively as Celery chains / chords.

        Args:
            obj (dict): Tasks config, possibly nested under '_group' / '_chain'.
            targets (list): List of targets.
            workflow_opts (dict): Workflow options.
            run_opts (dict): Run options.

        Returns:
            list: List of Celery signatures.
        """
        from celery import chain, chord
        from secator.celery import forward_results
        sigs = []
        for task_name, task_opts in obj.items():
            # Task opts can be None
            task_opts = task_opts or {}

            # If it's a group, process the sublevel tasks as a Celery chord.
            if task_name == '_group':
                tasks = self.get_tasks(
                    task_opts,
                    targets,
                    workflow_opts,
                    run_opts
                )
                sig = chord((tasks), forward_results.s().set(queue='io'))
            # If it's a chain, process the sublevel tasks as a Celery chain.
            elif task_name == '_chain':
                tasks = self.get_tasks(
                    task_opts,
                    targets,
                    workflow_opts,
                    run_opts
                )
                sig = chain(*tasks)
            else:
                # Get task class
                task = Task.get_task_class(task_name)

                # Merge task options (order of priority with overrides)
                opts = merge_opts(workflow_opts, task_opts, run_opts)

                # Add task context and hooks to options
                opts['hooks'] = {task: self._hooks.get(Task, {})}
                opts['context'] = self.context.copy()
                opts['name'] = task_name

                # Create the task signature, routed to the task's own queue.
                sig = task.s(targets, **opts).set(queue=task.profile)
                self.output_types.extend(task.output_types)
            sigs.append(sig)
        return sigs
@@ -0,0 +1,8 @@
1
+ __all__ = [
2
+ 'JSONSerializer',
3
+ 'RegexSerializer',
4
+ 'DataclassEncoder',
5
+ ]
6
+ from secator.serializers.json import JSONSerializer
7
+ from secator.serializers.regex import RegexSerializer
8
+ from secator.serializers.dataclass import DataclassEncoder
@@ -0,0 +1,33 @@
1
+ import json
2
+ from secator.output_types import OUTPUT_TYPES
3
+
4
+
5
class DataclassEncoder(json.JSONEncoder):
    """JSON encoder that serializes any object exposing a `toDict()` method."""

    def default(self, obj):
        # Objects with a toDict() method (secator output types) serialize
        # through it; anything else defers to the stock encoder, which raises
        # TypeError for unsupported types.
        to_dict = getattr(obj, 'toDict', None)
        if to_dict is not None:
            return to_dict()
        return super().default(obj)
11
+
12
+
13
def get_output_cls(type):
    """Return the output type class whose name matches `type`.

    Args:
        type (str): Output type name (as returned by each class's get_name()).
            Parameter name shadows the builtin but is kept for keyword-call
            compatibility.

    Returns:
        type | None: Matching class from OUTPUT_TYPES, or None if no match.
    """
    # next() with a default replaces the original list-comprehension +
    # IndexError control flow: no intermediate list, no exception handling.
    return next((cls for cls in OUTPUT_TYPES if cls.get_name() == type), None)
18
+
19
+
20
def dataclass_decoder(obj):
    """json object_hook that revives secator output types from dicts.

    Dicts carrying a '_type' marker are loaded back into the matching output
    type class; anything else is returned unchanged.
    """
    if '_type' not in obj:
        return obj
    output_cls = get_output_cls(obj['_type'])
    return output_cls.load(obj) if output_cls else obj
26
+
27
+
28
def dumps_dataclass(obj, indent=None):
    """Serialize `obj` to a JSON string, encoding objects that expose
    `toDict()` via DataclassEncoder.

    Args:
        obj: Object to serialize.
        indent (int, Optional): Indentation passed through to json.dumps.

    Returns:
        str: JSON string.
    """
    return json.dumps(obj, cls=DataclassEncoder, indent=indent)
30
+
31
+
32
def loads_dataclass(obj):
    """Deserialize a JSON string, reviving output types via dataclass_decoder.

    Args:
        obj (str): JSON string (e.g. produced by dumps_dataclass).

    Returns:
        Deserialized object, with '_type'-tagged dicts loaded back into their
        output type classes.
    """
    return json.loads(obj, object_hook=dataclass_decoder)
@@ -0,0 +1,15 @@
1
+ import yaml
2
+
3
+
4
class JSONSerializer:
    """Extract and parse the first '{...}' span found in a line of tool output."""

    def run(self, line):
        # Locate the outermost braces; give up when either one is missing.
        start = line.find('{')
        end = line.rfind('}')
        if start == -1 or end == -1:
            return None
        # NOTE: parsing goes through yaml.safe_load, which accepts JSON as a
        # subset while tolerating some non-strict input; unparseable spans
        # yield None instead of raising.
        candidate = line[start:end + 1]
        try:
            return yaml.safe_load(candidate)
        except yaml.YAMLError:
            return None
@@ -0,0 +1,17 @@
1
+ import re
2
+
3
+
4
class RegexSerializer:
    """Parse lines with a regular expression and extract named groups."""

    def __init__(self, regex, fields=None):
        """
        Args:
            regex (str): Regular expression with named groups.
            fields (list, Optional): Group names to extract. Defaults to [].
        """
        # FIX: `fields=[]` (mutable default argument) replaced with a None
        # sentinel so instances never share one list object.
        self.regex = re.compile(regex)
        self.fields = fields if fields is not None else []

    def run(self, line):
        """Match `line` against the regex and extract the configured fields.

        Args:
            line (str): Line to parse.

        Returns:
            dict | None: {field: captured text} for each configured field, or
            None when the regex does not match at the start of the line.
        """
        match = self.regex.match(line)
        if not match:
            return None
        return {field: match.group(field) for field in self.fields}
@@ -0,0 +1,10 @@
1
from secator.utils import discover_internal_tasks, discover_external_tasks

# Discover task classes shipped with secator and externally contributed ones.
INTERNAL_TASKS = discover_internal_tasks()
EXTERNAL_TASKS = discover_external_tasks()
ALL_TASKS = INTERNAL_TASKS + EXTERNAL_TASKS

# Public API of the package: every discovered task class name.
__all__ = [
    cls.__name__
    for cls in ALL_TASKS
]

# Re-export each internal task class at package level. The import is built
# dynamically with exec() because the module name matches the class name
# (e.g. `from .httpx import httpx`); inputs come from discover_internal_tasks(),
# not from untrusted data.
for cls in INTERNAL_TASKS:
    exec(f'from .{cls.__name__} import {cls.__name__}')
@@ -0,0 +1,304 @@
1
+ import json
2
+ import logging
3
+ import os
4
+
5
+ import requests
6
+ from bs4 import BeautifulSoup
7
+ from cpe import CPE
8
+
9
+ from secator.definitions import (CIDR_RANGE, CONFIDENCE, CVSS_SCORE,
10
+ DEFAULT_HTTP_WORDLIST, DEFAULT_SKIP_CVE_SEARCH, DELAY, DEPTH, DESCRIPTION,
11
+ FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
12
+ FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST, ID,
13
+ MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
14
+ MATCH_WORDS, METHOD, NAME, PATH, PROVIDER,
15
+ PROXY, RATE_LIMIT, REFERENCES, RETRIES,
16
+ SEVERITY, TAGS, DATA_FOLDER, THREADS, TIMEOUT,
17
+ URL, USER_AGENT, USERNAME, WORDLIST)
18
+ from secator.output_types import (Ip, Port, Subdomain, Tag, Url, UserAccount,
19
+ Vulnerability)
20
+ from secator.rich import console
21
+ from secator.runners import Command
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
# Shared CLI option definitions, keyed by option name. Each task category
# below (HTTP, crawlers, fuzzers, recon, vuln) picks the subset it supports.
OPTS = {
    HEADER: {'type': str, 'help': 'Custom header to add to each request in the form "KEY1:VALUE1; KEY2:VALUE2"'},
    DELAY: {'type': float, 'short': 'd', 'help': 'Delay to add between each request'},  # FIX: grammar ("each requests")
    DEPTH: {'type': int, 'help': 'Scan depth', 'default': 2},
    FILTER_CODES: {'type': str, 'short': 'fc', 'help': 'Filter out responses with HTTP codes'},
    FILTER_REGEX: {'type': str, 'short': 'fr', 'help': 'Filter out responses with regular expression'},
    FILTER_SIZE: {'type': str, 'short': 'fs', 'help': 'Filter out responses with size'},
    FILTER_WORDS: {'type': str, 'short': 'fw', 'help': 'Filter out responses with word count'},
    FOLLOW_REDIRECT: {'is_flag': True, 'short': 'frd', 'help': 'Follow HTTP redirects'},
    MATCH_CODES: {'type': str, 'short': 'mc', 'help': 'Match HTTP status codes e.g "201,300,301"'},
    MATCH_REGEX: {'type': str, 'short': 'mr', 'help': 'Match responses with regular expression'},
    MATCH_SIZE: {'type': str, 'short': 'ms', 'help': 'Match responses with size'},  # FIX: typo "respones"
    MATCH_WORDS: {'type': str, 'short': 'mw', 'help': 'Match responses with word count'},
    METHOD: {'type': str, 'help': 'HTTP method to use for requests'},
    PROXY: {'type': str, 'help': 'HTTP(s) / SOCKS5 proxy'},
    RATE_LIMIT: {'type': int, 'short': 'rl', 'help': 'Rate limit, i.e max number of requests per second'},
    RETRIES: {'type': int, 'help': 'Retries'},
    THREADS: {'type': int, 'help': 'Number of threads to run', 'default': 50},
    TIMEOUT: {'type': int, 'help': 'Request timeout'},
    USER_AGENT: {'type': str, 'short': 'ua', 'help': 'User agent, e.g "Mozilla Firefox 1.0"'},
    WORDLIST: {'type': str, 'short': 'w', 'default': DEFAULT_HTTP_WORDLIST, 'help': 'Wordlist to use'}
}

# Options common to all HTTP-based tasks.
OPTS_HTTP = [
    HEADER, DELAY, FOLLOW_REDIRECT, METHOD, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT
]

# HTTP crawlers additionally support depth and match / filter options.
OPTS_HTTP_CRAWLERS = OPTS_HTTP + [
    DEPTH, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, FILTER_REGEX, FILTER_CODES, FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT,
    MATCH_CODES
]

# HTTP fuzzers additionally take a wordlist.
OPTS_HTTP_FUZZERS = OPTS_HTTP_CRAWLERS + [WORDLIST]

# Recon tasks: network-level tuning options only.
OPTS_RECON = [
    DELAY, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT
]

# Vulnerability scanners: HTTP tuning options.
OPTS_VULN = [
    HEADER, DELAY, FOLLOW_REDIRECT, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT
]
66
+
67
+
68
+ #---------------#
69
+ # HTTP category #
70
+ #---------------#
71
+
72
class Http(Command):
    """Base category for generic HTTP tasks: consumes URLs, emits Url items."""
    # NOTE(review): uses the crawler option set (OPTS_HTTP_CRAWLERS) rather
    # than OPTS_HTTP — confirm this is intended and not a copy-paste slip.
    meta_opts = {k: OPTS[k] for k in OPTS_HTTP_CRAWLERS}
    input_type = URL
    output_types = [Url]
76
+
77
+
78
class HttpCrawler(Command):
    """Base category for HTTP crawler tasks: consumes URLs, emits Url items."""
    meta_opts = {k: OPTS[k] for k in OPTS_HTTP_CRAWLERS}
    input_type = URL
    output_types = [Url]
82
+
83
+
84
class HttpFuzzer(Command):
    """Base category for HTTP fuzzing tasks: crawler options plus a wordlist."""
    meta_opts = {k: OPTS[k] for k in OPTS_HTTP_FUZZERS}
    input_type = URL
    output_types = [Url]
88
+
89
+
90
+ #----------------#
91
+ # Recon category #
92
+ #----------------#
93
+
94
class Recon(Command):
    """Base category for recon tasks; subclasses narrow input / output types."""
    meta_opts = {k: OPTS[k] for k in OPTS_RECON}
    output_types = [Subdomain, UserAccount, Ip, Port]
97
+
98
+
99
class ReconDns(Recon):
    """DNS recon: consumes hosts, emits Subdomain items."""
    input_type = HOST
    output_types = [Subdomain]
102
+
103
+
104
class ReconUser(Recon):
    """User recon: consumes usernames, emits UserAccount items."""
    input_type = USERNAME
    output_types = [UserAccount]
107
+
108
+
109
class ReconIp(Recon):
    """IP recon: consumes CIDR ranges, emits Ip items."""
    input_type = CIDR_RANGE
    output_types = [Ip]
112
+
113
+
114
class ReconPort(Recon):
    """Port recon: consumes hosts, emits Port items."""
    input_type = HOST
    output_types = [Port]
117
+
118
+
119
+ #---------------#
120
+ # Vuln category #
121
+ #---------------#
122
+
123
class Vuln(Command):
    """Base category for vulnerability scanners. Emits Vulnerability items."""
    meta_opts = {k: OPTS[k] for k in OPTS_VULN}
    output_types = [Vulnerability]

    @staticmethod
    def lookup_local_cve(cve_id):
        """Load CVE data from the local CVE store, if present.

        Args:
            cve_id (str): CVE ID in the form CVE-*.

        Returns:
            dict | None: CVE data, or None when not available locally.
        """
        cve_path = f'{DATA_FOLDER}/cves/{cve_id}.json'
        if os.path.exists(cve_path):
            with open(cve_path, 'r') as f:
                return json.load(f)
        return None

    @staticmethod
    def lookup_cve(cve_id, cpes=None):
        """Search for a CVE in local db or using cve.circl.lu and return vulnerability data.

        Args:
            cve_id (str): CVE ID in the form CVE-*
            cpes (list, Optional): CPEs to match for.

        Returns:
            dict | None: vulnerability data, or None when not found or when the
            given CPEs do not match the CVE's vulnerable products.
        """
        # FIX: mutable default argument (cpes=[]) replaced with a None sentinel.
        cpes = cpes or []
        cve_info = Vuln.lookup_local_cve(cve_id)
        if not cve_info:
            if DEFAULT_SKIP_CVE_SEARCH:
                logger.debug(f'{cve_id} not found locally, and DEFAULT_SKIP_CVE_SEARCH is set: ignoring online search.')
                return None
            try:
                cve_info = requests.get(f'https://cve.circl.lu/api/cve/{cve_id}', timeout=5).json()
                if not cve_info:
                    console.print(f'Could not fetch CVE info for cve {cve_id}. Skipping.', highlight=False)
                    return None
            except Exception:
                console.print(f'Could not fetch CVE info for cve {cve_id}. Skipping.', highlight=False)
                return None

        # Match the CPE string against the affected products CPE FS strings from the CVE data if a CPE was passed.
        # This allow to limit the number of False positives (high) that we get from nmap NSE vuln scripts like vulscan
        # and ensure we keep only right matches.
        # The check is not executed if no CPE was passed (sometimes nmap cannot properly detect a CPE) or if the CPE
        # version cannot be determined.
        cpe_match = False
        tags = []
        if cpes:
            for cpe in cpes:
                cpe_obj = CPE(cpe)
                cpe_fs = cpe_obj.as_fs()
                vulnerable_fs = cve_info['vulnerable_product']
                for fs in vulnerable_fs:
                    if fs == cpe_fs:
                        cpe_match = True
                        tags.append('cpe-match')
            if not cpe_match:
                return None

        # Parse CVE id and CVSS
        name = id = cve_info['id']
        cvss = cve_info.get('cvss') or 0

        # Get description, stripping the CVE id out of it.
        description = cve_info.get('summary')
        if description is not None:
            description = description.replace(id, '').strip()

        # Get references, adding the circl.lu page for this CVE.
        # FIX: the circl.lu URL was previously both appended here AND prepended
        # to the final references list, duplicating it in the output.
        references = cve_info.get(REFERENCES, [])
        cve_ref_url = f'https://cve.circl.lu/cve/{id}'
        references.append(cve_ref_url)

        # Get CWE ID
        # FIX: inverted condition — the original appended the CWE tag only when
        # it was None, adding a literal None to the tags list.
        vuln_cwe_id = cve_info.get('cwe')
        if vuln_cwe_id is not None:
            tags.append(vuln_cwe_id)

        # Parse capecs for a better vuln name / type
        capecs = cve_info.get('capec', [])
        if capecs:
            name = capecs[0]['name']

        # Parse ovals for a better vuln name / type
        ovals = cve_info.get('oval', [])
        if ovals:
            if description == 'none':
                description = ovals[0]['title']
            family = ovals[0]['family']
            tags.append(family)

        # Set vulnerability severity based on CVSS score
        severity = None
        if cvss:
            if cvss < 4:
                severity = 'low'
            elif cvss < 7:
                severity = 'medium'
            elif cvss < 9:
                severity = 'high'
            else:
                severity = 'critical'

        # Confidence is high only when a CPE matched the vulnerable products.
        confidence = 'low' if not cpe_match else 'high'
        return {
            ID: id,
            NAME: name,
            PROVIDER: 'cve.circl.lu',
            SEVERITY: severity,
            CVSS_SCORE: cvss,
            TAGS: tags,
            REFERENCES: references,
            DESCRIPTION: description,
            CONFIDENCE: confidence
        }

    @staticmethod
    def lookup_ghsa(ghsa_id):
        """Search for a GHSA on Github and return associated CVE vulnerability data.

        Args:
            ghsa_id (str): GHSA ID in the form GHSA-*.

        Returns:
            dict | None: vulnerability data, or None when no CVE data was found.
        """
        reference = f'https://github.com/advisories/{ghsa_id}'
        # FIX: added a timeout so a stalled request cannot hang the scan forever.
        response = requests.get(reference, timeout=5)
        soup = BeautifulSoup(response.text, 'lxml')
        sidebar_items = soup.find_all('div', {'class': 'discussion-sidebar-item'})
        # NOTE(review): assumes the CVE id lives in the third sidebar item of
        # the advisory page — brittle against GitHub layout changes; confirm.
        cve_id = sidebar_items[2].find('div').text.strip()
        data = Vuln.lookup_cve(cve_id)
        if data:
            data[TAGS].append('ghsa')
            return data
        return None
275
+
276
+
277
class VulnHttp(Vuln):
    """Vulnerability scanners that take a host as input."""
    input_type = HOST
279
+
280
+
281
class VulnCode(Vuln):
    """Vulnerability scanners that take a filesystem path as input."""
    input_type = PATH
283
+
284
+
285
class VulnMulti(Vuln):
    """Multi-purpose vulnerability scanners that take a host as input."""
    input_type = HOST
    output_types = [Vulnerability]
288
+
289
+
290
+ #--------------#
291
+ # Tag category #
292
+ #--------------#
293
+
294
class Tagger(Command):
    """Category for tasks that consume URLs and emit Tag items."""
    input_type = URL
    output_types = [Tag]
297
+
298
+ #----------------#
299
+ # osint category #
300
+ #----------------#
301
+
302
+
303
class OSInt(Command):
    """Category for OSINT tasks; emit UserAccount items."""
    output_types = [UserAccount]