secator 0.1.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of secator has been flagged as potentially problematic.
Files changed (99)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +421 -0
  4. secator/cli.py +927 -0
  5. secator/config.py +137 -0
  6. secator/configs/__init__.py +0 -0
  7. secator/configs/profiles/__init__.py +0 -0
  8. secator/configs/profiles/aggressive.yaml +7 -0
  9. secator/configs/profiles/default.yaml +9 -0
  10. secator/configs/profiles/stealth.yaml +7 -0
  11. secator/configs/scans/__init__.py +0 -0
  12. secator/configs/scans/domain.yaml +18 -0
  13. secator/configs/scans/host.yaml +14 -0
  14. secator/configs/scans/network.yaml +17 -0
  15. secator/configs/scans/subdomain.yaml +8 -0
  16. secator/configs/scans/url.yaml +12 -0
  17. secator/configs/workflows/__init__.py +0 -0
  18. secator/configs/workflows/cidr_recon.yaml +28 -0
  19. secator/configs/workflows/code_scan.yaml +11 -0
  20. secator/configs/workflows/host_recon.yaml +41 -0
  21. secator/configs/workflows/port_scan.yaml +34 -0
  22. secator/configs/workflows/subdomain_recon.yaml +33 -0
  23. secator/configs/workflows/url_crawl.yaml +29 -0
  24. secator/configs/workflows/url_dirsearch.yaml +29 -0
  25. secator/configs/workflows/url_fuzz.yaml +35 -0
  26. secator/configs/workflows/url_nuclei.yaml +11 -0
  27. secator/configs/workflows/url_vuln.yaml +55 -0
  28. secator/configs/workflows/user_hunt.yaml +10 -0
  29. secator/configs/workflows/wordpress.yaml +14 -0
  30. secator/decorators.py +346 -0
  31. secator/definitions.py +183 -0
  32. secator/exporters/__init__.py +12 -0
  33. secator/exporters/_base.py +3 -0
  34. secator/exporters/csv.py +29 -0
  35. secator/exporters/gdrive.py +118 -0
  36. secator/exporters/json.py +14 -0
  37. secator/exporters/table.py +7 -0
  38. secator/exporters/txt.py +24 -0
  39. secator/hooks/__init__.py +0 -0
  40. secator/hooks/mongodb.py +212 -0
  41. secator/output_types/__init__.py +24 -0
  42. secator/output_types/_base.py +95 -0
  43. secator/output_types/exploit.py +50 -0
  44. secator/output_types/ip.py +33 -0
  45. secator/output_types/port.py +45 -0
  46. secator/output_types/progress.py +35 -0
  47. secator/output_types/record.py +34 -0
  48. secator/output_types/subdomain.py +42 -0
  49. secator/output_types/tag.py +46 -0
  50. secator/output_types/target.py +30 -0
  51. secator/output_types/url.py +76 -0
  52. secator/output_types/user_account.py +41 -0
  53. secator/output_types/vulnerability.py +97 -0
  54. secator/report.py +95 -0
  55. secator/rich.py +123 -0
  56. secator/runners/__init__.py +12 -0
  57. secator/runners/_base.py +873 -0
  58. secator/runners/_helpers.py +154 -0
  59. secator/runners/command.py +674 -0
  60. secator/runners/scan.py +67 -0
  61. secator/runners/task.py +107 -0
  62. secator/runners/workflow.py +137 -0
  63. secator/serializers/__init__.py +8 -0
  64. secator/serializers/dataclass.py +33 -0
  65. secator/serializers/json.py +15 -0
  66. secator/serializers/regex.py +17 -0
  67. secator/tasks/__init__.py +10 -0
  68. secator/tasks/_categories.py +304 -0
  69. secator/tasks/cariddi.py +102 -0
  70. secator/tasks/dalfox.py +66 -0
  71. secator/tasks/dirsearch.py +88 -0
  72. secator/tasks/dnsx.py +56 -0
  73. secator/tasks/dnsxbrute.py +34 -0
  74. secator/tasks/feroxbuster.py +89 -0
  75. secator/tasks/ffuf.py +85 -0
  76. secator/tasks/fping.py +44 -0
  77. secator/tasks/gau.py +43 -0
  78. secator/tasks/gf.py +34 -0
  79. secator/tasks/gospider.py +71 -0
  80. secator/tasks/grype.py +78 -0
  81. secator/tasks/h8mail.py +80 -0
  82. secator/tasks/httpx.py +104 -0
  83. secator/tasks/katana.py +128 -0
  84. secator/tasks/maigret.py +78 -0
  85. secator/tasks/mapcidr.py +32 -0
  86. secator/tasks/msfconsole.py +176 -0
  87. secator/tasks/naabu.py +52 -0
  88. secator/tasks/nmap.py +341 -0
  89. secator/tasks/nuclei.py +97 -0
  90. secator/tasks/searchsploit.py +53 -0
  91. secator/tasks/subfinder.py +40 -0
  92. secator/tasks/wpscan.py +177 -0
  93. secator/utils.py +404 -0
  94. secator/utils_test.py +183 -0
  95. secator-0.1.0.dist-info/METADATA +379 -0
  96. secator-0.1.0.dist-info/RECORD +99 -0
  97. secator-0.1.0.dist-info/WHEEL +5 -0
  98. secator-0.1.0.dist-info/entry_points.txt +2 -0
  99. secator-0.1.0.dist-info/licenses/LICENSE +60 -0
secator/tasks/cariddi.py ADDED
@@ -0,0 +1,102 @@
+ import json
+
+ from secator.decorators import task
+ from secator.definitions import (DELAY, DEPTH, FILTER_CODES, FILTER_REGEX,
+                                  FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT,
+                                  HEADER, MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED,
+                                  OPT_PIPE_INPUT, PROXY, RATE_LIMIT, RETRIES,
+                                  THREADS, TIMEOUT, URL, USER_AGENT)
+ from secator.output_types import Tag, Url
+ from secator.tasks._categories import HttpCrawler
+
+
+ @task()
+ class cariddi(HttpCrawler):
+     """Crawl endpoints, secrets, api keys, extensions, tokens..."""
+     cmd = 'cariddi -info -s -err -e -ext 1'
+     input_type = URL
+     input_flag = OPT_PIPE_INPUT
+     output_types = [Url, Tag]
+     file_flag = OPT_PIPE_INPUT
+     json_flag = '-json'
+     opt_key_map = {
+         HEADER: 'headers',
+         DELAY: 'd',
+         DEPTH: OPT_NOT_SUPPORTED,
+         FILTER_CODES: OPT_NOT_SUPPORTED,
+         FILTER_REGEX: OPT_NOT_SUPPORTED,
+         FILTER_SIZE: OPT_NOT_SUPPORTED,
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         MATCH_CODES: OPT_NOT_SUPPORTED,
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 'c',
+         TIMEOUT: 't',
+         USER_AGENT: 'ua'
+     }
+     item_loaders = []
+     install_cmd = 'go install -v github.com/edoardottt/cariddi/cmd/cariddi@latest'
+     encoding = 'ansi'
+     proxychains = False
+     proxy_socks5 = True  # with leaks... https://github.com/edoardottt/cariddi/issues/122
+     proxy_http = True  # with leaks... https://github.com/edoardottt/cariddi/issues/122
+     profile = 'cpu'
+
+     @staticmethod
+     def item_loader(self, line):
+         items = []
+         try:
+             item = json.loads(line)
+             url_item = {k: v for k, v in item.items() if k != 'matches'}
+             url = url_item[URL]
+             items.append(url_item)
+             matches = item.get('matches', {})
+             params = matches.get('parameters', [])
+             errors = matches.get('errors', [])
+             secrets = matches.get('secrets', [])
+             infos = matches.get('infos', [])
+
+             for param in params:
+                 param_name = param['name']
+                 for attack in param['attacks']:
+                     extra_data = {'param': param_name, 'source': 'url'}
+                     item = {
+                         'name': attack + ' param',
+                         'match': url,
+                         'extra_data': extra_data
+                     }
+                     items.append(item)
+
+             for error in errors:
+                 match = error['match']
+                 match = (match[:1000] + '...TRUNCATED') if len(match) > 1000 else match  # truncate as this can be a very long match
+                 error['extra_data'] = {'error': match, 'source': 'body'}
+                 error['match'] = url
+                 items.append(error)
+
+             for secret in secrets:
+                 match = secret['match']
+                 secret['extra_data'] = {'secret': match, 'source': 'body'}
+                 secret['match'] = url
+                 items.append(secret)
+
+             for info in infos:
+                 CARIDDI_IGNORE_LIST = ['BTC address']  # TODO: make this a config option
+                 if info['name'] in CARIDDI_IGNORE_LIST:
+                     continue
+                 match = info['match']
+                 info['extra_data'] = {'info': match, 'source': 'body'}
+                 info['match'] = url
+                 items.append(info)
+
+         except json.decoder.JSONDecodeError:
+             pass
+
+         return items
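The item_loader above flattens each cariddi JSON line into one Url item plus one Tag item per match, all pointing back at the URL the match was found on. A minimal standalone sketch of the same flattening, run against a hypothetical cariddi output line (shape inferred from the parser above, not captured output):

import json

# Hypothetical cariddi -json line with one secret match.
line = '{"url": "https://example.com/app.js", "matches": {"secrets": [{"name": "AWS key", "match": "AKIA..."}]}}'

item = json.loads(line)
url = item['url']
items = [{k: v for k, v in item.items() if k != 'matches'}]  # the Url item
for secret in item.get('matches', {}).get('secrets', []):
    secret['extra_data'] = {'secret': secret['match'], 'source': 'body'}
    secret['match'] = url  # the Tag item points at the URL, not the raw match
    items.append(secret)
print(items)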
secator/tasks/dalfox.py ADDED
@@ -0,0 +1,66 @@
+ from urllib.parse import urlparse
+
+ from secator.decorators import task
+ from secator.definitions import (CONFIDENCE, DELAY, EXTRA_DATA, FOLLOW_REDIRECT,
+                                  HEADER, ID, MATCHED_AT, METHOD, NAME,
+                                  OPT_NOT_SUPPORTED, PROVIDER, PROXY, RATE_LIMIT,
+                                  SEVERITY, TAGS, THREADS, TIMEOUT, URL,
+                                  USER_AGENT)
+ from secator.output_types import Vulnerability
+ from secator.tasks._categories import VulnHttp
+
+ DALFOX_TYPE_MAP = {
+     'G': 'Grep XSS',
+     'R': 'Reflected XSS',
+     'V': 'Verified XSS'
+ }
+
+
+ @task()
+ class dalfox(VulnHttp):
+     """Powerful open source XSS scanning tool."""
+     cmd = 'dalfox'
+     input_type = URL
+     input_flag = 'url'
+     file_flag = 'file'
+     json_flag = '--format json'
+     version_flag = 'version'
+     opt_prefix = '--'
+     opt_key_map = {
+         HEADER: 'header',
+         DELAY: 'delay',
+         FOLLOW_REDIRECT: 'follow-redirects',
+         METHOD: 'method',
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         THREADS: 'worker',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent'
+     }
+     output_map = {
+         Vulnerability: {
+             ID: lambda x: None,
+             NAME: lambda x: DALFOX_TYPE_MAP[x['type']],
+             PROVIDER: 'dalfox',
+             TAGS: lambda x: [x['cwe']] if x['cwe'] else [],
+             CONFIDENCE: lambda x: 'high',
+             MATCHED_AT: lambda x: urlparse(x['data'])._replace(query='').geturl(),
+             EXTRA_DATA: lambda x: {
+                 k: v for k, v in x.items()
+                 if k not in ['type', 'severity', 'cwe']
+             },
+             SEVERITY: lambda x: x['severity'].lower()
+         }
+     }
+     install_cmd = 'go install -v github.com/hahwul/dalfox/v2@latest'
+     encoding = 'ansi'
+     proxychains = False
+     proxychains_flavor = 'proxychains4'
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'cpu'
+
+     @staticmethod
+     def on_line(self, line):
+         line = line.rstrip(',')
+         return line
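Two details worth noting here: on_line strips trailing commas because dalfox's JSON output is emitted as an array, so individual lines can carry a trailing comma that would break per-line JSON parsing; and the MATCHED_AT lambda drops the query string (which carries the injected payload) so the matched location stays stable across findings. A quick sketch of that URL normalization, with a made-up finding:

from urllib.parse import urlparse

# Hypothetical dalfox 'data' field carrying an XSS payload in the query string.
data = 'https://example.com/search?q=%3Cscript%3Ealert(1)%3C%2Fscript%3E'
print(urlparse(data)._replace(query='').geturl())  # -> https://example.com/search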
secator/tasks/dirsearch.py ADDED
@@ -0,0 +1,88 @@
+ import os
+
+ import yaml
+
+ from secator.decorators import task
+ from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, DELAY, DEPTH,
+                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
+                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
+                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, OUTPUT_PATH, PROXY,
+                                  RATE_LIMIT, RETRIES, STATUS_CODE,
+                                  THREADS, TIMEOUT, USER_AGENT, WORDLIST)
+ from secator.output_types import Url
+ from secator.tasks._categories import HttpFuzzer
+
+
+ @task()
+ class dirsearch(HttpFuzzer):
+     """Advanced web path brute-forcer."""
+     cmd = 'dirsearch'
+     input_flag = '-u'
+     file_flag = '-l'
+     json_flag = '--format json'
+     opt_prefix = '--'
+     encoding = 'ansi'
+     opt_key_map = {
+         HEADER: 'header',
+         DELAY: 'delay',
+         DEPTH: 'max-recursion-depth',
+         FILTER_CODES: 'exclude-status',
+         FILTER_REGEX: 'exclude-regex',
+         FILTER_SIZE: 'exclude-sizes',
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: 'follow-redirects',
+         MATCH_CODES: 'include-status',
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: 'http-method',
+         PROXY: 'proxy',
+         RATE_LIMIT: 'max-rate',
+         RETRIES: 'retries',
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent',
+         WORDLIST: 'wordlists',
+     }
+     output_map = {
+         Url: {
+             CONTENT_LENGTH: 'content-length',
+             CONTENT_TYPE: 'content-type',
+             STATUS_CODE: 'status'
+         }
+     }
+     install_cmd = 'pipx install dirsearch'
+     proxychains = True
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'io'
+
+     def yielder(self):
+         prev = self.print_item_count
+         self.print_item_count = False
+         list(super().yielder())
+         if self.return_code != 0:
+             return
+         self.results = []
+         if not self.output_json:
+             return
+         note = f'dirsearch JSON results saved to {self.output_path}'
+         if self.print_line:
+             self._print(note)
+         if os.path.exists(self.output_path):
+             with open(self.output_path, 'r') as f:
+                 results = yaml.safe_load(f.read()).get('results', [])
+             for item in results:
+                 item = self._process_item(item)
+                 if not item:
+                     continue
+                 yield item
+         self.print_item_count = prev
+
+     @staticmethod
+     def on_init(self):
+         self.output_path = self.get_opt_value(OUTPUT_PATH)
+         if not self.output_path:
+             self.output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
+         self.cmd += f' -o {self.output_path}'
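Unlike the streaming tasks, dirsearch's results are read from its report file after the command finishes; yaml.safe_load works here because valid JSON is also valid YAML. A minimal sketch of that parsing step, using an inline string as a hypothetical stand-in for the report file:

import yaml

# Hypothetical dirsearch JSON report content.
report = '{"results": [{"url": "https://example.com/admin/", "status": 200, "content-length": 1234}]}'
for item in yaml.safe_load(report).get('results', []):
    print(item['url'], item['status'])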
secator/tasks/dnsx.py ADDED
@@ -0,0 +1,56 @@
+ from secator.decorators import task
+ from secator.definitions import (OPT_PIPE_INPUT, RATE_LIMIT, RETRIES, THREADS)
+ from secator.output_types import Record
+ from secator.tasks._categories import ReconDns
+ import json
+
+
+ @task()
+ class dnsx(ReconDns):
+     """Fast and multi-purpose DNS toolkit for running various DNS queries, built on the retryabledns library."""
+     cmd = 'dnsx -resp -a -aaaa -cname -mx -ns -txt -srv -ptr -soa -axfr -caa'
+     json_flag = '-json'
+     input_flag = OPT_PIPE_INPUT
+     file_flag = OPT_PIPE_INPUT
+     output_types = [Record]
+     opt_key_map = {
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: 'retry',
+         THREADS: 'threads',
+     }
+     opts = {
+         'trace': {'is_flag': True, 'default': False, 'help': 'Perform dns tracing'},
+         'resolver': {'type': str, 'short': 'r', 'help': 'List of resolvers to use (file or comma separated)'},
+         'wildcard_domain': {'type': str, 'short': 'wd', 'help': 'Domain name for wildcard filtering'},
+     }
+
+     install_cmd = 'go install -v github.com/projectdiscovery/dnsx/cmd/dnsx@latest'
+     profile = 'io'
+
+     @staticmethod
+     def item_loader(self, line):
+         items = []
+         try:
+             item = json.loads(line)
+             if self.orig:  # original dnsx output
+                 return item
+             host = item['host']
+             record_types = ['a', 'aaaa', 'cname', 'mx', 'ns', 'txt', 'srv', 'ptr', 'soa', 'axfr', 'caa']
+             for _type in record_types:
+                 values = item.get(_type, [])
+                 for value in values:
+                     name = value
+                     extra_data = {}
+                     if isinstance(value, dict):
+                         name = value['name']
+                         extra_data = {k: v for k, v in value.items() if k != 'name'}
+                     items.append({
+                         'host': host,
+                         'name': name,
+                         'type': _type.upper(),
+                         'extra_data': extra_data
+                     })
+         except json.decoder.JSONDecodeError:
+             pass
+
+         return items
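Each dnsx JSON line groups all resolved records under one host; item_loader explodes that into one Record per (host, type, value), keeping any per-value fields as extra_data. A self-contained sketch of the same loop against a hypothetical dnsx line (record values may be plain strings or dicts, as the isinstance check above anticipates):

import json

# Hypothetical dnsx -json line; the mx value is a dict, the a value a plain string.
line = '{"host": "example.com", "a": ["93.184.216.34"], "mx": [{"name": "mail.example.com", "priority": 10}]}'
item = json.loads(line)
records = []
for _type in ['a', 'mx']:
    for value in item.get(_type, []):
        if isinstance(value, dict):
            name, extra = value['name'], {k: v for k, v in value.items() if k != 'name'}
        else:
            name, extra = value, {}
        records.append({'host': item['host'], 'name': name, 'type': _type.upper(), 'extra_data': extra})
print(records)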
secator/tasks/dnsxbrute.py ADDED
@@ -0,0 +1,34 @@
+ from secator.decorators import task
+ from secator.definitions import (DEFAULT_DNS_WORDLIST, DOMAIN, HOST, RATE_LIMIT, RETRIES, THREADS, WORDLIST, EXTRA_DATA)
+ from secator.output_types import Subdomain
+ from secator.tasks._categories import ReconDns
+
+
+ @task()
+ class dnsxbrute(ReconDns):
+     """dnsx in wordlist brute-force mode, used to enumerate subdomains."""
+     cmd = 'dnsx'
+     json_flag = '-json'
+     input_flag = '-domain'
+     file_flag = '-domain'
+     opt_key_map = {
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: 'retry',
+         THREADS: 'threads',
+     }
+     opts = {
+         WORDLIST: {'type': str, 'short': 'w', 'default': DEFAULT_DNS_WORDLIST, 'help': 'Wordlist'},
+         'trace': {'is_flag': True, 'default': False, 'help': 'Perform dns tracing'},
+     }
+     output_map = {
+         Subdomain: {
+             HOST: 'host',
+             DOMAIN: lambda x: ".".join(x['host'].split('.')[1:]),
+             EXTRA_DATA: lambda x: {
+                 'resolver': x['resolver'],
+                 'status_code': x['status_code']
+             }
+         }
+     }
+     install_cmd = 'go install -v github.com/projectdiscovery/dnsx/cmd/dnsx@latest'
+     profile = 'cpu'
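The DOMAIN lambda derives the parent domain by dropping the leftmost DNS label only, which is a naive heuristic for deeply nested hosts (it yields the immediate parent, not the registrable domain). For example:

# Dropping the leftmost label returns the immediate parent domain.
host = 'staging.api.example.com'
print(".".join(host.split('.')[1:]))  # -> api.example.com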
secator/tasks/feroxbuster.py ADDED
@@ -0,0 +1,89 @@
+ import shlex
+ from pathlib import Path
+
+ from secator.decorators import task
+ from secator.definitions import (CONTENT_TYPE, DELAY, DEPTH, FILTER_CODES,
+                                  FILTER_REGEX, FILTER_SIZE, FILTER_WORDS,
+                                  FOLLOW_REDIRECT, HEADER, LINES, MATCH_CODES,
+                                  MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD,
+                                  OPT_NOT_SUPPORTED, OPT_PIPE_INPUT, OUTPUT_PATH, PROXY,
+                                  RATE_LIMIT, RETRIES, STATUS_CODE,
+                                  THREADS, TIMEOUT, USER_AGENT, WORDLIST, WORDS, DEFAULT_FEROXBUSTER_FLAGS)
+ from secator.output_types import Url
+ from secator.tasks._categories import HttpFuzzer
+
+
+ @task()
+ class feroxbuster(HttpFuzzer):
+     """Simple, fast, recursive content discovery tool written in Rust."""
+     cmd = f'feroxbuster {DEFAULT_FEROXBUSTER_FLAGS}'
+     input_flag = '--url'
+     input_chunk_size = 1
+     file_flag = OPT_PIPE_INPUT
+     json_flag = '--json'
+     opt_prefix = '--'
+     opts = {
+         # 'auto_tune': {'is_flag': True, 'default': False, 'help': 'Automatically lower scan rate when too many errors'},
+         'extract_links': {'is_flag': True, 'default': False, 'help': 'Extract links from response body'},
+         'collect_backups': {'is_flag': True, 'default': False, 'help': 'Request likely backup exts for urls'},
+         'collect_extensions': {'is_flag': True, 'default': False, 'help': 'Discover exts and add to --extensions'},
+         'collect_words': {'is_flag': True, 'default': False, 'help': 'Discover important words and add to wordlist'},
+     }
+     opt_key_map = {
+         HEADER: 'headers',
+         DELAY: OPT_NOT_SUPPORTED,
+         DEPTH: 'depth',
+         FILTER_CODES: 'filter-status',
+         FILTER_REGEX: 'filter-regex',
+         FILTER_SIZE: 'filter-size',
+         FILTER_WORDS: 'filter-words',
+         FOLLOW_REDIRECT: 'redirects',
+         MATCH_CODES: 'status-codes',
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: 'methods',
+         PROXY: 'proxy',
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent',
+         WORDLIST: 'wordlist'
+     }
+     output_map = {
+         Url: {
+             STATUS_CODE: 'status',
+             CONTENT_TYPE: lambda x: x['headers'].get('content-type'),
+             LINES: 'line_count',
+             WORDS: 'word_count'
+         }
+     }
+     install_cmd = (
+         'sudo apt install -y unzip curl && '
+         'curl -sL https://raw.githubusercontent.com/epi052/feroxbuster/master/install-nix.sh | '
+         'bash && sudo mv feroxbuster /usr/local/bin'
+     )
+     proxychains = False
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'cpu'
+
+     @staticmethod
+     def on_init(self):
+         self.output_path = self.get_opt_value(OUTPUT_PATH)
+         if not self.output_path:
+             self.output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
+         Path(self.output_path).touch()
+         self.cmd += f' --output {self.output_path}'
+
+     @staticmethod
+     def on_start(self):
+         if self.input_path:
+             self.cmd += ' --stdin'
+         self.cmd += f' & tail --pid=$! -f {shlex.quote(self.output_path)}'
+         self.shell = True
+
+     @staticmethod
+     def validate_item(self, item):
+         return item['type'] == 'response'
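on_start turns the command into a small shell pipeline: feroxbuster writes JSON to --output while tail --pid=$! -f streams that file back to secator until the feroxbuster process exits. validate_item then keeps only objects whose type is 'response', since feroxbuster's JSON stream also carries other object types such as statistics. A sketch of that filter over hypothetical output lines:

import json

# Hypothetical feroxbuster --json lines: only 'response' objects become Url items.
lines = [
    '{"type": "statistics", "requests": 1500}',
    '{"type": "response", "url": "https://example.com/backup.zip", "status": 200, "headers": {}, "line_count": 0, "word_count": 0}',
]
responses = [item for item in map(json.loads, lines) if item['type'] == 'response']
print(responses[0]['url'])  # -> https://example.com/backup.zip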
secator/tasks/ffuf.py ADDED
@@ -0,0 +1,85 @@
+ from secator.decorators import task
+ from secator.definitions import (AUTO_CALIBRATION, CONTENT_LENGTH,
+                                  CONTENT_TYPE, DELAY, DEPTH, EXTRA_DATA,
+                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
+                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
+                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED,
+                                  PERCENT, PROXY, RATE_LIMIT, RETRIES,
+                                  STATUS_CODE, THREADS, TIME, TIMEOUT,
+                                  USER_AGENT, WORDLIST)
+ from secator.output_types import Progress, Url
+ from secator.serializers import JSONSerializer, RegexSerializer
+ from secator.tasks._categories import HttpFuzzer
+
+ FFUF_PROGRESS_REGEX = r':: Progress: \[(?P<count>\d+)/(?P<total>\d+)\] :: Job \[\d/\d\] :: (?P<rps>\d+) req/sec :: Duration: \[(?P<duration>[\d:]+)\] :: Errors: (?P<errors>\d+) ::'  # noqa: E501
+
+
+ @task()
+ class ffuf(HttpFuzzer):
+     """Fast web fuzzer written in Go."""
+     cmd = 'ffuf -noninteractive -recursion'
+     input_flag = '-u'
+     input_chunk_size = 1
+     file_flag = None
+     json_flag = '-json'
+     version_flag = '-V'
+     item_loaders = [
+         JSONSerializer(),
+         RegexSerializer(FFUF_PROGRESS_REGEX, fields=['count', 'total', 'rps', 'duration', 'errors'])
+     ]
+     opts = {
+         AUTO_CALIBRATION: {'is_flag': True, 'short': 'ac', 'help': 'Auto-calibration'},
+     }
+     opt_key_map = {
+         HEADER: 'H',
+         DELAY: 'p',
+         DEPTH: 'recursion-depth',
+         FILTER_CODES: 'fc',
+         FILTER_REGEX: 'fr',
+         FILTER_SIZE: 'fs',
+         FILTER_WORDS: 'fw',
+         FOLLOW_REDIRECT: 'r',
+         MATCH_CODES: 'mc',
+         MATCH_REGEX: 'mr',
+         MATCH_SIZE: 'ms',
+         MATCH_WORDS: 'mw',
+         METHOD: 'X',
+         PROXY: 'x',
+         RATE_LIMIT: 'rate',
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 't',
+         TIMEOUT: 'timeout',
+         USER_AGENT: OPT_NOT_SUPPORTED,
+
+         # ffuf opts
+         WORDLIST: 'w',
+         AUTO_CALIBRATION: 'ac',
+     }
+     output_types = [Url, Progress]
+     output_map = {
+         Url: {
+             STATUS_CODE: 'status',
+             CONTENT_LENGTH: 'length',
+             CONTENT_TYPE: 'content-type',
+             TIME: lambda x: x['duration'] * 10**-9
+         },
+         Progress: {
+             PERCENT: lambda x: int(int(x['count']) * 100 / int(x['total'])),
+             EXTRA_DATA: lambda x: {k: v for k, v in x.items() if k not in ['count', 'total', 'errors']}
+         },
+     }
+     encoding = 'ansi'
+     install_cmd = (
+         'go install -v github.com/ffuf/ffuf@latest && '
+         'sudo git clone https://github.com/danielmiessler/SecLists /usr/share/seclists || true'
+     )
+     proxychains = False
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'io'
+
+     @staticmethod
+     def on_item(self, item):
+         item.method = self.get_opt_value(METHOD) or 'GET'
+         return item
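ffuf emits JSON result objects and plain-text progress lines on the same stream, which is why two item_loaders run side by side: JSONSerializer for results and RegexSerializer for progress. Note also the TIME lambda, which assumes the duration field is reported in nanoseconds. A sketch of the progress regex against a representative progress line:

import re

FFUF_PROGRESS_REGEX = r':: Progress: \[(?P<count>\d+)/(?P<total>\d+)\] :: Job \[\d/\d\] :: (?P<rps>\d+) req/sec :: Duration: \[(?P<duration>[\d:]+)\] :: Errors: (?P<errors>\d+) ::'

line = ':: Progress: [230/4613] :: Job [1/1] :: 428 req/sec :: Duration: [0:00:01] :: Errors: 0 ::'
m = re.search(FFUF_PROGRESS_REGEX, line)
if m:
    fields = m.groupdict()
    # Same percent computation as the Progress output_map above.
    print(int(int(fields['count']) * 100 / int(fields['total'])))  # -> 4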
secator/tasks/fping.py ADDED
@@ -0,0 +1,44 @@
+ import validators
+
+ from secator.decorators import task
+ from secator.definitions import (DELAY, IP, OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT,
+                                  RETRIES, THREADS, TIMEOUT)
+ from secator.output_types import Ip
+ from secator.tasks._categories import ReconIp
+
+
+ @task()
+ class fping(ReconIp):
+     """Send ICMP echo probes to network hosts, similar to ping, but much better."""
+     cmd = 'fping -a'
+     file_flag = '-f'
+     input_flag = None
+     ignore_return_code = True
+     opt_prefix = '--'
+     opt_key_map = {
+         DELAY: 'period',
+         PROXY: OPT_NOT_SUPPORTED,
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: 'retry',
+         TIMEOUT: 'timeout',
+         THREADS: OPT_NOT_SUPPORTED
+     }
+     opt_value_map = {
+         DELAY: lambda x: x * 1000,  # convert s to ms
+         TIMEOUT: lambda x: x * 1000  # convert s to ms
+     }
+     input_type = IP
+     output_types = [Ip]
+     install_cmd = 'sudo apt install -y fping'
+
+     @staticmethod
+     def item_loader(self, line):
+         if validators.ipv4(line) or validators.ipv6(line):
+             return {'ip': line, 'alive': True}
+         return None
+
+     @staticmethod
+     def on_line(self, line):
+         if 'Unreachable' in line:
+             return ''  # discard line as it pollutes output
+         return line
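With -a, fping prints one alive IP per line, so item_loader only has to check that a line is a bare IP address (the third-party validators package returns a falsy result for anything else) and mark it alive. A quick sketch of that filter:

import validators  # third-party package, same one imported by the task above

for line in ['192.168.1.10', 'ICMP Host Unreachable from 192.168.1.1']:
    if validators.ipv4(line) or validators.ipv6(line):
        print({'ip': line, 'alive': True})  # only the bare IP line passes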
secator/tasks/gau.py ADDED
@@ -0,0 +1,43 @@
+ from secator.decorators import task
+ from secator.definitions import (DELAY, DEPTH, FILTER_CODES, FILTER_REGEX,
+                                  FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT,
+                                  HEADER, MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED,
+                                  OPT_PIPE_INPUT, PROXY, RATE_LIMIT, RETRIES,
+                                  THREADS, TIMEOUT, USER_AGENT)
+ from secator.tasks._categories import HttpCrawler
+
+
+ @task()
+ class gau(HttpCrawler):
+     """Fetch known URLs from AlienVault's Open Threat Exchange, the Wayback Machine, Common Crawl, and URLScan."""
+     cmd = 'gau'
+     file_flag = OPT_PIPE_INPUT
+     json_flag = '--json'
+     opt_prefix = '--'
+     opt_key_map = {
+         HEADER: OPT_NOT_SUPPORTED,
+         DELAY: OPT_NOT_SUPPORTED,
+         DEPTH: OPT_NOT_SUPPORTED,
+         FILTER_CODES: 'fc',
+         FILTER_REGEX: OPT_NOT_SUPPORTED,
+         FILTER_SIZE: OPT_NOT_SUPPORTED,
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         MATCH_CODES: 'mc',
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         METHOD: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: 'retries',
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: OPT_NOT_SUPPORTED,
+     }
+     install_cmd = 'go install -v github.com/lc/gau/v2/cmd/gau@latest'
+     proxychains = False
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'io'
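gau needs almost no parsing of its own: with --json each output line is already a URL object, so the class mostly declares how secator's generic options translate to gau flags. A deliberately simplified sketch of how an opt_key_map conceptually becomes a command line (the real translation lives in secator's command runner and also handles boolean flags, short options, and OPT_NOT_SUPPORTED; the option values below are made up):

# Hypothetical user-supplied options, translated using the mapping style above.
opts = {'threads': 50, 'timeout': 10, 'proxy': 'http://127.0.0.1:8080'}
opt_key_map = {'threads': 'threads', 'timeout': 'timeout', 'proxy': 'proxy'}
opt_prefix = '--'
cmd = 'gau ' + ' '.join(f'{opt_prefix}{opt_key_map[k]} {v}' for k, v in opts.items())
print(cmd)  # -> gau --threads 50 --timeout 10 --proxy http://127.0.0.1:8080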
secator/tasks/gf.py ADDED
@@ -0,0 +1,34 @@
+ from secator.decorators import task
+ from secator.definitions import OPT_PIPE_INPUT, OPT_NOT_SUPPORTED, URL
+ from secator.output_types import Tag
+ from secator.tasks._categories import Tagger
+
+
+ @task()
+ class gf(Tagger):
+     """Wrapper around grep, to help you grep for things."""
+     cmd = 'gf'
+     file_flag = OPT_PIPE_INPUT
+     input_flag = OPT_PIPE_INPUT
+     version_flag = OPT_NOT_SUPPORTED
+     opts = {
+         'pattern': {'type': str, 'help': 'Pattern names to match against (comma-delimited)'}
+     }
+     opt_key_map = {
+         'pattern': ''
+     }
+     input_type = URL
+     install_cmd = (
+         'go install -v github.com/tomnomnom/gf@latest && '
+         'git clone https://github.com/1ndianl33t/Gf-Patterns $HOME/.gf || true'
+     )
+     output_types = [Tag]
+
+     @staticmethod
+     def item_loader(self, line):
+         return {'match': line, 'name': self.get_opt_value('pattern').rstrip() + ' pattern'}  # noqa: E731,E501
+
+     @staticmethod
+     def on_item(self, item):
+         item.extra_data = {'source': 'url'}
+         return item
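gf prints matching input lines verbatim, so item_loader tags each emitted line with the pattern name the task was run with, and on_item marks the match source. A sketch of the combined effect with a hypothetical pattern and input line:

# Hypothetical gf run with pattern 'redirect' matching a candidate URL.
pattern = 'redirect'
line = 'https://example.com/login?next=https://evil.example'
item = {'match': line, 'name': pattern.rstrip() + ' pattern', 'extra_data': {'source': 'url'}}
print(item)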