secator 0.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +453 -0
  4. secator/celery_signals.py +138 -0
  5. secator/celery_utils.py +320 -0
  6. secator/cli.py +2035 -0
  7. secator/cli_helper.py +395 -0
  8. secator/click.py +87 -0
  9. secator/config.py +670 -0
  10. secator/configs/__init__.py +0 -0
  11. secator/configs/profiles/__init__.py +0 -0
  12. secator/configs/profiles/aggressive.yaml +8 -0
  13. secator/configs/profiles/all_ports.yaml +7 -0
  14. secator/configs/profiles/full.yaml +31 -0
  15. secator/configs/profiles/http_headless.yaml +7 -0
  16. secator/configs/profiles/http_record.yaml +8 -0
  17. secator/configs/profiles/insane.yaml +8 -0
  18. secator/configs/profiles/paranoid.yaml +8 -0
  19. secator/configs/profiles/passive.yaml +11 -0
  20. secator/configs/profiles/polite.yaml +8 -0
  21. secator/configs/profiles/sneaky.yaml +8 -0
  22. secator/configs/profiles/tor.yaml +5 -0
  23. secator/configs/scans/__init__.py +0 -0
  24. secator/configs/scans/domain.yaml +31 -0
  25. secator/configs/scans/host.yaml +23 -0
  26. secator/configs/scans/network.yaml +30 -0
  27. secator/configs/scans/subdomain.yaml +27 -0
  28. secator/configs/scans/url.yaml +19 -0
  29. secator/configs/workflows/__init__.py +0 -0
  30. secator/configs/workflows/cidr_recon.yaml +48 -0
  31. secator/configs/workflows/code_scan.yaml +29 -0
  32. secator/configs/workflows/domain_recon.yaml +46 -0
  33. secator/configs/workflows/host_recon.yaml +95 -0
  34. secator/configs/workflows/subdomain_recon.yaml +120 -0
  35. secator/configs/workflows/url_bypass.yaml +15 -0
  36. secator/configs/workflows/url_crawl.yaml +98 -0
  37. secator/configs/workflows/url_dirsearch.yaml +62 -0
  38. secator/configs/workflows/url_fuzz.yaml +68 -0
  39. secator/configs/workflows/url_params_fuzz.yaml +66 -0
  40. secator/configs/workflows/url_secrets_hunt.yaml +23 -0
  41. secator/configs/workflows/url_vuln.yaml +91 -0
  42. secator/configs/workflows/user_hunt.yaml +29 -0
  43. secator/configs/workflows/wordpress.yaml +38 -0
  44. secator/cve.py +718 -0
  45. secator/decorators.py +7 -0
  46. secator/definitions.py +168 -0
  47. secator/exporters/__init__.py +14 -0
  48. secator/exporters/_base.py +3 -0
  49. secator/exporters/console.py +10 -0
  50. secator/exporters/csv.py +37 -0
  51. secator/exporters/gdrive.py +123 -0
  52. secator/exporters/json.py +16 -0
  53. secator/exporters/table.py +36 -0
  54. secator/exporters/txt.py +28 -0
  55. secator/hooks/__init__.py +0 -0
  56. secator/hooks/gcs.py +80 -0
  57. secator/hooks/mongodb.py +281 -0
  58. secator/installer.py +694 -0
  59. secator/loader.py +128 -0
  60. secator/output_types/__init__.py +49 -0
  61. secator/output_types/_base.py +108 -0
  62. secator/output_types/certificate.py +78 -0
  63. secator/output_types/domain.py +50 -0
  64. secator/output_types/error.py +42 -0
  65. secator/output_types/exploit.py +58 -0
  66. secator/output_types/info.py +24 -0
  67. secator/output_types/ip.py +47 -0
  68. secator/output_types/port.py +55 -0
  69. secator/output_types/progress.py +36 -0
  70. secator/output_types/record.py +36 -0
  71. secator/output_types/stat.py +41 -0
  72. secator/output_types/state.py +29 -0
  73. secator/output_types/subdomain.py +45 -0
  74. secator/output_types/tag.py +69 -0
  75. secator/output_types/target.py +38 -0
  76. secator/output_types/url.py +112 -0
  77. secator/output_types/user_account.py +41 -0
  78. secator/output_types/vulnerability.py +101 -0
  79. secator/output_types/warning.py +30 -0
  80. secator/report.py +140 -0
  81. secator/rich.py +130 -0
  82. secator/runners/__init__.py +14 -0
  83. secator/runners/_base.py +1240 -0
  84. secator/runners/_helpers.py +218 -0
  85. secator/runners/celery.py +18 -0
  86. secator/runners/command.py +1178 -0
  87. secator/runners/python.py +126 -0
  88. secator/runners/scan.py +87 -0
  89. secator/runners/task.py +81 -0
  90. secator/runners/workflow.py +168 -0
  91. secator/scans/__init__.py +29 -0
  92. secator/serializers/__init__.py +8 -0
  93. secator/serializers/dataclass.py +39 -0
  94. secator/serializers/json.py +45 -0
  95. secator/serializers/regex.py +25 -0
  96. secator/tasks/__init__.py +8 -0
  97. secator/tasks/_categories.py +487 -0
  98. secator/tasks/arjun.py +113 -0
  99. secator/tasks/arp.py +53 -0
  100. secator/tasks/arpscan.py +70 -0
  101. secator/tasks/bbot.py +372 -0
  102. secator/tasks/bup.py +118 -0
  103. secator/tasks/cariddi.py +193 -0
  104. secator/tasks/dalfox.py +87 -0
  105. secator/tasks/dirsearch.py +84 -0
  106. secator/tasks/dnsx.py +186 -0
  107. secator/tasks/feroxbuster.py +93 -0
  108. secator/tasks/ffuf.py +135 -0
  109. secator/tasks/fping.py +85 -0
  110. secator/tasks/gau.py +102 -0
  111. secator/tasks/getasn.py +60 -0
  112. secator/tasks/gf.py +36 -0
  113. secator/tasks/gitleaks.py +96 -0
  114. secator/tasks/gospider.py +84 -0
  115. secator/tasks/grype.py +109 -0
  116. secator/tasks/h8mail.py +75 -0
  117. secator/tasks/httpx.py +167 -0
  118. secator/tasks/jswhois.py +36 -0
  119. secator/tasks/katana.py +203 -0
  120. secator/tasks/maigret.py +87 -0
  121. secator/tasks/mapcidr.py +42 -0
  122. secator/tasks/msfconsole.py +179 -0
  123. secator/tasks/naabu.py +85 -0
  124. secator/tasks/nmap.py +487 -0
  125. secator/tasks/nuclei.py +151 -0
  126. secator/tasks/search_vulns.py +225 -0
  127. secator/tasks/searchsploit.py +109 -0
  128. secator/tasks/sshaudit.py +299 -0
  129. secator/tasks/subfinder.py +48 -0
  130. secator/tasks/testssl.py +283 -0
  131. secator/tasks/trivy.py +130 -0
  132. secator/tasks/trufflehog.py +240 -0
  133. secator/tasks/urlfinder.py +100 -0
  134. secator/tasks/wafw00f.py +106 -0
  135. secator/tasks/whois.py +34 -0
  136. secator/tasks/wpprobe.py +116 -0
  137. secator/tasks/wpscan.py +202 -0
  138. secator/tasks/x8.py +94 -0
  139. secator/tasks/xurlfind3r.py +83 -0
  140. secator/template.py +294 -0
  141. secator/thread.py +24 -0
  142. secator/tree.py +196 -0
  143. secator/utils.py +922 -0
  144. secator/utils_test.py +297 -0
  145. secator/workflows/__init__.py +29 -0
  146. secator-0.22.0.dist-info/METADATA +447 -0
  147. secator-0.22.0.dist-info/RECORD +150 -0
  148. secator-0.22.0.dist-info/WHEEL +4 -0
  149. secator-0.22.0.dist-info/entry_points.txt +2 -0
  150. secator-0.22.0.dist-info/licenses/LICENSE +60 -0
secator/tasks/cariddi.py ADDED
@@ -0,0 +1,193 @@
+ import re
+
+ from urllib.parse import urlparse, urlunparse
+
+ from secator.decorators import task
+ from secator.definitions import (DELAY, DEPTH, FILTER_CODES, FILTER_REGEX,
+                                  FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT,
+                                  HEADER, MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED,
+                                  OPT_PIPE_INPUT, PROXY, RATE_LIMIT, RETRIES,
+                                  THREADS, TIMEOUT, URL, USER_AGENT)
+ from secator.output_types import Tag, Url
+ from secator.serializers import JSONSerializer
+ from secator.tasks._categories import HttpCrawler
+
+ CARIDDI_IGNORE_PATTERNS = re.compile(r"|".join([
+     r"<!--\s*Instance.*\s*-->",
+     r"<!--\s*(Styles|Scripts|Fonts|Images|Links|Forms|Inputs|Buttons|List|Next|Prev|Navigation dots)\s*-->",
+     r"<!--\s*end.*-->",
+     r"<!--\s*start.*-->",
+     r"<!--\s*begin.*-->",
+     r"<!--\s*here goes.*-->",
+     r"<!--\s*.*Yoast SEO.*\s*-->",
+     r"<!--\s*.*Google Analytics.*\s*-->",
+ ]), re.IGNORECASE)
+
+ CARIDDI_IGNORE_LIST = ['BTC address']
+ CARIDDI_RENAME_LIST = {
+     'IPv4 address': 'IpV4 address',
+     'MySQL error': 'Mysql error',
+     'MariaDB error': 'Mariadb error',
+     'PostgreSQL error': 'Postgresql error',
+     'SQLite error': 'Sqlite error',
+ }
+
+
+ @task()
+ class cariddi(HttpCrawler):
+     """Crawl endpoints, secrets, api keys, extensions, tokens..."""
+     cmd = 'cariddi'
+     input_types = [URL]
+     output_types = [Url, Tag]
+     tags = ['url', 'crawl']
+     input_flag = OPT_PIPE_INPUT
+     file_flag = OPT_PIPE_INPUT
+     json_flag = '-json'
+     opts = {
+         'info': {'is_flag': True, 'short': 'info', 'help': 'Hunt for useful informations in websites.'},
+         'secrets': {'is_flag': True, 'short': 'secrets', 'help': 'Hunt for secrets.'},
+         'errors': {'is_flag': True, 'short': 'err', 'help': 'Hunt for errors in websites.'},
+         'juicy_extensions': {'type': int, 'short': 'jext', 'help': 'Hunt for juicy file extensions. Integer from 1(juicy) to 7(not juicy)'},  # noqa: E501
+         'juicy_endpoints': {'is_flag': True, 'short': 'jep', 'help': 'Hunt for juicy endpoints.'}
+     }
+     opt_value_map = {
+         HEADER: lambda headers: headers
+     }
+     opt_key_map = {
+         HEADER: 'headers',
+         DELAY: 'd',
+         DEPTH: OPT_NOT_SUPPORTED,
+         FILTER_CODES: OPT_NOT_SUPPORTED,
+         FILTER_REGEX: OPT_NOT_SUPPORTED,
+         FILTER_SIZE: OPT_NOT_SUPPORTED,
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         MATCH_CODES: OPT_NOT_SUPPORTED,
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 'c',
+         TIMEOUT: 't',
+         USER_AGENT: 'ua',
+         'secrets': 's',
+         'errors': 'err',
+         'juicy_endpoints': 'e',
+         'juicy_extensions': 'ext'
+     }
+     item_loaders = [JSONSerializer()]
+     install_version = 'v1.3.6'
+     install_cmd = 'go install -v github.com/edoardottt/cariddi/cmd/cariddi@[install_version]'
+     github_handle = 'edoardottt/cariddi'
+     encoding = 'ansi'
+     proxychains = False
+     proxy_socks5 = True  # with leaks... https://github.com/edoardottt/cariddi/issues/122
+     proxy_http = True  # with leaks... https://github.com/edoardottt/cariddi/issues/122
+     profile = lambda opts: cariddi.dynamic_profile(opts)  # noqa: E731
+
+     @staticmethod
+     def dynamic_profile(opts):
+         juicy_endpoints = cariddi._get_opt_value(
+             opts,
+             'juicy_endpoints',
+             opts_conf=dict(cariddi.opts, **cariddi.meta_opts),
+             opt_aliases=opts.get('aliases', [])
+         )
+         juicy_extensions = cariddi._get_opt_value(
+             opts,
+             'juicy_extensions',
+             opts_conf=dict(cariddi.opts, **cariddi.meta_opts),
+             opt_aliases=opts.get('aliases', [])
+         )
+         info = cariddi._get_opt_value(
+             opts,
+             'info',
+             opts_conf=dict(cariddi.opts, **cariddi.meta_opts),
+             opt_aliases=opts.get('aliases', [])
+         )
+         secrets = cariddi._get_opt_value(
+             opts,
+             'secrets',
+             opts_conf=dict(cariddi.opts, **cariddi.meta_opts),
+             opt_aliases=opts.get('aliases', [])
+         )
+         errors = cariddi._get_opt_value(
+             opts,
+             'errors',
+             opts_conf=dict(cariddi.opts, **cariddi.meta_opts),
+             opt_aliases=opts.get('aliases', [])
+         )
+         hunt = juicy_endpoints or (juicy_extensions is not None) or info or secrets or errors
+         return 'cpu' if hunt is True else 'io'
+
+     @staticmethod
+     def on_json_loaded(self, item):
+         url_item = {k: v for k, v in item.items() if k != 'matches'}
+         url_item['request_headers'] = self.get_opt_value(HEADER, preprocess=True)
+         yield Url(**url_item)
+
+         # Get matches, params, errors, secrets, infos
+         url = url_item[URL]
+         parsed_url = urlparse(url)
+         url_without_param = urlunparse(parsed_url._replace(query=''))
+         matches = item.get('matches', {})
+         params = matches.get('parameters', [])
+         errors = matches.get('errors', [])
+         secrets = matches.get('secrets', [])
+         infos = matches.get('infos', [])
+
+         for param in params:
+             param_name = param['name']
+             for attack in param['attacks']:
+                 extra_data = {k: v for k, v in param.items() if k not in ['name', 'attacks']}
+                 extra_data['content'] = attack
+             # Look up this parameter's value in the URL query string
+             p_name, p_value = param_name, None
+             for p in parsed_url.query.split('&'):
+                 if '=' not in p:
+                     continue
+                 name, value = p.split('=', 1)
+                 if name == param_name:
+                     p_name, p_value = name, value
+                     break
+             yield Tag(
+                 category='info',
+                 name='url_param',
+                 value=p_name,
+                 match=url_without_param,
+                 extra_data={'value': p_value, 'url': url}
+             )
+
+         for error in errors:
+             error['category'] = 'error'
+             error['name'] = '_'.join(f'{error["name"]}'.lower().split())
+             error['value'] = error['match']
+             error['extra_data'] = {'url': url}
+             error['match'] = url_without_param
+             yield Tag(**error)
+
+         for secret in secrets:
+             secret['category'] = 'secret'
+             secret['name'] = '_'.join(f'{secret["name"]}'.lower().split())
+             secret['value'] = secret['match']
+             secret['extra_data'] = {'url': url}
+             secret['match'] = url_without_param
+             yield Tag(**secret)
+
+         for info in infos:
+             if info['name'] in CARIDDI_IGNORE_LIST:
+                 continue
+             if info['name'] in CARIDDI_RENAME_LIST:
+                 info['name'] = CARIDDI_RENAME_LIST[info['name']]
+             content = info['match']
+             parsed_url = urlparse(url)
+             url_without_param = urlunparse(parsed_url._replace(query=''))
+             info['category'] = 'info'
+             info['name'] = '_'.join(f'{info["name"]}'.lower().split())
+             info['match'] = url_without_param
+             if CARIDDI_IGNORE_PATTERNS.match(content):
+                 continue
+             info['value'] = content
+             info['extra_data'] = {'url': url}
+             yield Tag(**info)
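
For orientation, here is a minimal self-contained sketch of what on_json_loaded does with one cariddi JSON item. The sample item is hypothetical, but its shape (a url field plus a matches object with parameters/errors/secrets/infos lists) mirrors what the hook above consumes:

from urllib.parse import urlparse, urlunparse

# Hypothetical cariddi -json item, shaped like what on_json_loaded consumes.
item = {
    'url': 'https://example.com/search?q=test&page=1',
    'matches': {
        'parameters': [{'name': 'q', 'attacks': ['xss']}],
        'errors': [],
        'secrets': [],
        'infos': [{'name': 'Email address', 'match': 'admin@example.com'}],
    },
}

parsed = urlparse(item['url'])
url_without_params = urlunparse(parsed._replace(query=''))
query = dict(p.split('=', 1) for p in parsed.query.split('&') if '=' in p)

# Parameter matches become 'url_param' tags keyed on the query-stripped URL.
for param in item['matches']['parameters']:
    print('tag:', 'url_param', param['name'], '=', query.get(param['name']), '@', url_without_params)

# Info matches become snake_cased 'info' tags.
for info in item['matches']['infos']:
    name = '_'.join(info['name'].lower().split())
    print('tag:', 'info', name, '=', info['match'], '@', url_without_params)

Each match family becomes a Tag whose match field is the query-stripped URL, which is why the hook computes url_without_param before iterating the match lists.
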
secator/tasks/dalfox.py ADDED
@@ -0,0 +1,87 @@
+ from urllib.parse import urlparse
+
+ from secator.decorators import task
+ from secator.definitions import (CONFIDENCE, DELAY, EXTRA_DATA, FOLLOW_REDIRECT,
+                                  HEADER, ID, MATCHED_AT, METHOD, NAME,
+                                  OPT_NOT_SUPPORTED, PROVIDER, PROXY, RATE_LIMIT,
+                                  RETRIES, SEVERITY, TAGS, THREADS, TIMEOUT, URL,
+                                  USER_AGENT)
+ from secator.output_types import Vulnerability, Url
+ from secator.serializers import JSONSerializer
+ from secator.tasks._categories import VulnHttp
+
+ DALFOX_TYPE_MAP = {
+     'G': 'Grep XSS',
+     'R': 'Reflected XSS',
+     'V': 'Verified XSS'
+ }
+
+
+ @task()
+ class dalfox(VulnHttp):
+     """Powerful open source XSS scanning tool."""
+     cmd = 'dalfox'
+     input_types = [URL]
+     output_types = [Vulnerability, Url]
+     tags = ['url', 'fuzz']
+     input_flag = 'url'
+     input_chunk_size = 20
+     ignore_return_code = True
+     file_flag = 'file'
+     json_flag = '--format jsonl'
+     version_flag = 'version'
+     opt_prefix = '--'
+     opt_key_map = {
+         HEADER: 'header',
+         DELAY: 'delay',
+         FOLLOW_REDIRECT: 'follow-redirects',
+         METHOD: 'method',
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 'worker',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent'
+     }
+     item_loaders = [JSONSerializer()]
+     output_map = {
+         Vulnerability: {
+             ID: lambda x: None,
+             NAME: lambda x: DALFOX_TYPE_MAP[x['type']],
+             PROVIDER: 'dalfox',
+             TAGS: lambda x: [x['cwe']] if x['cwe'] else [],
+             CONFIDENCE: lambda x: 'high',
+             MATCHED_AT: lambda x: urlparse(x['data'])._replace(query='').geturl(),
+             EXTRA_DATA: lambda x: dalfox.extra_data_extractor(x),
+             SEVERITY: lambda x: x['severity'].lower()
+         }
+     }
+     install_version = 'v2.11.0'
+     install_cmd = 'go install -v github.com/hahwul/dalfox/v2@[install_version]'
+     github_handle = 'hahwul/dalfox'
+     encoding = 'ansi'
+     proxychains = False
+     proxychains_flavor = 'proxychains4'
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'cpu'
+
+     @staticmethod
+     def on_json_loaded(self, item):
+         if item.get('type', '') == 'V':
+             item['request_headers'] = self.get_opt_value(HEADER, preprocess=True)
+             yield Url(
+                 url=item['data'],
+                 method=item['method'],
+                 request_headers=item['request_headers'],
+                 extra_data={k: v for k, v in item.items() if k not in ['type', 'severity', 'cwe', 'request_headers', 'method', 'data']}  # noqa: E501
+             )
+         yield item
+
+     @staticmethod
+     def extra_data_extractor(item):
+         extra_data = {}
+         for key, value in item.items():
+             if key not in ['type', 'severity', 'cwe']:
+                 extra_data[key] = value
+         return extra_data
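
The output_map above is declarative: each Vulnerability field is either a constant or a lambda over the raw dalfox JSONL record. A rough standalone equivalent of that mapping, applied to a hypothetical record (the field names are the ones the lambdas read: type, severity, cwe, method, data):

from urllib.parse import urlparse

DALFOX_TYPE_MAP = {'G': 'Grep XSS', 'R': 'Reflected XSS', 'V': 'Verified XSS'}

# Hypothetical dalfox --format jsonl record; keys mirror those the lambdas read.
record = {
    'type': 'V',
    'severity': 'High',
    'cwe': 'CWE-79',
    'method': 'GET',
    'data': 'https://example.com/page?q=%3Cscript%3Ealert(1)%3C%2Fscript%3E',
}

vulnerability = {
    'name': DALFOX_TYPE_MAP[record['type']],
    'provider': 'dalfox',
    'tags': [record['cwe']] if record['cwe'] else [],
    'confidence': 'high',
    # matched_at strips the query string, like the MATCHED_AT lambda above.
    'matched_at': urlparse(record['data'])._replace(query='').geturl(),
    'severity': record['severity'].lower(),
    'extra_data': {k: v for k, v in record.items() if k not in ['type', 'severity', 'cwe']},
}
print(vulnerability)
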
secator/tasks/dirsearch.py ADDED
@@ -0,0 +1,84 @@
+ import os
+ import shlex
+ import yaml
+
+ from secator.decorators import task
+ from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, DATA, DELAY, DEPTH,
+                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
+                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
+                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, OUTPUT_PATH, PROXY,
+                                  RATE_LIMIT, RETRIES, STATUS_CODE,
+                                  THREADS, TIMEOUT, USER_AGENT, WORDLIST, URL)
+ from secator.output_types import Url, Info, Error
+ from secator.tasks._categories import HttpFuzzer
+
+
+ @task()
+ class dirsearch(HttpFuzzer):
+     """Advanced web path brute-forcer."""
+     cmd = 'dirsearch'
+     input_types = [URL]
+     output_types = [Url]
+     tags = ['url', 'fuzz']
+     input_flag = '-u'
+     file_flag = '-l'
+     json_flag = '-O json'
+     opt_prefix = '--'
+     encoding = 'ansi'
+     opt_key_map = {
+         HEADER: 'header',
+         DATA: 'data',
+         DELAY: 'delay',
+         DEPTH: 'max-recursion-depth',
+         FILTER_CODES: 'exclude-status',
+         FILTER_REGEX: 'exclude-regex',
+         FILTER_SIZE: 'exclude-sizes',
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: 'follow-redirects',
+         MATCH_CODES: 'include-status',
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: 'http-method',
+         PROXY: 'proxy',
+         RATE_LIMIT: 'max-rate',
+         RETRIES: 'retries',
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent',
+         WORDLIST: 'wordlists',
+     }
+     output_map = {
+         Url: {
+             CONTENT_LENGTH: 'content-length',
+             CONTENT_TYPE: 'content-type',
+             STATUS_CODE: 'status',
+             'request_headers': 'request_headers'
+         }
+     }
+     install_cmd = 'pipx install git+https://github.com/maurosoria/dirsearch.git --force'
+     install_version = '0.4.3'
+     proxychains = True
+     proxy_socks5 = True
+     proxy_http = True
+
+     @staticmethod
+     def on_init(self):
+         self.output_path = self.get_opt_value(OUTPUT_PATH)
+         if not self.output_path:
+             self.output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
+         self.cmd += f' -o {shlex.quote(self.output_path)}'
+
+     @staticmethod
+     def on_cmd_done(self):
+         if not os.path.exists(self.output_path):
+             yield Error(message=f'Could not find JSON results in {self.output_path}')
+             return
+
+         yield Info(message=f'JSON results saved to {self.output_path}')
+         with open(self.output_path, 'r') as f:
+             results = yaml.safe_load(f.read()).get('results', [])
+             for result in results:
+                 result['request_headers'] = self.get_opt_value(HEADER, preprocess=True)
+                 yield result
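
on_cmd_done reads back the report that -O json / -o wrote to disk and replays each entry as a result; note it parses the JSON file with yaml.safe_load, which works because JSON is a subset of YAML. A minimal sketch of that round-trip, using a made-up report shaped like what the hook expects ({'results': [...]}):

import json
import tempfile

import yaml  # pip install pyyaml; JSON parses as YAML, as in on_cmd_done

# Hypothetical dirsearch JSON report, shaped like what on_cmd_done reads back.
report = {
    'results': [
        {'url': 'https://example.com/admin/', 'status': 301, 'content-length': 0, 'content-type': 'text/html'},
        {'url': 'https://example.com/backup.zip', 'status': 200, 'content-length': 1024, 'content-type': 'application/zip'},
    ]
}

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(report, f)
    output_path = f.name

with open(output_path, 'r') as f:
    results = yaml.safe_load(f.read()).get('results', [])
    for result in results:
        # The real hook also attaches request_headers before yielding each result.
        print(result['status'], result['url'])
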
secator/tasks/dnsx.py ADDED
@@ -0,0 +1,186 @@
+ import validators
+ import dns.resolver
+
+ from secator.decorators import task
+ from secator.definitions import (HOST, CIDR_RANGE, DELAY, IP, OPT_PIPE_INPUT, PROXY,
+                                  RATE_LIMIT, RETRIES, THREADS, TIMEOUT, WORDLIST, OPT_NOT_SUPPORTED)
+ from secator.output_types import Record, Ip, Subdomain, Error, Warning
+ from secator.output_types.ip import IpProtocol
+ from secator.tasks._categories import ReconDns
+ from secator.serializers import JSONSerializer
+ from secator.utils import extract_domain_info, process_wordlist
+
+
+ @task()
+ class dnsx(ReconDns):
+     """dnsx is a fast and multi-purpose DNS toolkit designed for running various probes through the retryabledns library."""
+     cmd = 'dnsx -resp -recon'
+     tags = ['dns', 'fuzz']
+     input_types = [HOST, CIDR_RANGE, IP]
+     output_types = [Record, Ip, Subdomain]
+     json_flag = '-json'
+     input_flag = OPT_PIPE_INPUT
+     file_flag = OPT_PIPE_INPUT
+     opt_key_map = {
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: 'retry',
+         THREADS: 'threads',
+         PROXY: 'proxy',
+         DELAY: OPT_NOT_SUPPORTED,
+         TIMEOUT: OPT_NOT_SUPPORTED,
+     }
+     opts = {
+         'trace': {'is_flag': True, 'default': False, 'help': 'Perform dns tracing'},
+         'resolver': {'type': str, 'short': 'r', 'help': 'List of resolvers to use (file or comma separated)'},
+         'wildcard_domain': {'type': str, 'short': 'wd', 'help': 'Domain name for wildcard filtering'},
+         'rc': {'type': str, 'short': 'rc', 'help': 'DNS return code to filter (noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, xrrset, notauth, notzone)'},  # noqa: E501
+         'subdomains_only': {'is_flag': True, 'short': 'so', 'default': False, 'internal': True, 'help': 'Only return subdomains'},  # noqa: E501
+         WORDLIST: {'type': str, 'short': 'w', 'default': None, 'process': process_wordlist, 'help': 'Wordlist to use'},  # noqa: E501
+     }
+     item_loaders = [JSONSerializer()]
+     install_version = 'v1.2.2'
+     install_cmd = 'go install -v github.com/projectdiscovery/dnsx/cmd/dnsx@[install_version]'
+     github_handle = 'projectdiscovery/dnsx'
+     profile = 'io'
+
+     @staticmethod
+     def validate_input(self, inputs):
+         """All targets will return positive DNS queries. Aborting bruteforcing."""
+         if not self.get_opt_value('wordlist'):
+             return True
+         if self.get_opt_value('wildcard_domain'):
+             return True
+         for target in self.inputs:
+             subdomain = f'xxxxxx.{target}'
+             if check_dns_response(subdomain):
+                 self.add_result(Warning(message=f'Domain {target} returns false positive DNS results for A queries. Removing target.'), print=False)  # noqa: E501
+                 self.inputs = [t for t in self.inputs if t != target]
+                 if len(self.inputs) == 0 and not self.has_parent:
+                     self.add_result(Warning(message='Please specify the wildcard_domain option to get accurate results.'), print=False)  # noqa: E501
+                     return False
+         return True
+
+     @staticmethod
+     def before_init(self):
+         self.wordlist = self.get_opt_value('wordlist')
+         self.subdomains = []
+         if self.wordlist:
+             self.file_flag = '-d'
+             self.input_flag = '-d'
+         rc = self.get_opt_value('rc')
+         if not rc:
+             self.cmd += ' -rc noerror'
+         if len(self.inputs) > 1 and self.get_opt_value('wildcard_domain'):
+             fqdn = extract_domain_info(self.inputs[0], domain_only=True)
+             for input in self.inputs[1:]:
+                 fqdn_item = extract_domain_info(input, domain_only=True)
+                 if fqdn_item != fqdn:
+                     return Error('Wildcard domain is not supported when using multiple hosts with different FQDNs !')
+
+     @staticmethod
+     def on_json_loaded(self, item):
+         record_types = ['a', 'aaaa', 'cname', 'mx', 'ns', 'txt', 'srv', 'ptr', 'soa', 'axfr', 'caa']
+         host = item['host']
+         status_code = item.get('status_code')
+         # if host.startswith('*'):
+         #     yield Warning(f'Wildcard domain detected: {host}. Ignore previous results.')
+         #     self.stop_process(exit_ok=True)
+         #     return
+         is_ip = validators.ipv4(host) or validators.ipv6(host)
+         if status_code and status_code == 'NOERROR' and not is_ip:
+             subdomain = Subdomain(
+                 host=host,
+                 domain=extract_domain_info(host, domain_only=True),
+                 verified=True,
+                 sources=['dns'],
+             )
+             self.subdomains.append(subdomain)
+             yield subdomain
+             if self.get_opt_value('subdomains_only'):
+                 return
+         for _type in record_types:
+             values = item.get(_type, [])
+             if isinstance(values, dict):
+                 values = [values]
+             for value in values:
+                 name = value
+                 extra_data = {}
+                 if isinstance(value, dict):
+                     name = value.get('name', host)
+                     extra_data = {k: v for k, v in value.items() if k != 'name' and k != 'host'}
+                 if _type == 'a':
+                     ip = Ip(
+                         host=host,
+                         ip=name,
+                         protocol=IpProtocol.IPv4,
+                         alive=False
+                     )
+                     if ip not in self.results:
+                         yield ip
+                 elif _type == 'aaaa':
+                     ip = Ip(
+                         host=host,
+                         ip=name,
+                         protocol=IpProtocol.IPv6,
+                         alive=False
+                     )
+                     if ip not in self.results:
+                         yield ip
+                 elif _type == 'ptr':
+                     ip = Ip(
+                         host=host,
+                         ip=name,
+                         protocol=IpProtocol.IPv4,
+                         alive=False
+                     )
+                     if ip not in self.results:
+                         yield ip
+                 record = Record(
+                     host=host,
+                     name=name,
+                     type=_type.upper(),
+                     extra_data=extra_data,
+                     _source=self.unique_name
+                 )
+
+                 if record not in self.results:
+                     yield record
+
+
+ def stream_file_up_to_line(file_path, max_lines=50):
+     """
+     Streams a file line by line, up to max_lines lines.
+
+     Args:
+         file_path (str): Path to the file to be streamed.
+         max_lines (int): Maximum number of lines to yield (default: 50).
+
+     Yields:
+         str: Each line from the file, up to max_lines.
+     """
+     with open(file_path, 'r') as file:
+         for line_number, line in enumerate(file, start=1):
+             if line_number > max_lines:
+                 break
+             yield line
+
+
+ def check_dns_response(domain, record_type="A"):
+     try:
+         # Query DNS for the specified record type (A, MX, NS, etc.)
+         resolver = dns.resolver.Resolver()
+         resolver.timeout = 60
+         resolver.lifetime = 1
+         resolver.resolve(domain, record_type)
+         return True
+     except dns.resolver.NXDOMAIN:
+         # print(f"❌ Domain '{domain}' does not exist (NXDOMAIN)")
+         return False
+     except dns.resolver.NoAnswer:
+         # print(f"⚠️ Domain '{domain}' exists but has no {record_type} record")
+         return False
+     except dns.resolver.Timeout:
+         # print(f"⏱️ DNS query timed out for '{domain}'")
+         return False
+     except Exception:
+         # print(f"❌ Error checking DNS for '{domain}': {e}")
+         return False
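
validate_input guards the bruteforce mode: it resolves a throwaway label (xxxxxx.<target>) and drops any target where that bogus name still answers, since every candidate subdomain would then be a false positive. A self-contained sketch of the same wildcard probe with dnspython (the dns.resolver import above); the helper name and random-label scheme are illustrative, not part of secator:

import secrets

import dns.resolver  # pip install dnspython


def has_wildcard_dns(domain, record_type='A'):
    """Return True if a random, non-existent label under `domain` still resolves."""
    probe = f'{secrets.token_hex(4)}.{domain}'
    resolver = dns.resolver.Resolver()
    resolver.lifetime = 3  # total seconds allowed for the query
    try:
        resolver.resolve(probe, record_type)
        return True  # a bogus name resolved: wildcard DNS (or a catch-all resolver)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
        return False
    except Exception:
        return False


if __name__ == '__main__':
    for target in ['example.com']:
        if has_wildcard_dns(target):
            print(f'{target}: wildcard DNS, bruteforce results would be false positives')
        else:
            print(f'{target}: safe to bruteforce')
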
secator/tasks/feroxbuster.py ADDED
@@ -0,0 +1,93 @@
+ from secator.config import CONFIG
+ from secator.decorators import task
+ from secator.definitions import (CONTENT_TYPE, DATA, DELAY, DEPTH, FILTER_CODES,
+                                  FILTER_REGEX, FILTER_SIZE, FILTER_WORDS,
+                                  FOLLOW_REDIRECT, HEADER, LINES, MATCH_CODES,
+                                  MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD,
+                                  OPT_NOT_SUPPORTED, OPT_PIPE_INPUT, PROXY,
+                                  RATE_LIMIT, RETRIES, STATUS_CODE,
+                                  THREADS, TIMEOUT, USER_AGENT, WORDLIST, WORDS, URL)
+ from secator.output_types import Url
+ from secator.serializers import JSONSerializer
+ from secator.tasks._categories import HttpFuzzer
+
+
+ @task()
+ class feroxbuster(HttpFuzzer):
+     """Simple, fast, recursive content discovery tool written in Rust"""
+     cmd = 'feroxbuster --auto-bail --no-state'
+     input_types = [URL]
+     output_types = [Url]
+     tags = ['url', 'fuzz']
+     input_flag = '--url'
+     input_chunk_size = 1
+     file_flag = OPT_PIPE_INPUT
+     json_flag = '--silent --json'
+     opt_prefix = '--'
+     opts = {
+         # 'auto_tune': {'is_flag': True, 'default': False, 'help': 'Automatically lower scan rate when too many errors'},
+         'extract_links': {'is_flag': True, 'default': False, 'help': 'Extract links from response body'},
+         'collect_backups': {'is_flag': True, 'default': False, 'help': 'Request likely backup exts for urls'},
+         'collect_extensions': {'is_flag': True, 'default': False, 'help': 'Discover exts and add to --extensions'},
+         'collect_words': {'is_flag': True, 'default': False, 'help': 'Discover important words and add to wordlist'},
+     }
+     opt_key_map = {
+         HEADER: 'headers',
+         DATA: 'data',
+         DELAY: OPT_NOT_SUPPORTED,
+         DEPTH: 'depth',
+         FILTER_CODES: 'filter-status',
+         FILTER_REGEX: 'filter-regex',
+         FILTER_SIZE: 'filter-size',
+         FILTER_WORDS: 'filter-words',
+         FOLLOW_REDIRECT: 'redirects',
+         MATCH_CODES: 'status-codes',
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: 'methods',
+         PROXY: 'proxy',
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent',
+         WORDLIST: 'wordlist',
+         'request_headers': 'headers'
+     }
+     item_loaders = [JSONSerializer()]
+     output_map = {
+         Url: {
+             STATUS_CODE: 'status',
+             CONTENT_TYPE: lambda x: x['headers'].get('content-type'),
+             LINES: 'line_count',
+             WORDS: 'word_count'
+         }
+     }
+     install_cmd_pre = {
+         '*': ['curl', 'bash']
+     }
+     install_version = 'v2.11.0'
+     install_cmd = (
+         f'cd /tmp && curl -sL https://raw.githubusercontent.com/epi052/feroxbuster/master/install-nix.sh | bash -s {CONFIG.dirs.bin}'  # noqa: E501
+     )
+     github_handle = 'epi052/feroxbuster'
+     proxychains = False
+     proxy_socks5 = True
+     proxy_http = True
+
+     @staticmethod
+     def on_start(self):
+         if self.inputs_path:
+             self.cmd += ' --stdin'
+
+     @staticmethod
+     def validate_item(self, item):
+         if isinstance(item, dict):
+             return item['type'] == 'response'
+         return True
+
+     @staticmethod
+     def on_item(self, item):
+         item.request_headers = self.get_opt_value('header', preprocess=True)
+         return item
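
With --silent --json, feroxbuster emits one JSON object per line and mixes response objects with statistics; validate_item above keeps only the former, and output_map then lifts status, line_count, word_count and the content-type header into the normalized Url. A standalone sketch over two hypothetical lines:

import json

# Two hypothetical feroxbuster --json lines: a statistics object and a response object.
lines = [
    '{"type": "statistics", "requests": 1000}',
    '{"type": "response", "url": "https://example.com/docs/", "status": 200,'
    ' "line_count": 120, "word_count": 450, "headers": {"content-type": "text/html"}}',
]

for line in lines:
    item = json.loads(line)
    if item.get('type') != 'response':  # same filter as validate_item
        continue
    url = {
        'url': item['url'],
        'status_code': item['status'],
        'content_type': item['headers'].get('content-type'),
        'lines': item['line_count'],
        'words': item['word_count'],
    }
    print(url)
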