secator 0.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +453 -0
  4. secator/celery_signals.py +138 -0
  5. secator/celery_utils.py +320 -0
  6. secator/cli.py +2035 -0
  7. secator/cli_helper.py +395 -0
  8. secator/click.py +87 -0
  9. secator/config.py +670 -0
  10. secator/configs/__init__.py +0 -0
  11. secator/configs/profiles/__init__.py +0 -0
  12. secator/configs/profiles/aggressive.yaml +8 -0
  13. secator/configs/profiles/all_ports.yaml +7 -0
  14. secator/configs/profiles/full.yaml +31 -0
  15. secator/configs/profiles/http_headless.yaml +7 -0
  16. secator/configs/profiles/http_record.yaml +8 -0
  17. secator/configs/profiles/insane.yaml +8 -0
  18. secator/configs/profiles/paranoid.yaml +8 -0
  19. secator/configs/profiles/passive.yaml +11 -0
  20. secator/configs/profiles/polite.yaml +8 -0
  21. secator/configs/profiles/sneaky.yaml +8 -0
  22. secator/configs/profiles/tor.yaml +5 -0
  23. secator/configs/scans/__init__.py +0 -0
  24. secator/configs/scans/domain.yaml +31 -0
  25. secator/configs/scans/host.yaml +23 -0
  26. secator/configs/scans/network.yaml +30 -0
  27. secator/configs/scans/subdomain.yaml +27 -0
  28. secator/configs/scans/url.yaml +19 -0
  29. secator/configs/workflows/__init__.py +0 -0
  30. secator/configs/workflows/cidr_recon.yaml +48 -0
  31. secator/configs/workflows/code_scan.yaml +29 -0
  32. secator/configs/workflows/domain_recon.yaml +46 -0
  33. secator/configs/workflows/host_recon.yaml +95 -0
  34. secator/configs/workflows/subdomain_recon.yaml +120 -0
  35. secator/configs/workflows/url_bypass.yaml +15 -0
  36. secator/configs/workflows/url_crawl.yaml +98 -0
  37. secator/configs/workflows/url_dirsearch.yaml +62 -0
  38. secator/configs/workflows/url_fuzz.yaml +68 -0
  39. secator/configs/workflows/url_params_fuzz.yaml +66 -0
  40. secator/configs/workflows/url_secrets_hunt.yaml +23 -0
  41. secator/configs/workflows/url_vuln.yaml +91 -0
  42. secator/configs/workflows/user_hunt.yaml +29 -0
  43. secator/configs/workflows/wordpress.yaml +38 -0
  44. secator/cve.py +718 -0
  45. secator/decorators.py +7 -0
  46. secator/definitions.py +168 -0
  47. secator/exporters/__init__.py +14 -0
  48. secator/exporters/_base.py +3 -0
  49. secator/exporters/console.py +10 -0
  50. secator/exporters/csv.py +37 -0
  51. secator/exporters/gdrive.py +123 -0
  52. secator/exporters/json.py +16 -0
  53. secator/exporters/table.py +36 -0
  54. secator/exporters/txt.py +28 -0
  55. secator/hooks/__init__.py +0 -0
  56. secator/hooks/gcs.py +80 -0
  57. secator/hooks/mongodb.py +281 -0
  58. secator/installer.py +694 -0
  59. secator/loader.py +128 -0
  60. secator/output_types/__init__.py +49 -0
  61. secator/output_types/_base.py +108 -0
  62. secator/output_types/certificate.py +78 -0
  63. secator/output_types/domain.py +50 -0
  64. secator/output_types/error.py +42 -0
  65. secator/output_types/exploit.py +58 -0
  66. secator/output_types/info.py +24 -0
  67. secator/output_types/ip.py +47 -0
  68. secator/output_types/port.py +55 -0
  69. secator/output_types/progress.py +36 -0
  70. secator/output_types/record.py +36 -0
  71. secator/output_types/stat.py +41 -0
  72. secator/output_types/state.py +29 -0
  73. secator/output_types/subdomain.py +45 -0
  74. secator/output_types/tag.py +69 -0
  75. secator/output_types/target.py +38 -0
  76. secator/output_types/url.py +112 -0
  77. secator/output_types/user_account.py +41 -0
  78. secator/output_types/vulnerability.py +101 -0
  79. secator/output_types/warning.py +30 -0
  80. secator/report.py +140 -0
  81. secator/rich.py +130 -0
  82. secator/runners/__init__.py +14 -0
  83. secator/runners/_base.py +1240 -0
  84. secator/runners/_helpers.py +218 -0
  85. secator/runners/celery.py +18 -0
  86. secator/runners/command.py +1178 -0
  87. secator/runners/python.py +126 -0
  88. secator/runners/scan.py +87 -0
  89. secator/runners/task.py +81 -0
  90. secator/runners/workflow.py +168 -0
  91. secator/scans/__init__.py +29 -0
  92. secator/serializers/__init__.py +8 -0
  93. secator/serializers/dataclass.py +39 -0
  94. secator/serializers/json.py +45 -0
  95. secator/serializers/regex.py +25 -0
  96. secator/tasks/__init__.py +8 -0
  97. secator/tasks/_categories.py +487 -0
  98. secator/tasks/arjun.py +113 -0
  99. secator/tasks/arp.py +53 -0
  100. secator/tasks/arpscan.py +70 -0
  101. secator/tasks/bbot.py +372 -0
  102. secator/tasks/bup.py +118 -0
  103. secator/tasks/cariddi.py +193 -0
  104. secator/tasks/dalfox.py +87 -0
  105. secator/tasks/dirsearch.py +84 -0
  106. secator/tasks/dnsx.py +186 -0
  107. secator/tasks/feroxbuster.py +93 -0
  108. secator/tasks/ffuf.py +135 -0
  109. secator/tasks/fping.py +85 -0
  110. secator/tasks/gau.py +102 -0
  111. secator/tasks/getasn.py +60 -0
  112. secator/tasks/gf.py +36 -0
  113. secator/tasks/gitleaks.py +96 -0
  114. secator/tasks/gospider.py +84 -0
  115. secator/tasks/grype.py +109 -0
  116. secator/tasks/h8mail.py +75 -0
  117. secator/tasks/httpx.py +167 -0
  118. secator/tasks/jswhois.py +36 -0
  119. secator/tasks/katana.py +203 -0
  120. secator/tasks/maigret.py +87 -0
  121. secator/tasks/mapcidr.py +42 -0
  122. secator/tasks/msfconsole.py +179 -0
  123. secator/tasks/naabu.py +85 -0
  124. secator/tasks/nmap.py +487 -0
  125. secator/tasks/nuclei.py +151 -0
  126. secator/tasks/search_vulns.py +225 -0
  127. secator/tasks/searchsploit.py +109 -0
  128. secator/tasks/sshaudit.py +299 -0
  129. secator/tasks/subfinder.py +48 -0
  130. secator/tasks/testssl.py +283 -0
  131. secator/tasks/trivy.py +130 -0
  132. secator/tasks/trufflehog.py +240 -0
  133. secator/tasks/urlfinder.py +100 -0
  134. secator/tasks/wafw00f.py +106 -0
  135. secator/tasks/whois.py +34 -0
  136. secator/tasks/wpprobe.py +116 -0
  137. secator/tasks/wpscan.py +202 -0
  138. secator/tasks/x8.py +94 -0
  139. secator/tasks/xurlfind3r.py +83 -0
  140. secator/template.py +294 -0
  141. secator/thread.py +24 -0
  142. secator/tree.py +196 -0
  143. secator/utils.py +922 -0
  144. secator/utils_test.py +297 -0
  145. secator/workflows/__init__.py +29 -0
  146. secator-0.22.0.dist-info/METADATA +447 -0
  147. secator-0.22.0.dist-info/RECORD +150 -0
  148. secator-0.22.0.dist-info/WHEEL +4 -0
  149. secator-0.22.0.dist-info/entry_points.txt +2 -0
  150. secator-0.22.0.dist-info/licenses/LICENSE +60 -0
secator/tasks/_categories.py ADDED
@@ -0,0 +1,487 @@
+ import json
+ import os
+ import re
+
+ from functools import cache
+
+ import requests
+ from bs4 import BeautifulSoup
+ from cpe import CPE
+
+ from secator.definitions import (CIDR_RANGE, CVSS_SCORE, DATA, DELAY, DEPTH, DESCRIPTION, FILTER_CODES,
+                                  FILTER_REGEX, FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST, ID, IP,
+                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD, NAME, PATH, PORTS, PROVIDER, PROXY,
+                                  RATE_LIMIT, REFERENCES, RETRIES, SEVERITY, TAGS, THREADS, TIMEOUT, TOP_PORTS, URL, USER_AGENT,
+                                  USERNAME, WORDLIST)
+ from secator.output_types import Ip, Port, Subdomain, Tag, Url, UserAccount, Vulnerability
+ from secator.config import CONFIG
+ from secator.runners import Command
+ from secator.utils import debug, process_wordlist, headers_to_dict
+
+
+ def process_headers(headers_dict):
+     headers = []
+     for key, value in headers_dict.items():
+         headers.append(f'{key}:{value}')
+     return headers
+
+
+ OPTS = {
+     HEADER: {'type': str, 'short': 'H', 'help': 'Custom header to add to each request in the form "KEY1:VALUE1;; KEY2:VALUE2"', 'pre_process': headers_to_dict, 'process': process_headers, 'default': CONFIG.http.default_header}, # noqa: E501
+     DATA: {'type': str, 'help': 'Data to send in the request body'},
+     DELAY: {'type': float, 'short': 'd', 'help': 'Delay to add between each request'},
+     DEPTH: {'type': int, 'help': 'Scan depth'},
+     FILTER_CODES: {'type': str, 'short': 'fc', 'help': 'Filter out responses with HTTP codes'},
+     FILTER_REGEX: {'type': str, 'short': 'fr', 'help': 'Filter out responses with regular expression'},
+     FILTER_SIZE: {'type': int, 'short': 'fs', 'help': 'Filter out responses with size'},
+     FILTER_WORDS: {'type': int, 'short': 'fw', 'help': 'Filter out responses with word count'},
+     FOLLOW_REDIRECT: {'is_flag': True, 'short': 'frd', 'help': 'Follow HTTP redirects'},
+     MATCH_CODES: {'type': str, 'short': 'mc', 'help': 'Match HTTP status codes e.g "201,300,301"'},
+     MATCH_REGEX: {'type': str, 'short': 'mr', 'help': 'Match responses with regular expression'},
+     MATCH_SIZE: {'type': int, 'short': 'ms', 'help': 'Match responses with size'},
+     MATCH_WORDS: {'type': int, 'short': 'mw', 'help': 'Match responses with word count'},
+     METHOD: {'type': str, 'help': 'HTTP method to use for requests'},
+     PROXY: {'type': str, 'help': 'HTTP(s) / SOCKS5 proxy'},
+     RATE_LIMIT: {'type': int, 'short': 'rl', 'help': 'Rate limit, i.e. max number of requests per second'},
+     RETRIES: {'type': int, 'help': 'Retries'},
+     THREADS: {'type': int, 'help': 'Number of threads to run'},
+     TIMEOUT: {'type': int, 'help': 'Request timeout'},
+     USER_AGENT: {'type': str, 'short': 'ua', 'help': 'User agent, e.g "Mozilla Firefox 1.0"'},
+     WORDLIST: {'type': str, 'short': 'w', 'default': 'http', 'process': process_wordlist, 'help': 'Wordlist to use'}
+ }
+
+ OPTS_HTTP = [
+     HEADER, DELAY, FOLLOW_REDIRECT, METHOD, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT
+ ]
+
+ OPTS_HTTP_CRAWLERS = OPTS_HTTP + [
+     DEPTH, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, FILTER_REGEX, FILTER_CODES, FILTER_SIZE, FILTER_WORDS,
+     MATCH_CODES
+ ]
+
+ OPTS_HTTP_FUZZERS = OPTS_HTTP_CRAWLERS + [WORDLIST, DATA]
+
+ OPTS_RECON = [
+     DELAY, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT
+ ]
+
+ OPTS_VULN = [
+     HEADER, DELAY, FOLLOW_REDIRECT, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT
+ ]
+
+
+ #---------------#
+ # HTTP category #
+ #---------------#
+
+ class Http(Command):
+     meta_opts = {k: OPTS[k] for k in OPTS_HTTP_CRAWLERS}
+     input_types = [URL]
+     output_types = [Url]
+
+
+ class HttpCrawler(Command):
+     meta_opts = {k: OPTS[k] for k in OPTS_HTTP_CRAWLERS}
+     input_types = [URL]
+     output_types = [Url]
+
+
+ class HttpFuzzer(Command):
+     meta_opts = {k: OPTS[k] for k in OPTS_HTTP_FUZZERS}
+     input_types = [URL]
+     output_types = [Url]
+     profile = lambda opts: HttpFuzzer.dynamic_profile(opts) # noqa: E731
+
+     @staticmethod
+     def dynamic_profile(opts):
+         wordlist = HttpFuzzer._get_opt_value(
+             opts,
+             'wordlist',
+             opts_conf=dict(HttpFuzzer.opts, **HttpFuzzer.meta_opts),
+             opt_aliases=opts.get('aliases', []),
+             preprocess=True,
+             process=True,
+         )
+         wordlist_size_mb = os.path.getsize(wordlist) / (1024 * 1024)
+         return 'cpu' if wordlist_size_mb > 5 else 'io'
+
+
+ #----------------#
+ # Recon category #
+ #----------------#
+
+ class Recon(Command):
+     meta_opts = {k: OPTS[k] for k in OPTS_RECON}
+     output_types = [Subdomain, UserAccount, Ip, Port]
+
+
+ class ReconDns(Recon):
+     input_types = [HOST]
+     output_types = [Subdomain]
+
+
+ class ReconUser(Recon):
+     input_types = [USERNAME]
+     output_types = [UserAccount]
+
+
+ class ReconIp(Recon):
+     input_types = [CIDR_RANGE]
+     output_types = [Ip]
+
+
+ class ReconPort(Recon):
+     input_types = [IP]
+     output_types = [Port]
+     meta_opts = {
+         PORTS: {'type': str, 'short': 'p', 'help': 'Only scan specific ports (comma separated list, "-" for all ports)'}, # noqa: E501
+         TOP_PORTS: {'type': str, 'short': 'tp', 'help': 'Scan <number> most common ports'},
+     }
+
+
+ #---------------#
+ # Vuln category #
+ #---------------#
+
+ class Vuln(Command):
+     meta_opts = {k: OPTS[k] for k in OPTS_VULN}
+     output_types = [Vulnerability]
+
+     @staticmethod
+     def lookup_local_cve(cve_id):
+         cve_path = f'{CONFIG.dirs.data}/cves/{cve_id}.json'
+         if os.path.exists(cve_path):
+             with open(cve_path, 'r') as f:
+                 return json.load(f)
+         debug(f'{cve_id}: not found in cache', sub='cve')
+         return None
+
+     # @staticmethod
+     # def lookup_exploitdb(exploit_id):
+     # print('looking up exploit')
+     # try:
+     # resp = requests.get(f'https://exploit-db.com/exploits/{exploit_id}', timeout=5)
+     # resp.raise_for_status()
+     # content = resp.content
+     # except requests.RequestException as e:
+     # debug(f'Failed remote query for {exploit_id} ({str(e)}).', sub='cve')
+     # logger.error(f'Could not fetch exploit info for exploit {exploit_id}. Skipping.')
+     # return None
+     # return cve_info
+
+     @staticmethod
+     def create_cpe_string(product_name, version):
+         """
+         Generate a CPE string for a given product and version.
+
+         Args:
+             product_name (str): The name of the product.
+             version (str): The version of the product.
+
+         Returns:
+             str: A CPE string formatted according to the CPE 2.3 specification.
+         """
+         cpe_version = "2.3" # CPE Specification version
+         part = "a" # 'a' for application
+         vendor = product_name.lower() # Vendor name, using product name
+         product = product_name.lower() # Product name
+         version = version # Product version
+         cpe_string = f"cpe:{cpe_version}:{part}:{vendor}:{product}:{version}:*:*:*:*:*:*:*"
+         return cpe_string
+
+     @staticmethod
+     def match_cpes(fs1, fs2):
+         """Check if two CPEs match. Partial matches consisting of <vendor>:<product>:<version> are considered a match.
+
+         Args:
+             fs1 (str): Format string 1.
+             fs2 (str): Format string 2.
+
+         Returns:
+             bool: True if the two CPEs match, False otherwise.
+         """
+         if fs1 == fs2:
+             return True
+         split_fs1 = fs1.split(':')
+         split_fs2 = fs2.split(':')
+         tup1 = split_fs1[3], split_fs1[4], split_fs1[5]
+         tup2 = split_fs2[3], split_fs2[4], split_fs2[5]
+         return tup1 == tup2
+
+     @staticmethod
+     def get_cpe_fs(cpe):
+         """Return formatted string for given CPE.
+
+         Args:
+             cpe (string): Input CPE
+
+         Returns:
+             string: CPE formatted string.
+         """
+         try:
+             return CPE(cpe).as_fs()
+         except NotImplementedError:
+             return None
+
+     @cache
+     @staticmethod
+     def lookup_cve_from_vulners_exploit(exploit_id, *cpes):
+         """Search for a CVE corresponding to an exploit by extracting the CVE id from the exploit HTML page.
+
+         Args:
+             exploit_id (str): Exploit ID.
+             cpes (tuple[str], Optional): CPEs to match for.
+
+         Returns:
+             dict: vulnerability data.
+         """
+         if CONFIG.runners.skip_exploit_search:
+             debug(f'{exploit_id}: skipped remote query since config.runners.skip_exploit_search is set.', sub='cve.vulners')
+             return None
+         if CONFIG.offline_mode:
+             debug(f'{exploit_id}: skipped remote query since config.offline_mode is set.', sub='cve.vulners')
+             return None
+         try:
+             resp = requests.get(f'https://vulners.com/githubexploit/{exploit_id}', timeout=5)
+             resp.raise_for_status()
+             soup = BeautifulSoup(resp.text, 'lxml')
+             title = soup.title.get_text(strip=True)
+             h1 = [h1.get_text(strip=True) for h1 in soup.find_all('h1')]
+             if '404' in h1:
+                 raise requests.RequestException("404 [not found or rate limited]")
+             code = [code.get_text(strip=True) for code in soup.find_all('code')]
+             elems = [title] + h1 + code
+             content = '\n'.join(elems)
+             cve_regex = re.compile(r'(CVE(?:-|_)\d{4}(?:-|_)\d{4,7})', re.IGNORECASE)
+             matches = cve_regex.findall(str(content))
+             if not matches:
+                 debug(f'{exploit_id}: no matching CVE found in https://vulners.com/githubexploit/{exploit_id}.', sub='cve.vulners')
+                 return None
+             cve_id = matches[0].replace('_', '-').upper()
+             cve_data = Vuln.lookup_cve(cve_id, *cpes)
+             if cve_data:
+                 return cve_data
+
+         except requests.RequestException as e:
+             debug(f'{exploit_id}: failed remote query ({str(e)}).', sub='cve.vulners')
+             return None
+
+     @cache
+     @staticmethod
+     def lookup_cve_from_cve_circle(cve_id):
+         """Get CVE data from vulnerability.circl.lu.
+
+         Args:
+             cve_id (str): CVE id.
+
+         Returns:
+             dict | None: CVE data, None if no response or empty response.
+         """
+         if CONFIG.runners.skip_cve_search:
+             debug(f'{cve_id}: skipped remote query since config.runners.skip_cve_search is set.', sub='cve.circl')
+             return None
+         if CONFIG.offline_mode:
+             debug(f'{cve_id}: skipped remote query since config.offline_mode is set.', sub='cve.circl')
+             return None
+         try:
+             resp = requests.get(f'https://vulnerability.circl.lu/api/cve/{cve_id}', timeout=5)
+             resp.raise_for_status()
+             cve_info = resp.json()
+             if not cve_info:
+                 debug(f'{cve_id}: empty response from https://vulnerability.circl.lu/api/cve/{cve_id}', sub='cve.circl')
+                 return None
+             cve_path = f'{CONFIG.dirs.data}/cves/{cve_id}.json'
+             with open(cve_path, 'w') as f:
+                 f.write(json.dumps(cve_info, indent=2))
+             debug(f'{cve_id}: downloaded to {cve_path}', sub='cve.circl')
+             return cve_info
+         except requests.RequestException as e:
+             debug(f'{cve_id}: failed remote query ({str(e)}).', sub='cve.circl')
+             return None
302
+ @cache
303
+ @staticmethod
304
+ def lookup_cve(cve_id, *cpes):
305
+ """Search for a CVE info and return vulnerability data.
306
+
307
+ Args:
308
+ cve_id (str): CVE ID in the form CVE-*
309
+ cpes (tuple[str], Optional): CPEs to match for.
310
+
311
+ Returns:
312
+ dict: vulnerability data.
313
+ """
314
+ cve_info = Vuln.lookup_local_cve(cve_id)
315
+
316
+ # Online CVE lookup
317
+ if not cve_info:
318
+ cve_info = Vuln.lookup_cve_from_cve_circle(cve_id)
319
+ if not cve_info:
320
+ return None
321
+
322
+ # Convert cve info to easy format
323
+ cve_id = cve_info['cveMetadata']['cveId']
324
+ cna = cve_info['containers']['cna']
325
+ metrics = cna.get('metrics', [])
326
+ cvss_score = 0
327
+ for metric in metrics:
328
+ for name, value in metric.items():
329
+ if 'cvss' in name:
330
+ cvss_score = metric[name]['baseScore']
331
+ description = cna.get('descriptions', [{}])[0].get('value')
332
+ cwe_id = cna.get('problemTypes', [{}])[0].get('descriptions', [{}])[0].get('cweId')
333
+ cpes_affected = []
334
+ for product in cna['affected']:
335
+ cpes_affected.extend(product.get('cpes', []))
336
+ references = [u['url'] for u in cna['references']]
337
+ cve_info = {
338
+ 'id': cve_id,
339
+ 'cwe_id': cwe_id,
340
+ 'cvss_score': cvss_score,
341
+ 'description': description,
342
+ 'cpes': cpes_affected,
343
+ 'references': references
344
+ }
345
+ if not cpes_affected:
346
+ debug(f'{cve_id}: no CPEs found in CVE data', sub='cve.circl', verbose=True)
347
+ else:
348
+ debug(f'{cve_id}: {len(cpes_affected)} CPEs found in CVE data', sub='cve.circl', verbose=True)
349
+
350
+ # Match the CPE string against the affected products CPE FS strings from the CVE data if a CPE was passed.
351
+ # This allow to limit the number of False positives (high) that we get from nmap NSE vuln scripts like vulscan
352
+ # and ensure we keep only right matches.
353
+ # The check is not executed if no CPE was passed (sometimes nmap cannot properly detect a CPE) or if the CPE
354
+ # version cannot be determined.
355
+ cpe_match = False
356
+ tags = []
357
+ if cpes and cpes_affected:
358
+ for cpe in cpes:
359
+ cpe_fs = Vuln.get_cpe_fs(cpe)
360
+ if not cpe_fs:
361
+ debug(f'{cve_id}: Failed to parse CPE {cpe} with CPE parser', sub='cve.match', verbose=True)
362
+ tags.append('cpe-invalid')
363
+ continue
364
+ for cpe_affected in cpes_affected:
365
+ cpe_affected_fs = Vuln.get_cpe_fs(cpe_affected)
366
+ if not cpe_affected_fs:
367
+ debug(f'{cve_id}: Failed to parse CPE {cpe} (from online data) with CPE parser', sub='cve.match', verbose=True)
368
+ continue
369
+ debug(f'{cve_id}: Testing {cpe_fs} against {cpe_affected_fs}', sub='cve.match', verbose=True)
370
+ cpe_match = Vuln.match_cpes(cpe_fs, cpe_affected_fs)
371
+ if cpe_match:
372
+ debug(f'{cve_id}: CPE match found for {cpe}.', sub='cve.match')
373
+ tags.append('cpe-match')
374
+ break
375
+
376
+ if not cpe_match:
377
+ debug(f'{cve_id}: no CPE match found for {cpe}.', sub='cve.match')
378
+
379
+ # Parse CVE id and CVSS
380
+ name = id = cve_info['id']
381
+ # exploit_ids = cve_info.get('refmap', {}).get('exploit-db', [])
382
+ # osvdb_ids = cve_info.get('refmap', {}).get('osvdb', [])
383
+
384
+ # Get description
385
+ description = cve_info['description']
386
+ if description is not None:
387
+ description = description.replace(id, '').strip()
388
+
389
+ # Get references
390
+ references = cve_info.get(REFERENCES, [])
391
+ cve_ref_url = f'https://vulnerability.circl.lu/cve/{id}'
392
+ references.append(cve_ref_url)
393
+
394
+ # Get CWE ID
395
+ cwe_id = cve_info['cwe_id']
396
+ if cwe_id is not None:
397
+ tags.append(cwe_id)
398
+
399
+ # Set vulnerability severity based on CVSS score
400
+ severity = None
401
+ cvss = cve_info['cvss_score']
402
+ if cvss:
403
+ severity = Vuln.cvss_to_severity(cvss)
404
+
405
+ # Set confidence
406
+ vuln = {
407
+ ID: id,
408
+ NAME: name,
409
+ PROVIDER: 'vulnerability.circl.lu',
410
+ SEVERITY: severity,
411
+ CVSS_SCORE: cvss,
412
+ TAGS: tags,
413
+ REFERENCES: [f'https://vulnerability.circl.lu/cve/{id}'] + references,
414
+ DESCRIPTION: description,
415
+ }
416
+ return vuln
417
+
+     @cache
+     @staticmethod
+     def lookup_cve_from_ghsa(ghsa_id):
+         """Search for a GHSA on Github and return associated CVE vulnerability data.
+
+         Args:
+             ghsa_id (str): GHSA ID in the form GHSA-*
+
+         Returns:
+             dict: vulnerability data.
+         """
+         try:
+             resp = requests.get(f'https://github.com/advisories/{ghsa_id}', timeout=5)
+             resp.raise_for_status()
+         except requests.RequestException as e:
+             debug(f'Failed remote query for {ghsa_id} ({str(e)}).', sub='cve')
+             return None
+         soup = BeautifulSoup(resp.text, 'lxml')
+         sidebar_items = soup.find_all('div', {'class': 'discussion-sidebar-item'})
+         cve_id = sidebar_items[3].find('div').text.strip()
+         if not cve_id.startswith('CVE'):
+             debug(f'{ghsa_id}: No CVE_ID extracted from https://github.com/advisories/{ghsa_id}', sub='cve')
+             return None
+         vuln = Vuln.lookup_cve(cve_id)
+         if vuln:
+             vuln[TAGS].append('ghsa')
+             return vuln
+         return None
+
+     @staticmethod
+     def cvss_to_severity(cvss):
+         if cvss < 4:
+             severity = 'low'
+         elif cvss < 7:
+             severity = 'medium'
+         elif cvss < 9:
+             severity = 'high'
+         else:
+             severity = 'critical'
+         return severity
+
+
+ class VulnHttp(Vuln):
+     input_types = [HOST]
+
+
+ class VulnCode(Vuln):
+     input_types = [PATH]
+
+
+ class VulnMulti(Vuln):
+     input_types = [HOST]
+     output_types = [Vulnerability]
+
+
+ #--------------#
+ # Tag category #
+ #--------------#
+
+ class Tagger(Command):
+     input_types = [URL]
+     output_types = [Tag]
+
+ #----------------#
+ # osint category #
+ #----------------#
+
+
+ class OSInt(Command):
+     output_types = [UserAccount]
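
Note: the partial CPE match implemented by Vuln.match_cpes above compares only the vendor, product and version fields (positions 3-5) of two CPE 2.3 formatted strings. A minimal standalone sketch of that comparison, with illustrative CPE values that are not taken from the package:

    def match_cpes(fs1, fs2):
        # Exact match, or match on the <vendor>:<product>:<version> triple only
        if fs1 == fs2:
            return True
        return fs1.split(':')[3:6] == fs2.split(':')[3:6]

    cpe_detected = 'cpe:2.3:a:openbsd:openssh:8.2:*:*:*:*:*:*:*'
    cpe_affected = 'cpe:2.3:a:openbsd:openssh:8.2:p1:*:*:*:*:*:*'
    print(match_cpes(cpe_detected, cpe_affected))  # True: vendor, product and version all agree
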
secator/tasks/arjun.py ADDED
@@ -0,0 +1,113 @@
+ import os
+ import shlex
+ import yaml
+
+ from urllib.parse import urlparse, urlunparse, urlencode, parse_qs
+
+ from secator.decorators import task
+ from secator.definitions import (OUTPUT_PATH, RATE_LIMIT, THREADS, DELAY, TIMEOUT, METHOD, WORDLIST,
+                                  HEADER, URL, FOLLOW_REDIRECT)
+ from secator.output_types import Info, Url, Warning, Tag
+ from secator.runners import Command
+ from secator.tasks._categories import OPTS
+ from secator.utils import process_wordlist
+
+
+ @task()
+ class arjun(Command):
+     """HTTP Parameter Discovery Suite."""
+     cmd = 'arjun'
+     input_types = [URL]
+     output_types = [Url, Tag]
+     tags = ['url', 'fuzz', 'params']
+     input_flag = '-u'
+     file_flag = '-i'
+     version_flag = ' '
+     opts = {
+         'chunk_size': {'type': int, 'help': 'Control query/chunk size'},
+         'stable': {'is_flag': True, 'default': False, 'help': 'Use stable mode'},
+         'include': {'type': str, 'help': 'Include persistent data (e.g: "api_key=xxxxx" or {"api_key": "xxxx"})'},
+         'passive': {'is_flag': True, 'default': False, 'help': 'Passive mode'},
+         'casing': {'type': str, 'help': 'Casing style for params e.g. like_this, likeThis, LIKE_THIS, like_this'}, # noqa: E501
+         WORDLIST: {'type': str, 'short': 'w', 'default': 'burp-parameter-names', 'process': process_wordlist, 'help': 'Wordlist to use (default: arjun wordlist)'}, # noqa: E501
+     }
+     meta_opts = {
+         THREADS: OPTS[THREADS],
+         DELAY: OPTS[DELAY],
+         TIMEOUT: OPTS[TIMEOUT],
+         RATE_LIMIT: OPTS[RATE_LIMIT],
+         METHOD: OPTS[METHOD],
+         HEADER: OPTS[HEADER],
+         FOLLOW_REDIRECT: OPTS[FOLLOW_REDIRECT],
+     }
+     opt_key_map = {
+         THREADS: 't',
+         DELAY: 'd',
+         TIMEOUT: 'T',
+         RATE_LIMIT: '--rate-limit',
+         METHOD: 'm',
+         WORDLIST: 'w',
+         HEADER: '--headers',
+         'chunk_size': 'c',
+         'stable': '--stable',
+         'passive': '--passive',
+         'casing': '--casing',
+         'follow_redirect': '--follow-redirect',
+     }
+     opt_value_map = {
+         HEADER: lambda headers: "\\n".join(c.strip() for c in headers.split(";;"))
+     }
+     install_version = '2.2.7'
+     install_cmd = 'pipx install arjun==[install_version] --force'
+     install_github_bin = False
+     github_handle = 's0md3v/Arjun'
64
+
65
+ @staticmethod
66
+ def on_line(self, line):
67
+ if 'Processing chunks' in line:
68
+ return ''
69
+ return line
70
+
71
+ @staticmethod
72
+ def on_cmd(self):
73
+ follow_redirect = self.get_opt_value(FOLLOW_REDIRECT)
74
+ self.cmd = self.cmd.replace(' --follow-redirect', '')
75
+ if not follow_redirect:
76
+ self.cmd += ' --disable-redirects'
77
+
78
+ self.output_path = self.get_opt_value(OUTPUT_PATH)
79
+ if not self.output_path:
80
+ self.output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
81
+ self.cmd += f' -oJ {shlex.quote(self.output_path)}'
82
+
83
+ @staticmethod
84
+ def on_cmd_done(self):
85
+ if not os.path.exists(self.output_path):
86
+ # yield Error(message=f'Could not find JSON results in {self.output_path}')
87
+ return
88
+ yield Info(message=f'JSON results saved to {self.output_path}')
89
+ with open(self.output_path, 'r') as f:
90
+ results = yaml.safe_load(f.read())
91
+ if not results:
92
+ yield Warning(message='No results found !')
93
+ return
94
+ for url, values in results.items():
95
+ parsed_url = urlparse(url)
96
+ yield Url(
97
+ url=url,
98
+ host=parsed_url.hostname,
99
+ request_headers=values['headers'],
100
+ method=values['method'],
101
+ )
102
+ for param in values['params']:
103
+ new_params = parse_qs(parsed_url.query).copy()
104
+ new_params[param] = 'FUZZ'
105
+ new_query = urlencode(new_params, doseq=True)
106
+ new_url = urlunparse(parsed_url._replace(query=new_query))
107
+ yield Tag(
108
+ category='info',
109
+ name='url_param',
110
+ value=param,
111
+ match=url,
112
+ extra_data={'url': new_url}
113
+ )
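
For reference, the query rewriting in on_cmd_done above turns each parameter reported by arjun into a ready-to-fuzz URL. A standalone sketch of that step, using a made-up URL and parameter name rather than output from a real arjun run:

    from urllib.parse import urlparse, urlunparse, urlencode, parse_qs

    url = 'http://testphp.example.com/listproducts.php?cat=1'
    param = 'artist'  # hypothetical parameter discovered by arjun

    parsed_url = urlparse(url)
    new_params = parse_qs(parsed_url.query).copy()
    new_params[param] = 'FUZZ'
    new_query = urlencode(new_params, doseq=True)
    print(urlunparse(parsed_url._replace(query=new_query)))
    # http://testphp.example.com/listproducts.php?cat=1&artist=FUZZ
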
secator/tasks/arp.py ADDED
@@ -0,0 +1,53 @@
+ import re
+ import validators
+
+ from secator.decorators import task
+ from secator.output_types import Ip
+ from secator.runners import Command
+
+
+ @task()
+ class arp(Command):
+     """Display the system ARP cache."""
+     cmd = 'arp -a'
+     output_types = [Ip]
+     input_flag = None
+     default_inputs = ''
+     requires_sudo = True
+     tags = ['ip', 'recon']
+     opts = {}
+     install_pre = {
+         '*': ['net-tools'],
+     }
+
+     @staticmethod
+     def item_loader(self, line):
+         # Parse ARP output format:
+         # ? (172.18.0.4) at 02:42:ac:12:00:04 [ether] on br-781c859806d7
+         # _gateway (192.168.59.254) at 00:50:56:f5:67:e7 [ether] on ens33
+
+         # Use regex to extract components
+         # Pattern: <name> (<ip>) at <mac> [<physical>] on <interface>
+         pattern = r'^(.+?)\s+\(([0-9.]+)\)\s+at\s+([0-9a-f:]+)\s+\[(\w+)\]\s+on\s+(\S+)$'
+         match = re.match(pattern, line.strip())
+
+         if match:
+             name, ip, mac, physical, interface = match.groups()
+
+             # Validate IP address
+             if not (validators.ipv4(ip) or validators.ipv6(ip)):
+                 return
+
+             # Set host to the name if it's not just '?'
+             host = name.strip() if name.strip() != '?' else ''
+
+             yield Ip(
+                 ip=ip,
+                 host=host,
+                 alive=True,
+                 extra_data={
+                     'mac': mac,
+                     'physical': physical,
+                     'interface': interface,
+                 }
+             )
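
The item_loader regex above can be exercised directly against the sample lines quoted in its own comments; a quick standalone check (illustrative only, independent of secator):

    import re

    pattern = r'^(.+?)\s+\(([0-9.]+)\)\s+at\s+([0-9a-f:]+)\s+\[(\w+)\]\s+on\s+(\S+)$'
    line = '_gateway (192.168.59.254) at 00:50:56:f5:67:e7 [ether] on ens33'
    match = re.match(pattern, line.strip())
    print(match.groups())
    # ('_gateway', '192.168.59.254', '00:50:56:f5:67:e7', 'ether', 'ens33')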