secator 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of secator might be problematic.

Files changed (90)
  1. secator/celery.py +160 -185
  2. secator/celery_utils.py +268 -0
  3. secator/cli.py +427 -176
  4. secator/config.py +114 -68
  5. secator/configs/workflows/host_recon.yaml +5 -3
  6. secator/configs/workflows/port_scan.yaml +7 -3
  7. secator/configs/workflows/subdomain_recon.yaml +2 -2
  8. secator/configs/workflows/url_bypass.yaml +10 -0
  9. secator/configs/workflows/url_dirsearch.yaml +1 -1
  10. secator/configs/workflows/url_vuln.yaml +1 -1
  11. secator/decorators.py +170 -92
  12. secator/definitions.py +11 -4
  13. secator/exporters/__init__.py +7 -5
  14. secator/exporters/console.py +10 -0
  15. secator/exporters/csv.py +27 -19
  16. secator/exporters/gdrive.py +16 -11
  17. secator/exporters/json.py +3 -1
  18. secator/exporters/table.py +30 -2
  19. secator/exporters/txt.py +20 -16
  20. secator/hooks/gcs.py +53 -0
  21. secator/hooks/mongodb.py +53 -27
  22. secator/installer.py +277 -60
  23. secator/output_types/__init__.py +29 -11
  24. secator/output_types/_base.py +11 -1
  25. secator/output_types/error.py +36 -0
  26. secator/output_types/exploit.py +12 -8
  27. secator/output_types/info.py +24 -0
  28. secator/output_types/ip.py +8 -1
  29. secator/output_types/port.py +9 -2
  30. secator/output_types/progress.py +5 -0
  31. secator/output_types/record.py +5 -3
  32. secator/output_types/stat.py +33 -0
  33. secator/output_types/subdomain.py +1 -1
  34. secator/output_types/tag.py +8 -6
  35. secator/output_types/target.py +2 -2
  36. secator/output_types/url.py +14 -11
  37. secator/output_types/user_account.py +6 -6
  38. secator/output_types/vulnerability.py +8 -6
  39. secator/output_types/warning.py +24 -0
  40. secator/report.py +56 -23
  41. secator/rich.py +44 -39
  42. secator/runners/_base.py +629 -638
  43. secator/runners/_helpers.py +5 -91
  44. secator/runners/celery.py +18 -0
  45. secator/runners/command.py +404 -214
  46. secator/runners/scan.py +8 -24
  47. secator/runners/task.py +21 -55
  48. secator/runners/workflow.py +41 -40
  49. secator/scans/__init__.py +28 -0
  50. secator/serializers/dataclass.py +6 -0
  51. secator/serializers/json.py +10 -5
  52. secator/serializers/regex.py +12 -4
  53. secator/tasks/_categories.py +147 -42
  54. secator/tasks/bbot.py +295 -0
  55. secator/tasks/bup.py +99 -0
  56. secator/tasks/cariddi.py +38 -49
  57. secator/tasks/dalfox.py +3 -0
  58. secator/tasks/dirsearch.py +14 -25
  59. secator/tasks/dnsx.py +49 -30
  60. secator/tasks/dnsxbrute.py +4 -1
  61. secator/tasks/feroxbuster.py +10 -20
  62. secator/tasks/ffuf.py +3 -2
  63. secator/tasks/fping.py +4 -4
  64. secator/tasks/gau.py +5 -0
  65. secator/tasks/gf.py +2 -2
  66. secator/tasks/gospider.py +4 -0
  67. secator/tasks/grype.py +11 -13
  68. secator/tasks/h8mail.py +32 -42
  69. secator/tasks/httpx.py +58 -21
  70. secator/tasks/katana.py +19 -23
  71. secator/tasks/maigret.py +27 -25
  72. secator/tasks/mapcidr.py +2 -3
  73. secator/tasks/msfconsole.py +22 -19
  74. secator/tasks/naabu.py +18 -2
  75. secator/tasks/nmap.py +82 -55
  76. secator/tasks/nuclei.py +13 -3
  77. secator/tasks/searchsploit.py +26 -11
  78. secator/tasks/subfinder.py +5 -1
  79. secator/tasks/wpscan.py +91 -94
  80. secator/template.py +61 -45
  81. secator/thread.py +24 -0
  82. secator/utils.py +417 -78
  83. secator/utils_test.py +48 -23
  84. secator/workflows/__init__.py +28 -0
  85. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/METADATA +59 -48
  86. secator-0.8.0.dist-info/RECORD +115 -0
  87. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/WHEEL +1 -1
  88. secator-0.6.0.dist-info/RECORD +0 -101
  89. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/entry_points.txt +0 -0
  90. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/licenses/LICENSE +0 -0
secator/tasks/_categories.py CHANGED
@@ -1,19 +1,22 @@
  import json
  import os
+ import re
+
+ from functools import cache

  import requests
  from bs4 import BeautifulSoup
  from cpe import CPE

  from secator.definitions import (CIDR_RANGE, CVSS_SCORE, DELAY, DEPTH, DESCRIPTION, FILTER_CODES,
- 	FILTER_REGEX, FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST, ID,
+ 	FILTER_REGEX, FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST, ID, IP,
  	MATCH_CODES, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD, NAME, PATH, PROVIDER, PROXY,
  	RATE_LIMIT, REFERENCES, RETRIES, SEVERITY, TAGS, THREADS, TIMEOUT, URL, USER_AGENT,
  	USERNAME, WORDLIST)
  from secator.output_types import Ip, Port, Subdomain, Tag, Url, UserAccount, Vulnerability
  from secator.config import CONFIG
  from secator.runners import Command
- from secator.utils import debug
+ from secator.utils import debug, process_wordlist


  OPTS = {
@@ -36,7 +39,7 @@ OPTS = {
  	THREADS: {'type': int, 'help': 'Number of threads to run', 'default': 50},
  	TIMEOUT: {'type': int, 'help': 'Request timeout'},
  	USER_AGENT: {'type': str, 'short': 'ua', 'help': 'User agent, e.g "Mozilla Firefox 1.0"'},
- 	WORDLIST: {'type': str, 'short': 'w', 'default': CONFIG.wordlists.defaults.http, 'help': 'Wordlist to use'}
+ 	WORDLIST: {'type': str, 'short': 'w', 'default': 'http', 'process': process_wordlist, 'help': 'Wordlist to use'}
  }

  OPTS_HTTP = [
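
The WORDLIST default above no longer resolves a config value at import time; it now stores the symbolic name 'http' and defers resolution to process_wordlist when the option is parsed. A minimal sketch of that 'process'-hook pattern, assuming the hook is simply a callable applied to the final option value (resolve_wordlist, parse_opt, and the paths below are illustrative stand-ins, not secator's actual implementation):

from pathlib import Path

def resolve_wordlist(value):
    # Stand-in for secator.utils.process_wordlist: map a symbolic name to a
    # concrete file path, and pass real paths through unchanged (assumed layout).
    aliases = {'http': '~/.config/secator/wordlists/http.txt'}
    return str(Path(aliases.get(value, value)).expanduser())

OPTS = {'wordlist': {'default': 'http', 'process': resolve_wordlist}}

def parse_opt(name, raw=None):
    opt = OPTS[name]
    value = raw if raw is not None else opt['default']
    # The 'process' callable runs last, so defaults stay symbolic until a run
    # actually needs them (the wordlist file need not exist at import time).
    return opt.get('process', lambda v: v)(value)

print(parse_opt('wordlist'))  # resolved at parse time, not import time
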
@@ -106,7 +109,7 @@ class ReconIp(Recon):


  class ReconPort(Recon):
- 	input_type = HOST
+ 	input_type = IP
  	output_types = [Port]


@@ -124,6 +127,7 @@ class Vuln(Command):
  		if os.path.exists(cve_path):
  			with open(cve_path, 'r') as f:
  				return json.load(f)
+ 		debug(f'CVE {cve_id} not found in cache', sub='cve')
  		return None

  	# @staticmethod
@@ -179,12 +183,98 @@ class Vuln(Command):
  		return tup1 == tup2

  	@staticmethod
- 	def lookup_cve(cve_id, cpes=[]):
- 		"""Search for a CVE in local db or using cve.circl.lu and return vulnerability data.
+ 	def get_cpe_fs(cpe):
+ 		""""Return formatted string for given CPE.
+
+ 		Args:
+ 			cpe (string): Input CPE
+
+ 		Returns:
+ 			string: CPE formatted string.
+ 		"""
+ 		try:
+ 			return CPE(cpe).as_fs()
+ 		except NotImplementedError:
+ 			return None
+
+ 	@cache
+ 	@staticmethod
+ 	def lookup_cve_from_vulners_exploit(exploit_id, *cpes):
+ 		"""Search for a CVE corresponding to an exploit by extracting the CVE id from the exploit HTML page.
+
+ 		Args:
+ 			exploit_id (str): Exploit ID.
+ 			cpes (tuple[str], Optional): CPEs to match for.
+
+ 		Returns:
+ 			dict: vulnerability data.
+ 		"""
+ 		if CONFIG.runners.skip_exploit_search:
+ 			debug(f'Skip remote query for {exploit_id} since config.runners.skip_exploit_search is set.', sub='cve')
+ 			return None
+ 		if CONFIG.offline_mode:
+ 			debug(f'Skip remote query for {exploit_id} since config.offline_mode is set.', sub='cve')
+ 			return None
+ 		try:
+ 			resp = requests.get(f'https://vulners.com/githubexploit/{exploit_id}', timeout=5)
+ 			resp.raise_for_status()
+ 			soup = BeautifulSoup(resp.text, 'lxml')
+ 			title = soup.title.get_text(strip=True)
+ 			h1 = [h1.get_text(strip=True) for h1 in soup.find_all('h1')]
+ 			if '404' in h1:
+ 				raise requests.RequestException("404 [not found or rate limited]")
+ 			code = [code.get_text(strip=True) for code in soup.find_all('code')]
+ 			elems = [title] + h1 + code
+ 			content = '\n'.join(elems)
+ 			cve_regex = re.compile(r'(CVE(?:-|_)\d{4}(?:-|_)\d{4,7})', re.IGNORECASE)
+ 			matches = cve_regex.findall(str(content))
+ 			if not matches:
+ 				debug(f'{exploit_id}: No CVE found in https://vulners.com/githubexploit/{exploit_id}.', sub='cve')
+ 				return None
+ 			cve_id = matches[0].replace('_', '-').upper()
+ 			cve_data = Vuln.lookup_cve(cve_id, *cpes)
+ 			if cve_data:
+ 				return cve_data
+
+ 		except requests.RequestException as e:
+ 			debug(f'Failed remote query for {exploit_id} ({str(e)}).', sub='cve')
+ 			return None
+
+ 	@cache
+ 	@staticmethod
+ 	def lookup_cve_from_cve_circle(cve_id):
+ 		"""Get CVE data from vulnerability.circl.lu.
+
+ 		Args:
+ 			cve_id (str): CVE id.
+
+ 		Returns:
+ 			dict | None: CVE data, None if no response or empty response.
+ 		"""
+ 		try:
+ 			resp = requests.get(f'https://vulnerability.circl.lu/api/cve/{cve_id}', timeout=5)
+ 			resp.raise_for_status()
+ 			cve_info = resp.json()
+ 			if not cve_info:
+ 				debug(f'Empty response from https://vulnerability.circl.lu/api/cve/{cve_id}', sub='cve')
+ 				return None
+ 			cve_path = f'{CONFIG.dirs.data}/cves/{cve_id}.json'
+ 			with open(cve_path, 'w') as f:
+ 				f.write(json.dumps(cve_info, indent=2))
+ 			debug(f'Downloaded {cve_id} to {cve_path}', sub='cve')
+ 			return cve_info
+ 		except requests.RequestException as e:
+ 			debug(f'Failed remote query for {cve_id} ({str(e)}).', sub='cve')
+ 			return None
+
+ 	@cache
+ 	@staticmethod
+ 	def lookup_cve(cve_id, *cpes):
+ 		"""Search for a CVE info and return vulnerability data.

  		Args:
  			cve_id (str): CVE ID in the form CVE-*
- 			cpes (str, Optional): CPEs to match for.
+ 			cpes (tuple[str], Optional): CPEs to match for.

  		Returns:
  			dict: vulnerability data.
@@ -199,73 +289,87 @@ class Vuln(Command):
  		if CONFIG.offline_mode:
  			debug(f'Skip remote query for {cve_id} since config.offline_mode is set.', sub='cve')
  			return None
- 		try:
- 			resp = requests.get(f'https://cve.circl.lu/api/cve/{cve_id}', timeout=5)
- 			resp.raise_for_status()
- 			cve_info = resp.json()
- 		except requests.RequestException as e:
- 			debug(f'Failed remote query for {cve_id} ({str(e)}).', sub='cve')
+ 		cve_info = Vuln.lookup_cve_from_cve_circle(cve_id)
+ 		if not cve_info:
  			return None

+ 		# Convert cve info to easy format
+ 		cve_id = cve_info['cveMetadata']['cveId']
+ 		cna = cve_info['containers']['cna']
+ 		metrics = cna.get('metrics', [])
+ 		cvss_score = 0
+ 		for metric in metrics:
+ 			for name, value in metric.items():
+ 				if 'cvss' in name:
+ 					cvss_score = metric[name]['baseScore']
+ 		description = cna.get('descriptions', [{}])[0].get('value')
+ 		cwe_id = cna.get('problemTypes', [{}])[0].get('descriptions', [{}])[0].get('cweId')
+ 		cpes_affected = []
+ 		for product in cna['affected']:
+ 			cpes_affected.extend(product.get('cpes', []))
+ 		references = [u['url'] for u in cna['references']]
+ 		cve_info = {
+ 			'id': cve_id,
+ 			'cwe_id': cwe_id,
+ 			'cvss_score': cvss_score,
+ 			'description': description,
+ 			'cpes': cpes_affected,
+ 			'references': references
+ 		}
+
  		# Match the CPE string against the affected products CPE FS strings from the CVE data if a CPE was passed.
  		# This allow to limit the number of False positives (high) that we get from nmap NSE vuln scripts like vulscan
  		# and ensure we keep only right matches.
  		# The check is not executed if no CPE was passed (sometimes nmap cannot properly detect a CPE) or if the CPE
  		# version cannot be determined.
  		cpe_match = False
- 		tags = []
+ 		tags = [cve_id]
  		if cpes:
  			for cpe in cpes:
- 				cpe_obj = CPE(cpe)
- 				cpe_fs = cpe_obj.as_fs()
+ 				cpe_fs = Vuln.get_cpe_fs(cpe)
+ 				if not cpe_fs:
+ 					debug(f'{cve_id}: Failed to parse CPE {cpe} with CPE parser', sub='cve.match', verbose=True)
+ 					tags.append('cpe-invalid')
+ 					continue
  				# cpe_version = cpe_obj.get_version()[0]
- 				vulnerable_fs = cve_info['vulnerable_product']
- 				for fs in vulnerable_fs:
- 					# debug(f'{cve_id}: Testing {cpe_fs} against {fs}', sub='cve') # for hardcore debugging
- 					if Vuln.match_cpes(cpe_fs, fs):
+ 				for cpe_affected in cpes_affected:
+ 					cpe_affected_fs = Vuln.get_cpe_fs(cpe_affected)
+ 					if not cpe_affected_fs:
+ 						debug(f'{cve_id}: Failed to parse CPE {cpe} (from online data) with CPE parser', sub='cve.match', verbose=True)
+ 						continue
+ 					debug(f'{cve_id}: Testing {cpe_fs} against {cpe_affected_fs}', sub='cve.match', verbose=True)
+ 					cpe_match = Vuln.match_cpes(cpe_fs, cpe_affected_fs)
+ 					if cpe_match:
  						debug(f'{cve_id}: CPE match found for {cpe}.', sub='cve')
- 						cpe_match = True
  						tags.append('cpe-match')
  						break
+
  			if not cpe_match:
  				debug(f'{cve_id}: no CPE match found for {cpe}.', sub='cve')

  		# Parse CVE id and CVSS
  		name = id = cve_info['id']
- 		cvss = cve_info.get('cvss') or 0
  		# exploit_ids = cve_info.get('refmap', {}).get('exploit-db', [])
  		# osvdb_ids = cve_info.get('refmap', {}).get('osvdb', [])

  		# Get description
- 		description = cve_info.get('summary')
+ 		description = cve_info['description']
  		if description is not None:
  			description = description.replace(id, '').strip()

  		# Get references
  		references = cve_info.get(REFERENCES, [])
- 		cve_ref_url = f'https://cve.circl.lu/cve/{id}'
+ 		cve_ref_url = f'https://vulnerability.circl.lu/cve/{id}'
  		references.append(cve_ref_url)

  		# Get CWE ID
- 		vuln_cwe_id = cve_info.get('cwe')
- 		if vuln_cwe_id is None:
- 			tags.append(vuln_cwe_id)
-
- 		# Parse capecs for a better vuln name / type
- 		capecs = cve_info.get('capec', [])
- 		if capecs and len(capecs) > 0:
- 			name = capecs[0]['name']
-
- 		# Parse ovals for a better vuln name / type
- 		ovals = cve_info.get('oval', [])
- 		if ovals:
- 			if description == 'none':
- 				description = ovals[0]['title']
- 			family = ovals[0]['family']
- 			tags.append(family)
+ 		cwe_id = cve_info['cwe_id']
+ 		if cwe_id is not None:
+ 			tags.append(cwe_id)

  		# Set vulnerability severity based on CVSS score
  		severity = None
+ 		cvss = cve_info['cvss_score']
  		if cvss:
  			severity = Vuln.cvss_to_severity(cvss)

@@ -273,15 +377,16 @@ class Vuln(Command):
  		vuln = {
  			ID: id,
  			NAME: name,
- 			PROVIDER: 'cve.circl.lu',
+ 			PROVIDER: 'vulnerability.circl.lu',
  			SEVERITY: severity,
  			CVSS_SCORE: cvss,
  			TAGS: tags,
- 			REFERENCES: [f'https://cve.circl.lu/cve/{id}'] + references,
+ 			REFERENCES: [f'https://vulnerability.circl.lu/cve/{id}'] + references,
  			DESCRIPTION: description,
  		}
  		return vuln

+ 	@cache
  	@staticmethod
  	def lookup_ghsa(ghsa_id):
  		"""Search for a GHSA on Github and and return associated CVE vulnerability data.
secator/tasks/bbot.py ADDED
@@ -0,0 +1,295 @@
+ import shutil
+
+ from secator.decorators import task
+ from secator.runners import Command
+ from secator.serializers import RegexSerializer
+ from secator.output_types import Vulnerability, Port, Url, Record, Ip, Tag, Error
+ from secator.serializers import JSONSerializer
+
+
+ BBOT_MODULES = [
+ 	"affiliates",
+ 	# "ajaxpro",
+ 	"anubisdb",
+ 	"asn",
+ 	"azure_realm",
+ 	"azure_tenant",
+ 	"badsecrets",
+ 	"bevigil",
+ 	"binaryedge",
+ 	# "bucket_aws",
+ 	"bucket_azure",
+ 	"bucket_digitalocean",
+ 	# "bucket_file_enum",
+ 	"bucket_firebase",
+ 	"bucket_google",
+ 	"builtwith",
+ 	"bypass403",
+ 	"c99",
+ 	"censys",
+ 	"certspotter",
+ 	# "chaos",
+ 	"columbus",
+ 	# "credshed",
+ 	# "crobat",
+ 	"crt",
+ 	# "dastardly",
+ 	# "dehashed",
+ 	"digitorus",
+ 	"dnscommonsrv",
+ 	"dnsdumpster",
+ 	# "dnszonetransfer",
+ 	"emailformat",
+ 	"ffuf",
+ 	"ffuf_shortnames",
+ 	# "filedownload",
+ 	"fingerprintx",
+ 	"fullhunt",
+ 	"generic_ssrf",
+ 	"git",
+ 	"telerik",
+ 	# "github_codesearch",
+ 	"github_org",
+ 	"gowitness",
+ 	"hackertarget",
+ 	"host_header",
+ 	"httpx",
+ 	"hunt",
+ 	"hunterio",
+ 	"iis_shortnames",
+ 	# "internetdb",
+ 	# "ip2location",
+ 	"ipneighbor",
+ 	"ipstack",
+ 	"leakix",
+ 	# "masscan",
+ 	# "massdns",
+ 	"myssl",
+ 	# "newsletters",
+ 	# "nmap",
+ 	# "nsec",
+ 	"ntlm",
+ 	"nuclei",
+ 	"oauth",
+ 	"otx",
+ 	"paramminer_cookies",
+ 	"paramminer_getparams",
+ 	"paramminer_headers",
+ 	"passivetotal",
+ 	"pgp",
+ 	# "postman",
+ 	"rapiddns",
+ 	# "riddler",
+ 	"robots",
+ 	"secretsdb",
+ 	"securitytrails",
+ 	"shodan_dns",
+ 	"sitedossier",
+ 	"skymem",
+ 	"smuggler",
+ 	"social",
+ 	"sslcert",
+ 	# "subdomain_hijack",
+ 	"subdomaincenter",
+ 	# "sublist3r",
+ 	"telerik",
+ 	# "threatminer",
+ 	"url_manipulation",
+ 	"urlscan",
+ 	"vhost",
+ 	"viewdns",
+ 	"virustotal",
+ 	# "wafw00f",
+ 	"wappalyzer",
+ 	"wayback",
+ 	"zoomeye"
+ ]
+ BBOT_PRESETS = [
+ 	'cloud-enum',
+ 	'code-enum',
+ 	'dirbust-heavy',
+ 	'dirbust-light',
+ 	'dotnet-audit',
+ 	'email-enum',
+ 	'iis-shortnames',
+ 	'kitchen-sink',
+ 	'paramminer',
+ 	'spider',
+ 	'subdomain-enum',
+ 	'web-basic',
+ 	'web-screenshots',
+ 	'web-thorough'
+ ]
+ BBOT_MODULES_STR = ' '.join(BBOT_MODULES)
+ BBOT_MAP_TYPES = {
+ 	'IP_ADDRESS': Ip,
+ 	'PROTOCOL': Port,
+ 	'OPEN_TCP_PORT': Port,
+ 	'URL': Url,
+ 	'TECHNOLOGY': Tag,
+ 	'ASN': Record,
+ 	'DNS_NAME': Record,
+ 	'WEBSCREENSHOT': Url,
+ 	'VULNERABILITY': Vulnerability,
+ 	'FINDING': Tag
+ }
+ BBOT_DESCRIPTION_REGEX = RegexSerializer(
+ 	regex=r'(?P<name>[\w ]+): \[(?P<value>[^\[\]]+)\]',
+ 	findall=True
+ )
+
+
+ def output_discriminator(self, item):
+ 	_type = item.get('type')
+ 	_message = item.get('message')
+ 	if not _type and _message:
+ 		return Error
+ 	elif _type not in BBOT_MAP_TYPES:
+ 		return None
+ 	return BBOT_MAP_TYPES[_type]
+
+
+ @task()
+ class bbot(Command):
+ 	"""Multipurpose scanner."""
+ 	cmd = 'bbot -y --allow-deadly --force'
+ 	json_flag = '--json'
+ 	input_flag = '-t'
+ 	file_flag = None
+ 	version_flag = '--help'
+ 	opts = {
+ 		'modules': {'type': str, 'short': 'm', 'default': '', 'help': ','.join(BBOT_MODULES)},
+ 		'presets': {'type': str, 'short': 'ps', 'default': 'kitchen-sink', 'help': ','.join(BBOT_PRESETS), 'shlex': False},
+ 	}
+ 	opt_key_map = {
+ 		'modules': 'm',
+ 		'presets': 'p'
+ 	}
+ 	opt_value_map = {
+ 		'presets': lambda x: ' '.join(x.split(','))
+ 	}
+ 	item_loaders = [JSONSerializer()]
+ 	output_types = [Vulnerability, Port, Url, Record, Ip]
+ 	output_discriminator = output_discriminator
+ 	output_map = {
+ 		Ip: {
+ 			'ip': lambda x: x['data'],
+ 			'host': lambda x: x['data'],
+ 			'alive': lambda x: True,
+ 			'_source': lambda x: 'bbot-' + x['module']
+ 		},
+ 		Tag: {
+ 			'name': 'name',
+ 			'match': lambda x: x['data'].get('url') or x['data'].get('host'),
+ 			'extra_data': 'extra_data',
+ 			'_source': lambda x: 'bbot-' + x['module']
+ 		},
+ 		Url: {
+ 			'url': lambda x: x['data'].get('url') if isinstance(x['data'], dict) else x['data'],
+ 			'host': lambda x: x['resolved_hosts'][0] if 'resolved_hosts' in x else '',
+ 			'status_code': lambda x: bbot.extract_status_code(x),
+ 			'title': lambda x: bbot.extract_title(x),
+ 			'screenshot_path': lambda x: x['data']['path'] if isinstance(x['data'], dict) else '',
+ 			'_source': lambda x: 'bbot-' + x['module']
+ 		},
+ 		Port: {
+ 			'port': lambda x: int(x['data']['port']) if 'port' in x['data'] else x['data'].split(':')[-1],
+ 			'ip': lambda x: [_ for _ in x['resolved_hosts'] if not _.startswith('::')][0],
+ 			'state': lambda x: 'OPEN',
+ 			'service_name': lambda x: x['data']['protocol'] if 'protocol' in x['data'] else '',
+ 			'cpes': lambda x: [],
+ 			'host': lambda x: x['data']['host'] if isinstance(x['data'], dict) else x['data'].split(':')[0],
+ 			'extra_data': 'extra_data',
+ 			'_source': lambda x: 'bbot-' + x['module']
+ 		},
+ 		Vulnerability: {
+ 			'name': 'name',
+ 			'match': lambda x: x['data'].get('url') or x['data']['host'],
+ 			'extra_data': 'extra_data',
+ 			'severity': lambda x: x['data']['severity'].lower()
+ 		},
+ 		Record: {
+ 			'name': 'name',
+ 			'type': 'type',
+ 			'extra_data': 'extra_data'
+ 		},
+ 		Error: {
+ 			'message': 'message'
+ 		}
+ 	}
+ 	install_cmd = 'pipx install bbot && pipx upgrade bbot'
+
+ 	@staticmethod
+ 	def on_json_loaded(self, item):
+ 		_type = item.get('type')
+
+ 		if not _type:
+ 			yield item
+ 			return
+
+ 		if _type not in BBOT_MAP_TYPES:
+ 			self._print(f'[bold orange3]Found unsupported bbot type: {_type}.[/] [bold green]Skipping.[/]')
+ 			return
+
+ 		if isinstance(item['data'], str):
+ 			item['name'] = item['data']
+ 			yield item
+ 			return
+
+ 		item['extra_data'] = item['data']
+
+ 		# Parse bbot description into extra_data
+ 		description = item['data'].get('description')
+ 		if description:
+ 			del item['data']['description']
+ 			match = BBOT_DESCRIPTION_REGEX.run(description)
+ 			for chunk in match:
+ 				key, val = tuple([c.strip() for c in chunk])
+ 				if ',' in val:
+ 					val = val.split(',')
+ 				key = '_'.join(key.split(' ')).lower()
+ 				item['extra_data'][key] = val
+
+ 		# Set technology as name for Tag
+ 		if item['type'] == 'TECHNOLOGY':
+ 			item['name'] = item['data']['technology']
+ 			del item['data']['technology']
+
+ 		# If 'name' key is present in 'data', set it as name
+ 		elif 'name' in item['data'].keys():
+ 			item['name'] = item['data']['name']
+ 			del item['data']['name']
+
+ 		# If 'name' key is present in 'extra_data', set it as name
+ 		elif 'extra_data' in item and 'name' in item['extra_data'].keys():
+ 			item['name'] = item['extra_data']['name']
+ 			del item['extra_data']['name']
+
+ 		# If 'discovery_context' and no name set yet, set it as name
+ 		else:
+ 			item['name'] = item['discovery_context']
+
+ 		# If a screenshot was saved, move it to secator output folder
+ 		if item['type'] == 'WEBSCREENSHOT':
+ 			path = item['data']['path']
+ 			name = path.split('/')[-1]
+ 			secator_path = f'{self.reports_folder}/.outputs/{name}'
+ 			shutil.copy(path, secator_path)
+ 			item['data']['path'] = secator_path
+
+ 		yield item
+
+ 	@staticmethod
+ 	def extract_title(item):
+ 		for tag in item['tags']:
+ 			if 'http-title' in tag:
+ 				title = ' '.join(tag.split('-')[2:])
+ 				return title
+ 		return ''
+
+ 	@staticmethod
+ 	def extract_status_code(item):
+ 		for tag in item['tags']:
+ 			if 'status-' in tag:
+ 				return int([tag.split('-')[-1]][0])
+ 		return 0
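
To illustrate how the mapping machinery above routes raw bbot JSONL events: the discriminator keys off 'type' (falling back to Error when only a 'message' is present), and FINDING descriptions are unpacked into extra_data via BBOT_DESCRIPTION_REGEX. The events below are fabricated and trimmed to the fields actually read:

import re

events = [
    {'type': 'IP_ADDRESS', 'data': '192.0.2.1', 'module': 'ipneighbor'},  # -> Ip
    {'message': 'module httpx failed'},                                   # -> Error
    {'type': 'SCAN', 'data': 'starting'},                                 # -> None (dropped)
]
for event in events:
    print(output_discriminator(None, event))  # 'self' is unused by the function

# Same pattern as BBOT_DESCRIPTION_REGEX, applied with plain re.findall:
desc = 'Technology: [nginx] Version: [1.25.3]'
print(re.findall(r'(?P<name>[\w ]+): \[(?P<value>[^\[\]]+)\]', desc))
# [('Technology', 'nginx'), (' Version', '1.25.3')]; on_json_loaded then
# strips and lowercases the keys to 'technology' and 'version'.
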
secator/tasks/bup.py ADDED
@@ -0,0 +1,99 @@
+ import json
+ import re
+
+ from secator.decorators import task
+ from secator.output_types import Url, Progress
+ from secator.definitions import (
+ 	HEADER, DELAY, FOLLOW_REDIRECT, METHOD, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT,
+ 	DEPTH, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, FILTER_REGEX, FILTER_CODES, FILTER_SIZE, FILTER_WORDS,
+ 	MATCH_CODES, OPT_NOT_SUPPORTED, URL
+ )
+ from secator.serializers import JSONSerializer
+ from secator.tasks._categories import Http
+
+
+ @task()
+ class bup(Http):
+ 	"""40X bypasser."""
+ 	cmd = 'bup'
+ 	input_flag = '-u'
+ 	input_type = URL
+ 	json_flag = '--jsonl'
+ 	opt_prefix = '--'
+ 	opts = {
+ 		'spoofport': {'type': int, 'short': 'sp', 'help': 'Port(s) to inject in port-specific headers'},
+ 		'spoofip': {'type': str, 'short': 'si', 'help': 'IP(s) to inject in ip-specific headers'},
+ 		'mode': {'type': str, 'help': 'Bypass modes.'},
+ 	}
+ 	opt_key_map = {
+ 		HEADER: 'header',
+ 		DELAY: OPT_NOT_SUPPORTED,
+ 		FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+ 		METHOD: OPT_NOT_SUPPORTED,
+ 		RATE_LIMIT: OPT_NOT_SUPPORTED,
+ 		RETRIES: 'retry',
+ 		THREADS: 'threads',
+ 		TIMEOUT: 'timeout',
+ 		USER_AGENT: OPT_NOT_SUPPORTED,
+ 		DEPTH: OPT_NOT_SUPPORTED,
+ 		MATCH_REGEX: OPT_NOT_SUPPORTED,
+ 		MATCH_SIZE: OPT_NOT_SUPPORTED,
+ 		MATCH_WORDS: OPT_NOT_SUPPORTED,
+ 		FILTER_REGEX: OPT_NOT_SUPPORTED,
+ 		FILTER_CODES: OPT_NOT_SUPPORTED,
+ 		FILTER_SIZE: OPT_NOT_SUPPORTED,
+ 		FILTER_WORDS: OPT_NOT_SUPPORTED,
+ 		MATCH_CODES: OPT_NOT_SUPPORTED,
+ 		PROXY: 'proxy',
+ 	}
+ 	item_loaders = [JSONSerializer()]
+ 	output_types = [Url, Progress]
+ 	output_map = {
+ 		Url: {
+ 			'url': 'request_url',
+ 			'method': lambda x: bup.method_extractor(x),
+ 			'headers': lambda x: bup.headers_extractor(x),
+ 			'status_code': 'response_status_code',
+ 			'content_type': 'response_content_type',
+ 			'content_length': 'response_content_length',
+ 			'title': 'response_title',
+ 			'server': 'response_server_type',
+ 			'lines': 'response_lines_count',
+ 			'words': 'response_words_count',
+ 			'stored_response_path': 'response_html_filename',
+ 		}
+ 	}
+ 	install_cmd = 'pipx install bypass-url-parser && pipx upgrade bypass-url-parser'
+
+ 	@staticmethod
+ 	def on_init(self):
+ 		self.cmd += f' -o {self.reports_folder}/.outputs/response'
+
+ 	@staticmethod
+ 	def on_line(self, line):
+ 		if 'Doing' in line:
+ 			progress_indicator = line.split(':')[-1]
+ 			current, total = tuple([int(c.strip()) for c in progress_indicator.split('/')])
+ 			return json.dumps({"duration": "unknown", "percent": int((current / total) * 100)})
+ 		elif 'batcat' in line:  # ignore batcat lines as they're loaded as JSON
+ 			return None
+ 		return line
+
+ 	@staticmethod
+ 	def method_extractor(item):
+ 		payload = item['request_curl_payload']
+ 		match = re.match(r'-X\s+(\w+)', payload)
+ 		if match:
+ 			return match.group(1)
+ 		return 'GET'
+
+ 	@staticmethod
+ 	def headers_extractor(item):
+ 		headers_list = item['response_headers'].split('\n')[1:]
+ 		headers = {}
+ 		for header in headers_list:
+ 			split_headers = header.split(':')
+ 			key = split_headers[0]
+ 			value = ':'.join(split_headers[1:])
+ 			headers[key] = value
+ 		return headers
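
The extractors at the end are easy to sanity-check in isolation. The sample item below is fabricated, shaped after the bup JSONL field names referenced in output_map, and assumes the @task decorator leaves the staticmethods reachable on the class:

item = {
    'request_curl_payload': "-X POST -H 'X-Forwarded-For: 127.0.0.1'",
    'response_headers': 'HTTP/1.1 200 OK\nServer: nginx\nContent-Type: text/html',
}
print(bup.method_extractor(item))   # 'POST' (falls back to 'GET' when no -X flag)
print(bup.headers_extractor(item))  # {'Server': ' nginx', 'Content-Type': ' text/html'}
# The status line is skipped ([1:]) and values keep a leading space, since the
# colon-split parts are rejoined without stripping.

# Progress lines are rewritten to Progress JSON by on_line; with a line shaped
# like 'Doing: 42/84' (illustrative, the real prefix may differ):
print(bup.on_line(None, 'Doing: 42/84'))  # {"duration": "unknown", "percent": 50}
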