secator 0.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150) hide show
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +453 -0
  4. secator/celery_signals.py +138 -0
  5. secator/celery_utils.py +320 -0
  6. secator/cli.py +2035 -0
  7. secator/cli_helper.py +395 -0
  8. secator/click.py +87 -0
  9. secator/config.py +670 -0
  10. secator/configs/__init__.py +0 -0
  11. secator/configs/profiles/__init__.py +0 -0
  12. secator/configs/profiles/aggressive.yaml +8 -0
  13. secator/configs/profiles/all_ports.yaml +7 -0
  14. secator/configs/profiles/full.yaml +31 -0
  15. secator/configs/profiles/http_headless.yaml +7 -0
  16. secator/configs/profiles/http_record.yaml +8 -0
  17. secator/configs/profiles/insane.yaml +8 -0
  18. secator/configs/profiles/paranoid.yaml +8 -0
  19. secator/configs/profiles/passive.yaml +11 -0
  20. secator/configs/profiles/polite.yaml +8 -0
  21. secator/configs/profiles/sneaky.yaml +8 -0
  22. secator/configs/profiles/tor.yaml +5 -0
  23. secator/configs/scans/__init__.py +0 -0
  24. secator/configs/scans/domain.yaml +31 -0
  25. secator/configs/scans/host.yaml +23 -0
  26. secator/configs/scans/network.yaml +30 -0
  27. secator/configs/scans/subdomain.yaml +27 -0
  28. secator/configs/scans/url.yaml +19 -0
  29. secator/configs/workflows/__init__.py +0 -0
  30. secator/configs/workflows/cidr_recon.yaml +48 -0
  31. secator/configs/workflows/code_scan.yaml +29 -0
  32. secator/configs/workflows/domain_recon.yaml +46 -0
  33. secator/configs/workflows/host_recon.yaml +95 -0
  34. secator/configs/workflows/subdomain_recon.yaml +120 -0
  35. secator/configs/workflows/url_bypass.yaml +15 -0
  36. secator/configs/workflows/url_crawl.yaml +98 -0
  37. secator/configs/workflows/url_dirsearch.yaml +62 -0
  38. secator/configs/workflows/url_fuzz.yaml +68 -0
  39. secator/configs/workflows/url_params_fuzz.yaml +66 -0
  40. secator/configs/workflows/url_secrets_hunt.yaml +23 -0
  41. secator/configs/workflows/url_vuln.yaml +91 -0
  42. secator/configs/workflows/user_hunt.yaml +29 -0
  43. secator/configs/workflows/wordpress.yaml +38 -0
  44. secator/cve.py +718 -0
  45. secator/decorators.py +7 -0
  46. secator/definitions.py +168 -0
  47. secator/exporters/__init__.py +14 -0
  48. secator/exporters/_base.py +3 -0
  49. secator/exporters/console.py +10 -0
  50. secator/exporters/csv.py +37 -0
  51. secator/exporters/gdrive.py +123 -0
  52. secator/exporters/json.py +16 -0
  53. secator/exporters/table.py +36 -0
  54. secator/exporters/txt.py +28 -0
  55. secator/hooks/__init__.py +0 -0
  56. secator/hooks/gcs.py +80 -0
  57. secator/hooks/mongodb.py +281 -0
  58. secator/installer.py +694 -0
  59. secator/loader.py +128 -0
  60. secator/output_types/__init__.py +49 -0
  61. secator/output_types/_base.py +108 -0
  62. secator/output_types/certificate.py +78 -0
  63. secator/output_types/domain.py +50 -0
  64. secator/output_types/error.py +42 -0
  65. secator/output_types/exploit.py +58 -0
  66. secator/output_types/info.py +24 -0
  67. secator/output_types/ip.py +47 -0
  68. secator/output_types/port.py +55 -0
  69. secator/output_types/progress.py +36 -0
  70. secator/output_types/record.py +36 -0
  71. secator/output_types/stat.py +41 -0
  72. secator/output_types/state.py +29 -0
  73. secator/output_types/subdomain.py +45 -0
  74. secator/output_types/tag.py +69 -0
  75. secator/output_types/target.py +38 -0
  76. secator/output_types/url.py +112 -0
  77. secator/output_types/user_account.py +41 -0
  78. secator/output_types/vulnerability.py +101 -0
  79. secator/output_types/warning.py +30 -0
  80. secator/report.py +140 -0
  81. secator/rich.py +130 -0
  82. secator/runners/__init__.py +14 -0
  83. secator/runners/_base.py +1240 -0
  84. secator/runners/_helpers.py +218 -0
  85. secator/runners/celery.py +18 -0
  86. secator/runners/command.py +1178 -0
  87. secator/runners/python.py +126 -0
  88. secator/runners/scan.py +87 -0
  89. secator/runners/task.py +81 -0
  90. secator/runners/workflow.py +168 -0
  91. secator/scans/__init__.py +29 -0
  92. secator/serializers/__init__.py +8 -0
  93. secator/serializers/dataclass.py +39 -0
  94. secator/serializers/json.py +45 -0
  95. secator/serializers/regex.py +25 -0
  96. secator/tasks/__init__.py +8 -0
  97. secator/tasks/_categories.py +487 -0
  98. secator/tasks/arjun.py +113 -0
  99. secator/tasks/arp.py +53 -0
  100. secator/tasks/arpscan.py +70 -0
  101. secator/tasks/bbot.py +372 -0
  102. secator/tasks/bup.py +118 -0
  103. secator/tasks/cariddi.py +193 -0
  104. secator/tasks/dalfox.py +87 -0
  105. secator/tasks/dirsearch.py +84 -0
  106. secator/tasks/dnsx.py +186 -0
  107. secator/tasks/feroxbuster.py +93 -0
  108. secator/tasks/ffuf.py +135 -0
  109. secator/tasks/fping.py +85 -0
  110. secator/tasks/gau.py +102 -0
  111. secator/tasks/getasn.py +60 -0
  112. secator/tasks/gf.py +36 -0
  113. secator/tasks/gitleaks.py +96 -0
  114. secator/tasks/gospider.py +84 -0
  115. secator/tasks/grype.py +109 -0
  116. secator/tasks/h8mail.py +75 -0
  117. secator/tasks/httpx.py +167 -0
  118. secator/tasks/jswhois.py +36 -0
  119. secator/tasks/katana.py +203 -0
  120. secator/tasks/maigret.py +87 -0
  121. secator/tasks/mapcidr.py +42 -0
  122. secator/tasks/msfconsole.py +179 -0
  123. secator/tasks/naabu.py +85 -0
  124. secator/tasks/nmap.py +487 -0
  125. secator/tasks/nuclei.py +151 -0
  126. secator/tasks/search_vulns.py +225 -0
  127. secator/tasks/searchsploit.py +109 -0
  128. secator/tasks/sshaudit.py +299 -0
  129. secator/tasks/subfinder.py +48 -0
  130. secator/tasks/testssl.py +283 -0
  131. secator/tasks/trivy.py +130 -0
  132. secator/tasks/trufflehog.py +240 -0
  133. secator/tasks/urlfinder.py +100 -0
  134. secator/tasks/wafw00f.py +106 -0
  135. secator/tasks/whois.py +34 -0
  136. secator/tasks/wpprobe.py +116 -0
  137. secator/tasks/wpscan.py +202 -0
  138. secator/tasks/x8.py +94 -0
  139. secator/tasks/xurlfind3r.py +83 -0
  140. secator/template.py +294 -0
  141. secator/thread.py +24 -0
  142. secator/tree.py +196 -0
  143. secator/utils.py +922 -0
  144. secator/utils_test.py +297 -0
  145. secator/workflows/__init__.py +29 -0
  146. secator-0.22.0.dist-info/METADATA +447 -0
  147. secator-0.22.0.dist-info/RECORD +150 -0
  148. secator-0.22.0.dist-info/WHEEL +4 -0
  149. secator-0.22.0.dist-info/entry_points.txt +2 -0
  150. secator-0.22.0.dist-info/licenses/LICENSE +60 -0
@@ -0,0 +1,70 @@
1
+ from secator.decorators import task
2
+ from secator.definitions import CIDR_RANGE, IP, HOST, SLUG
3
+ from secator.output_types import Ip, Warning, Error, Info
4
+ from secator.runners import Command
5
+
6
+
7
@task()
class arpscan(Command):
	"""Scan a CIDR range for alive hosts using ARP."""
	# --plain suppresses headers/footers; --format makes output a fixed
	# 4-column tab-separated line (ip, name, mac, vendor) parsed in on_line().
	cmd = 'arp-scan --plain --resolve --format="${ip}\t${name}\t${mac}\t${vendor}"'
	input_types = [CIDR_RANGE, IP, HOST, SLUG]
	output_types = [Ip]
	input_flag = None  # targets are passed as positional arguments
	requires_sudo = True
	file_copy_sudo = True  # Copy the input file to /tmp since it cannot access the reports folder
	file_flag = '-f'
	version_flag = '-V'
	tags = ['ip', 'recon']
	# 'discover' is a sentinel target: validate_input() clears it so that
	# on_cmd() falls back to a --localnet scan.
	default_inputs = ['discover']
	opt_prefix = '--'
	opts = {
		'resolve': {'is_flag': True, 'short': 'r', 'default': False, 'help': 'Resolve IP addresses to hostnames'},
		'interface': {'type': str, 'short': 'i', 'default': None, 'help': 'Interface to use'},
		'localnet': {'is_flag': True, 'short': 'l', 'default': False, 'help': 'Scan local network'},
		'ouifile': {'type': str, 'short': 'o', 'default': None, 'help': 'Use IEEE registry vendor mapping file.'},
		'macfile': {'type': str, 'short': 'm', 'default': None, 'help': 'Use custom vendor mapping file.'},
	}
	install_pre = {
		'*': ['arp-scan'],
	}
	install_post = {
		# arp-scan typically installs to /usr/sbin, which may not be on PATH
		# for unprivileged users; expose it via /usr/local/bin.
		'*': 'sudo ln -s /usr/sbin/arp-scan /usr/local/bin/arp-scan'
	}

	@staticmethod
	def validate_input(self, inputs):
		"""Always accept inputs; clear them when empty or when the 'discover'
		sentinel is present so on_cmd() switches to a local-network scan."""
		if not inputs or 'discover' in inputs:
			self.inputs = []
		return True

	@staticmethod
	def on_cmd(self):
		"""Append --localnet when no explicit targets remain after validation."""
		if not self.inputs:
			self.add_result(Info(message='No input passed to arpscan, scanning local network'))
			self.cmd += ' --localnet'

	@staticmethod
	def on_line(self, line):
		"""Convert one raw arp-scan output line into a Warning, Error or Ip item.

		Lines that match none of the known shapes are returned unchanged so the
		runner's default line handling applies.
		"""
		if 'WARNING:' in line:
			return Warning(message=line.split('WARNING:')[1].strip())
		elif 'permission' in line:
			# Raw-socket permission failure: tell the user how to grant CAP_NET_RAW.
			return Error(message=line + "\n" + (
				"You must [bold]run this task as root[/bold] to scan the network, or use "
				"[green]sudo setcap cap_net_raw=eip /usr/sbin/arp-scan[/green] to grant the [bold]CAP_NET_RAW[/bold] capability "
				"to the [bold]arp-scan[/bold] binary."))
		else:
			# Expected host line per the --format template: ip\tname\tmac\tvendor.
			line_parts = line.strip().split('\t')
			if len(line_parts) == 4:
				return Ip(
					ip=line_parts[0],
					host=line_parts[1],
					alive=True,  # ARP reply received implies the host is up
					extra_data={
						'mac': line_parts[2],
						'vendor': line_parts[3],
						'protocol': 'arp',
					},
					_source=self.unique_name
				)
		return line
secator/tasks/bbot.py ADDED
@@ -0,0 +1,372 @@
1
+ import re
2
+ import shutil
3
+
4
+ from secator.config import CONFIG
5
+ from secator.decorators import task
6
+ from secator.definitions import FILENAME, HOST, IP, ORG_NAME, PORT, URL, USERNAME
7
+ from secator.runners import Command
8
+ from secator.serializers import RegexSerializer
9
+ from secator.output_types import Vulnerability, Port, Url, Record, Ip, Tag, Info, Error, UserAccount, Warning
10
+ from secator.serializers import JSONSerializer
11
+
12
+
13
# bbot scan modules enabled by default (joined into the --modules help text).
# Commented-out entries are modules deliberately excluded from the default set.
BBOT_MODULES = [
	"affiliates",
	# "ajaxpro",
	"anubisdb",
	"asn",
	"azure_realm",
	"azure_tenant",
	"badsecrets",
	"bevigil",
	"binaryedge",
	# "bucket_aws",
	"bucket_azure",
	"bucket_digitalocean",
	# "bucket_file_enum",
	"bucket_firebase",
	"bucket_google",
	"builtwith",
	"bypass403",
	"c99",
	"censys",
	"certspotter",
	# "chaos",
	"columbus",
	# "credshed",
	# "crobat",
	"crt",
	# "dastardly",
	# "dehashed",
	"digitorus",
	"dnscommonsrv",
	"dnsdumpster",
	# "dnszonetransfer",
	"emailformat",
	"ffuf",
	"ffuf_shortnames",
	# "filedownload",
	"fingerprintx",
	"fullhunt",
	"generic_ssrf",
	"git",
	# "github_codesearch",
	"github_org",
	"gowitness",
	"hackertarget",
	"host_header",
	"httpx",
	"hunt",
	"hunterio",
	"iis_shortnames",
	# "internetdb",
	# "ip2location",
	"ipneighbor",
	"ipstack",
	"leakix",
	# "masscan",
	# "massdns",
	"myssl",
	# "newsletters",
	# "nmap",
	# "nsec",
	"ntlm",
	"nuclei",
	"oauth",
	"otx",
	"paramminer_cookies",
	"paramminer_getparams",
	"paramminer_headers",
	"passivetotal",
	"pgp",
	# "postman",
	"rapiddns",
	# "riddler",
	"robots",
	"secretsdb",
	"securitytrails",
	"shodan_dns",
	"sitedossier",
	"skymem",
	"smuggler",
	"social",
	"sslcert",
	# "subdomain_hijack",
	"subdomaincenter",
	# "sublist3r",
	"telerik",
	# "threatminer",
	"url_manipulation",
	"urlscan",
	"vhost",
	"viewdns",
	"virustotal",
	# "wafw00f",
	"wappalyzer",
	"wayback",
	"zoomeye"
]
110
# bbot presets accepted by the --presets option (joined into its help text).
BBOT_PRESETS = [
	'cloud-enum',
	'code-enum',
	'dirbust-heavy',
	'dirbust-light',
	'dotnet-audit',
	'email-enum',
	'iis-shortnames',
	'kitchen-sink',
	'paramminer',
	'spider',
	'subdomain-enum',
	'web-basic',
	'web-screenshots',
	'web-thorough'
]
126
# bbot flags accepted by the --flags option (joined into its help text).
# Fixed: 'cloud-enum,' carried a stray trailing comma inside the string and
# 'code-enum,deadly' fused two distinct flags into one list entry.
BBOT_FLAGS = [
	'active',
	'affiliates',
	'aggressive',
	'baddns',
	'cloud-enum',
	'code-enum',
	'deadly',
	'email-enum',
	'iis-shortnames',
	'passive',
	'portscan',
	'report',
	'safe',
	'service-enum',
	'slow',
	'social-enum',
	'subdomain-enum',
	'subdomain-hijack',
	'web-basic',
	'web-paramminer',
	'web-screenshots',
	'web-thorough'
]
149
# Space-separated module list (bbot's -m option takes space-delimited modules).
BBOT_MODULES_STR = ' '.join(BBOT_MODULES)
# Map from bbot event type to the secator output type it is converted into.
# Event types absent from this map are dropped by output_discriminator().
BBOT_MAP_TYPES = {
	'IP_ADDRESS': Ip,
	'PROTOCOL': Port,
	'OPEN_TCP_PORT': Port,
	'URL': Url,
	'URL_HINT': Url,
	'ASN': Record,
	'DNS_NAME': Record,
	'WEBSCREENSHOT': Url,
	'VULNERABILITY': Vulnerability,
	'EMAIL_ADDRESS': UserAccount,
	'FINDING': Tag,
	'AZURE_TENANT': Tag,
	'STORAGE_BUCKET': Tag,
	'TECHNOLOGY': Tag,
}
# Extracts "Key Name: [value]" pairs from bbot event descriptions so they can
# be folded into extra_data (see bbot.on_json_loaded).
BBOT_DESCRIPTION_REGEX = RegexSerializer(
	regex=r'(?P<name>[\w ]+): \[(?P<value>[^\[\]]+)\]',
	findall=True
)
170
+
171
+
172
def output_discriminator(self, item):
	"""Pick the secator output type for a parsed bbot JSON item.

	Returns Error for type-less items that carry a message, the mapped
	output type for known bbot event types, and None (drop) otherwise.
	"""
	event_type = item.get('type')
	if not event_type and item.get('message'):
		return Error
	return BBOT_MAP_TYPES.get(event_type)
180
+
181
+
182
@task()
class bbot(Command):
	"""Multipurpose scanner."""
	cmd = 'bbot -y --allow-deadly --force'
	input_types = [HOST, IP, URL, PORT, ORG_NAME, USERNAME, FILENAME]
	output_types = [Vulnerability, Port, Url, Record, Ip]
	tags = ['vuln', 'scan']
	json_flag = '--json'
	input_flag = '-t'
	file_flag = None
	version_flag = '--help'
	opts = {
		'modules': {'type': str, 'short': 'm', 'help': ','.join(BBOT_MODULES)},
		'presets': {'type': str, 'short': 'ps', 'help': ','.join(BBOT_PRESETS), 'shlex': False},
		'flags': {'type': str, 'short': 'fl', 'help': ','.join(BBOT_FLAGS)}
	}
	opt_key_map = {
		'modules': 'm',
		'presets': 'p',
		'flags': 'f'
	}
	opt_value_map = {
		# bbot's -p takes space-delimited presets; users pass them comma-delimited.
		'presets': lambda x: ' '.join(x.split(','))
	}
	item_loaders = [JSONSerializer()]
	# Module-level function (defined above) used to route items to output types.
	output_discriminator = output_discriminator
	output_map = {
		Ip: {
			'ip': lambda x: x['data'],
			'host': lambda x: x['data'],
			'alive': lambda x: True,
			'_source': lambda x: 'bbot-' + x['module']
		},
		Tag: {
			'name': 'name',
			'category': lambda x: x.get('type', 'bbot'),
			'match': lambda x: x['data'].get('url') or x['data'].get('host') or '',
			'extra_data': 'extra_data',
			'_source': lambda x: 'bbot-' + x['module']
		},
		Url: {
			'url': lambda x: x['data'].get('url') if isinstance(x['data'], dict) else x['data'],
			'host': lambda x: x['resolved_hosts'][0] if 'resolved_hosts' in x else '',
			'status_code': lambda x: bbot.extract_status_code(x),
			'title': lambda x: bbot.extract_title(x),
			'screenshot_path': lambda x: x['data']['path'] if isinstance(x['data'], dict) else '',
			'_source': lambda x: 'bbot-' + x['module']
		},
		Port: {
			'port': lambda x: int(x['data']['port']) if 'port' in x['data'] else int(x['data'].split(':')[-1]),
			# First resolved host that is not an IPv6 address.
			'ip': lambda x: [_ for _ in x['resolved_hosts'] if not _.startswith('::')][0],
			'state': lambda x: 'OPEN',
			'service_name': lambda x: x['data']['protocol'] if 'protocol' in x['data'] else '',
			'cpes': lambda x: [],
			'host': lambda x: x['data']['host'] if isinstance(x['data'], dict) else x['data'].split(':')[0],
			'extra_data': 'extra_data',
			'_source': lambda x: 'bbot-' + x['module']
		},
		Vulnerability: {
			'name': 'name',
			'matched_at': lambda x: x['data'].get('url') or x['data'].get('host') or '',
			'extra_data': 'extra_data',
			'confidence': 'high',
			'severity': lambda x: x['data']['severity'].lower()
		},
		Record: {
			'name': 'name',
			'type': 'type',
			'extra_data': 'extra_data'
		},
		Error: {
			'message': 'message'
		},
		UserAccount: {
			'username': lambda x: x['data'].split('@')[0],
			'email': 'data',
			'site_name': 'host',
			'extra_data': 'extra_data',
		}
	}
	install_pre = {
		'apk': ['python3-dev', 'linux-headers', 'musl-dev', 'gcc', 'git', 'openssl', 'unzip', 'tar', 'chromium'],
		'*': ['gcc', 'git', 'openssl', 'unzip', 'tar', 'chromium']
	}
	install_version = '2.4.2'
	install_cmd = 'pipx install bbot==[install_version] --force'
	install_post = {
		# Trim the large unused ansible_collections tree pulled in by bbot's deps.
		'*': f'rm -fr {CONFIG.dirs.share}/pipx/venvs/bbot/lib/python3.12/site-packages/ansible_collections/*'
	}

	@staticmethod
	def on_json_loaded(self, item):
		"""Normalize a bbot JSON event before type mapping.

		Captures the SCAN event config, drops unsupported types, aliases
		'data' as 'extra_data', derives a 'name', and relocates screenshots
		into the secator reports folder. Yields zero or more items.
		"""
		_type = item.get('type')

		if not _type:
			yield item
			return

		# Set scan name and base path for output
		if _type == 'SCAN':
			self.scan_config = item['data']
			return

		if _type not in BBOT_MAP_TYPES:
			yield Warning(message=f'Found unsupported bbot type: {_type}. Skipping.')
			self.debug(f'Found unsupported bbot type: {item}')
			return

		if isinstance(item['data'], str):
			item['name'] = item['data']
			yield item
			return

		# NOTE: this aliases the same dict — mutating item['extra_data'] below
		# also mutates item['data'].
		item['extra_data'] = item['data']
		# NOTE(review): assumes a SCAN event was seen first so self.scan_config
		# exists; otherwise this raises AttributeError — confirm bbot always
		# emits SCAN first.
		if self.scan_config:
			modules = self.scan_config.get('preset', {}).get('modules', [])
			item['extra_data']['bbot_modules'] = modules

		# Parse bbot description into extra_data
		description = item['data'].get('description')
		if description:
			parts = description.split(':')
			if len(parts) == 2:
				description = parts[0].strip()
			match = list(BBOT_DESCRIPTION_REGEX.run(description))
			if match:
				del item['data']['description']
				for chunk in match:
					key, val = tuple([c.strip() for c in chunk])
					if ',' in val:
						val = val.split(',')
					key = '_'.join(key.split(' ')).lower()
					item['extra_data'][key] = val
			# Keep only the leading sentence fragment as the display name.
			description = re.split(r'\s*(\(|\.|Detected.)', description.strip(), 1)[0].rstrip()

		# Set tag name for objects mapping Tag
		if item['type'] in ['AZURE_TENANT', 'STORAGE_BUCKET', 'TECHNOLOGY']:
			item['name'] = ' '.join(item['type'].split('_')).lower().title()
			keys = ['technology', 'tenant-names', 'url']
			# NOTE(review): next() without a default raises StopIteration when
			# none of the keys is present — verify these event types always
			# carry one of them.
			info = next((item['data'].get(key) for key in keys if key in item['data']))
			if info:
				item['extra_data']['info'] = info
			for key in keys:
				if key in item['data']:
					del item['data'][key]

		# If 'name' key is present in 'data', set it as name
		elif 'name' in item['data'].keys():
			item['name'] = item['data']['name']
			del item['data']['name']

		# If 'name' key is present in 'extra_data', set it as name
		elif 'extra_data' in item and 'name' in item['extra_data'].keys():
			item['name'] = item['extra_data']['name']
			del item['extra_data']['name']

		# If 'description' key is present in 'data', set it as name
		elif description:
			item['name'] = description
			# NOTE(review): when the regex matched above, 'description' was
			# already deleted from item['data'], so this del would raise
			# KeyError — confirm whether this branch is reachable in that case.
			del item['data']['description']

		# If 'discovery_context' and no name set yet, set it as name
		else:
			item['name'] = item['discovery_context']

		# If a screenshot was saved, move it to secator output folder
		if item['type'] == 'WEBSCREENSHOT':
			from pathlib import Path
			path = Path.home() / '.bbot' / 'scans' / self.scan_config['name'] / item['data']['path']
			name = path.as_posix().split('/')[-1]
			secator_path = f'{self.reports_folder}/.outputs/{name}'
			yield Info(f'Copying screenshot {path} to {secator_path}')
			shutil.copyfile(path, secator_path)
			item['data']['path'] = secator_path

		yield item

	@staticmethod
	def extract_title(item):
		"""Return the page title encoded in an 'http-title-...' bbot tag, or ''."""
		for tag in item['tags']:
			if 'http-title' in tag:
				# Tag shape: http-title-<word>-<word>-...; drop the prefix parts.
				title = ' '.join(tag.split('-')[2:])
				return title
		return ''

	@staticmethod
	def extract_status_code(item):
		"""Return the HTTP status code encoded in a 'status-NNN' bbot tag, or 0."""
		for tag in item['tags']:
			if 'status-' in tag:
				return int([tag.split('-')[-1]][0])
		return 0
secator/tasks/bup.py ADDED
@@ -0,0 +1,118 @@
1
+ import json
2
+ import re
3
+ import shlex
4
+
5
+ from secator.decorators import task
6
+ from secator.output_types import Url, Progress
7
+ from secator.definitions import (
8
+ HEADER, DELAY, FOLLOW_REDIRECT, METHOD, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT,
9
+ DEPTH, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, FILTER_REGEX, FILTER_CODES, FILTER_SIZE, FILTER_WORDS,
10
+ MATCH_CODES, OPT_NOT_SUPPORTED, URL
11
+ )
12
+ from secator.serializers import JSONSerializer
13
+ from secator.tasks._categories import Http
14
+
15
+
16
@task()
class bup(Http):
	"""40X bypasser."""
	cmd = 'bup'
	input_types = [URL]
	output_types = [Url, Progress]
	tags = ['url', 'bypass']
	input_flag = '-u'
	file_flag = '-u'  # bup takes files and single URLs through the same flag
	json_flag = '--jsonl'
	opt_prefix = '--'
	opts = {
		'spoofport': {'type': int, 'short': 'sp', 'help': 'Port(s) to inject in port-specific headers'},
		'spoofip': {'type': str, 'short': 'si', 'help': 'IP(s) to inject in ip-specific headers'},
		'mode': {'type': str, 'help': 'Bypass modes (comma-delimited) amongst: all, mid_paths, end_paths, case_substitution, char_encode, http_methods, http_versions, http_headers_method, http_headers_scheme, http_headers_ip, http_headers_port, http_headers_url, user_agent'},  # noqa: E501
	}
	# Map secator's generic HTTP meta-options onto bup's CLI flags; options bup
	# has no equivalent for are marked OPT_NOT_SUPPORTED.
	opt_key_map = {
		HEADER: 'header',
		DELAY: OPT_NOT_SUPPORTED,
		FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
		METHOD: OPT_NOT_SUPPORTED,
		RATE_LIMIT: OPT_NOT_SUPPORTED,
		RETRIES: 'retry',
		THREADS: 'threads',
		TIMEOUT: 'timeout',
		USER_AGENT: OPT_NOT_SUPPORTED,
		DEPTH: OPT_NOT_SUPPORTED,
		MATCH_REGEX: OPT_NOT_SUPPORTED,
		MATCH_SIZE: OPT_NOT_SUPPORTED,
		MATCH_WORDS: OPT_NOT_SUPPORTED,
		FILTER_REGEX: OPT_NOT_SUPPORTED,
		FILTER_CODES: OPT_NOT_SUPPORTED,
		FILTER_SIZE: OPT_NOT_SUPPORTED,
		FILTER_WORDS: OPT_NOT_SUPPORTED,
		MATCH_CODES: OPT_NOT_SUPPORTED,
		PROXY: 'proxy',
	}
	item_loaders = [JSONSerializer()]
	output_map = {
		Url: {
			'url': 'request_url',
			'method': lambda x: bup.method_extractor(x),
			'request_headers': lambda x: bup.request_headers_extractor(x),
			'response_headers': lambda x: bup.response_headers_extractor(x),
			'status_code': 'response_status_code',
			'content_type': 'response_content_type',
			'content_length': 'response_content_length',
			'title': 'response_title',
			'server': 'response_server_type',
			'lines': 'response_lines_count',
			'words': 'response_words_count',
			'stored_response_path': 'response_html_filename',
		}
	}
	install_version = '0.4.4'
	install_cmd = 'pipx install bypass-url-parser==[install_version] --force'

	@staticmethod
	def on_init(self):
		"""Point bup's output directory at the run's reports folder."""
		response_path = f'{self.reports_folder}/.outputs/response'
		self.cmd += f' -o {shlex.quote(response_path)}'

	@staticmethod
	def on_line(self, line):
		"""Convert bup progress lines to Progress JSON; drop batcat noise.

		'Doing ...: current/total' lines become a Progress-shaped JSON string
		for the JSON serializer; other lines pass through unchanged.
		"""
		if 'Doing' in line:
			progress_indicator = line.split(':')[-1]
			current, total = tuple([int(c.strip()) for c in progress_indicator.split('/')])
			return json.dumps({"duration": "unknown", "percent": int((current / total) * 100)})
		elif 'batcat' in line:  # ignore batcat lines as they're loaded as JSON
			return None
		return line

	@staticmethod
	def method_extractor(item):
		"""Return the HTTP method from the curl payload's -X flag (GET if absent)."""
		payload = item['request_curl_payload']
		match = re.match(r'-X\s+(\w+)', payload)
		if match:
			return match.group(1)
		return 'GET'

	@staticmethod
	def request_headers_extractor(item):
		"""Collect 'Name: value' request headers from bup's curl payload/command.

		NOTE(review): the two regexes use permissive optional-quote patterns
		(`\'?...\'?` / `\'?...\"?`) — headers containing quotes may be cut
		short; confirm against bup's actual curl output format.
		"""
		headers = {}
		match1 = list(re.finditer(r'-H\s*\'?([^\']*)\'?', str(item['request_curl_payload'])))
		match2 = list(re.finditer(r'-H\s*\'?([^\']*)\"?', str(item['request_curl_cmd'])))
		matches = match1
		matches.extend(match2)
		for match in matches:
			# Split only on the first ':' so header values may contain colons.
			header = match.group(1).split(':', 1)
			if len(header) == 2:
				headers[header[0].strip()] = header[1].strip()
		return headers

	@staticmethod
	def response_headers_extractor(item):
		"""Parse raw response headers (one per line, first line skipped) into a dict."""
		headers_list = item['response_headers'].split('\n')[1:]
		headers = {}
		for header in headers_list:
			split_headers = header.split(':')
			key = split_headers[0]
			# Re-join the remainder so values containing ':' are preserved.
			value = ':'.join(split_headers[1:])
			headers[key] = value
		return headers