secator 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Potentially problematic release.
- secator/celery.py +160 -185
- secator/celery_utils.py +268 -0
- secator/cli.py +327 -106
- secator/config.py +27 -11
- secator/configs/workflows/host_recon.yaml +5 -3
- secator/configs/workflows/port_scan.yaml +7 -3
- secator/configs/workflows/url_bypass.yaml +10 -0
- secator/configs/workflows/url_vuln.yaml +1 -1
- secator/decorators.py +169 -92
- secator/definitions.py +10 -3
- secator/exporters/__init__.py +7 -5
- secator/exporters/console.py +10 -0
- secator/exporters/csv.py +27 -19
- secator/exporters/gdrive.py +16 -11
- secator/exporters/json.py +3 -1
- secator/exporters/table.py +30 -2
- secator/exporters/txt.py +20 -16
- secator/hooks/gcs.py +53 -0
- secator/hooks/mongodb.py +53 -27
- secator/output_types/__init__.py +29 -11
- secator/output_types/_base.py +11 -1
- secator/output_types/error.py +36 -0
- secator/output_types/exploit.py +1 -1
- secator/output_types/info.py +24 -0
- secator/output_types/ip.py +7 -0
- secator/output_types/port.py +8 -1
- secator/output_types/progress.py +5 -0
- secator/output_types/record.py +3 -1
- secator/output_types/stat.py +33 -0
- secator/output_types/tag.py +6 -4
- secator/output_types/url.py +6 -3
- secator/output_types/vulnerability.py +3 -2
- secator/output_types/warning.py +24 -0
- secator/report.py +55 -23
- secator/rich.py +44 -39
- secator/runners/_base.py +622 -635
- secator/runners/_helpers.py +5 -91
- secator/runners/celery.py +18 -0
- secator/runners/command.py +364 -211
- secator/runners/scan.py +8 -24
- secator/runners/task.py +21 -55
- secator/runners/workflow.py +41 -40
- secator/scans/__init__.py +28 -0
- secator/serializers/dataclass.py +6 -0
- secator/serializers/json.py +10 -5
- secator/serializers/regex.py +12 -4
- secator/tasks/_categories.py +5 -2
- secator/tasks/bbot.py +293 -0
- secator/tasks/bup.py +98 -0
- secator/tasks/cariddi.py +38 -49
- secator/tasks/dalfox.py +3 -0
- secator/tasks/dirsearch.py +12 -23
- secator/tasks/dnsx.py +49 -30
- secator/tasks/dnsxbrute.py +2 -0
- secator/tasks/feroxbuster.py +8 -17
- secator/tasks/ffuf.py +3 -2
- secator/tasks/fping.py +3 -3
- secator/tasks/gau.py +5 -0
- secator/tasks/gf.py +2 -2
- secator/tasks/gospider.py +4 -0
- secator/tasks/grype.py +9 -9
- secator/tasks/h8mail.py +31 -41
- secator/tasks/httpx.py +58 -21
- secator/tasks/katana.py +18 -22
- secator/tasks/maigret.py +26 -24
- secator/tasks/mapcidr.py +2 -3
- secator/tasks/msfconsole.py +4 -16
- secator/tasks/naabu.py +3 -1
- secator/tasks/nmap.py +50 -35
- secator/tasks/nuclei.py +9 -2
- secator/tasks/searchsploit.py +17 -9
- secator/tasks/subfinder.py +5 -1
- secator/tasks/wpscan.py +79 -93
- secator/template.py +61 -45
- secator/thread.py +24 -0
- secator/utils.py +330 -80
- secator/utils_test.py +48 -23
- secator/workflows/__init__.py +28 -0
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/METADATA +11 -5
- secator-0.7.0.dist-info/RECORD +115 -0
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/WHEEL +1 -1
- secator-0.6.0.dist-info/RECORD +0 -101
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/entry_points.txt +0 -0
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/licenses/LICENSE +0 -0
secator/tasks/bbot.py (ADDED, +293 lines)

```python
import shutil

from secator.decorators import task
from secator.runners import Command
from secator.serializers import RegexSerializer
from secator.output_types import Vulnerability, Port, Url, Record, Ip, Tag, Error
from secator.serializers import JSONSerializer


BBOT_MODULES = [
    "affiliates",
    # "ajaxpro",
    "anubisdb",
    "asn",
    "azure_realm",
    "azure_tenant",
    "badsecrets",
    "bevigil",
    "binaryedge",
    # "bucket_aws",
    "bucket_azure",
    "bucket_digitalocean",
    # "bucket_file_enum",
    "bucket_firebase",
    "bucket_google",
    "builtwith",
    "bypass403",
    "c99",
    "censys",
    "certspotter",
    # "chaos",
    "columbus",
    # "credshed",
    # "crobat",
    "crt",
    # "dastardly",
    # "dehashed",
    "digitorus",
    "dnscommonsrv",
    "dnsdumpster",
    # "dnszonetransfer",
    "emailformat",
    "ffuf",
    "ffuf_shortnames",
    # "filedownload",
    "fingerprintx",
    "fullhunt",
    "generic_ssrf",
    "git",
    "telerik",
    # "github_codesearch",
    "github_org",
    "gowitness",
    "hackertarget",
    "host_header",
    "httpx",
    "hunt",
    "hunterio",
    "iis_shortnames",
    # "internetdb",
    # "ip2location",
    "ipneighbor",
    "ipstack",
    "leakix",
    # "masscan",
    # "massdns",
    "myssl",
    # "newsletters",
    # "nmap",
    # "nsec",
    "ntlm",
    "nuclei",
    "oauth",
    "otx",
    "paramminer_cookies",
    "paramminer_getparams",
    "paramminer_headers",
    "passivetotal",
    "pgp",
    # "postman",
    "rapiddns",
    # "riddler",
    "robots",
    "secretsdb",
    "securitytrails",
    "shodan_dns",
    "sitedossier",
    "skymem",
    "smuggler",
    "social",
    "sslcert",
    # "subdomain_hijack",
    "subdomaincenter",
    # "sublist3r",
    "telerik",
    # "threatminer",
    "url_manipulation",
    "urlscan",
    "vhost",
    "viewdns",
    "virustotal",
    # "wafw00f",
    "wappalyzer",
    "wayback",
    "zoomeye"
]
BBOT_PRESETS = [
    'cloud-enum',
    'code-enum',
    'dirbust-heavy',
    'dirbust-light',
    'dotnet-audit',
    'email-enum',
    'iis-shortnames',
    'kitchen-sink',
    'paramminer',
    'spider',
    'subdomain-enum',
    'web-basic',
    'web-screenshots',
    'web-thorough'
]
BBOT_MODULES_STR = ' '.join(BBOT_MODULES)
BBOT_MAP_TYPES = {
    'IP_ADDRESS': Ip,
    'PROTOCOL': Port,
    'OPEN_TCP_PORT': Port,
    'URL': Url,
    'TECHNOLOGY': Tag,
    'ASN': Record,
    'DNS_NAME': Record,
    'WEBSCREENSHOT': Url,
    'VULNERABILITY': Vulnerability,
    'FINDING': Tag
}
BBOT_DESCRIPTION_REGEX = RegexSerializer(
    regex=r'(?P<name>[\w ]+): \[(?P<value>[^\[\]]+)\]',
    findall=True
)


def output_discriminator(self, item):
    _type = item.get('type')
    _message = item.get('message')
    if not _type and _message:
        return Error
    elif _type not in BBOT_MAP_TYPES:
        return None
    return BBOT_MAP_TYPES[_type]


@task()
class bbot(Command):
    cmd = 'bbot -y --allow-deadly --force'
    json_flag = '--json'
    input_flag = '-t'
    file_flag = None
    opts = {
        'modules': {'type': str, 'short': 'm', 'default': '', 'help': ','.join(BBOT_MODULES)},
        'presets': {'type': str, 'short': 'ps', 'default': 'kitchen-sink', 'help': ','.join(BBOT_PRESETS), 'shlex': False},
    }
    opt_key_map = {
        'modules': 'm',
        'presets': 'p'
    }
    opt_value_map = {
        'presets': lambda x: ' '.join(x.split(','))
    }
    item_loaders = [JSONSerializer()]
    output_types = [Vulnerability, Port, Url, Record, Ip]
    output_discriminator = output_discriminator
    output_map = {
        Ip: {
            'ip': lambda x: x['data'],
            'host': lambda x: x['data'],
            'alive': lambda x: True,
            '_source': lambda x: 'bbot-' + x['module']
        },
        Tag: {
            'name': 'name',
            'match': lambda x: x['data'].get('url') or x['data'].get('host'),
            'extra_data': 'extra_data',
            '_source': lambda x: 'bbot-' + x['module']
        },
        Url: {
            'url': lambda x: x['data'].get('url') if isinstance(x['data'], dict) else x['data'],
            'host': lambda x: x['resolved_hosts'][0] if 'resolved_hosts' in x else '',
            'status_code': lambda x: bbot.extract_status_code(x),
            'title': lambda x: bbot.extract_title(x),
            'screenshot_path': lambda x: x['data']['path'] if isinstance(x['data'], dict) else '',
            '_source': lambda x: 'bbot-' + x['module']
        },
        Port: {
            'port': lambda x: int(x['data']['port']) if 'port' in x['data'] else x['data'].split(':')[-1],
            'ip': lambda x: [_ for _ in x['resolved_hosts'] if not _.startswith('::')][0],
            'state': lambda x: 'OPEN',
            'service_name': lambda x: x['data']['protocol'] if 'protocol' in x['data'] else '',
            'cpes': lambda x: [],
            'host': lambda x: x['data']['host'] if isinstance(x['data'], dict) else x['data'].split(':')[0],
            'extra_data': 'extra_data',
            '_source': lambda x: 'bbot-' + x['module']
        },
        Vulnerability: {
            'name': 'name',
            'match': lambda x: x['data'].get('url') or x['data']['host'],
            'extra_data': 'extra_data',
            'severity': lambda x: x['data']['severity'].lower()
        },
        Record: {
            'name': 'name',
            'type': 'type',
            'extra_data': 'extra_data'
        },
        Error: {
            'message': 'message'
        }
    }
    install_cmd = 'pipx install bbot && pipx upgrade bbot'

    @staticmethod
    def on_json_loaded(self, item):
        _type = item.get('type')

        if not _type:
            yield item
            return

        if _type not in BBOT_MAP_TYPES:
            self._print(f'[bold orange3]Found unsupported bbot type: {_type}.[/] [bold green]Skipping.[/]')
            return

        if isinstance(item['data'], str):
            item['name'] = item['data']
            yield item
            return

        item['extra_data'] = item['data']

        # Parse bbot description into extra_data
        description = item['data'].get('description')
        if description:
            del item['data']['description']
            match = BBOT_DESCRIPTION_REGEX.run(description)
            for chunk in match:
                key, val = tuple([c.strip() for c in chunk])
                if ',' in val:
                    val = val.split(',')
                key = '_'.join(key.split(' ')).lower()
                item['extra_data'][key] = val

        # Set technology as name for Tag
        if item['type'] == 'TECHNOLOGY':
            item['name'] = item['data']['technology']
            del item['data']['technology']

        # If 'name' key is present in 'data', set it as name
        elif 'name' in item['data'].keys():
            item['name'] = item['data']['name']
            del item['data']['name']

        # If 'name' key is present in 'extra_data', set it as name
        elif 'extra_data' in item and 'name' in item['extra_data'].keys():
            item['name'] = item['extra_data']['name']
            del item['extra_data']['name']

        # If 'discovery_context' and no name set yet, set it as name
        else:
            item['name'] = item['discovery_context']

        # If a screenshot was saved, move it to secator output folder
        if item['type'] == 'WEBSCREENSHOT':
            path = item['data']['path']
            name = path.split('/')[-1]
            secator_path = f'{self.reports_folder}/.outputs/{name}'
            shutil.copy(path, secator_path)
            item['data']['path'] = secator_path

        yield item

    @staticmethod
    def extract_title(item):
        for tag in item['tags']:
            if 'http-title' in tag:
                title = ' '.join(tag.split('-')[2:])
                return title
        return ''

    @staticmethod
    def extract_status_code(item):
        for tag in item['tags']:
            if 'status-' in tag:
                return int([tag.split('-')[-1]][0])
        return 0
```
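For orientation, here is a minimal library-style sketch of how the new `bbot` task could be driven. It assumes the usual secator pattern of passing the target as the first argument and task options as keyword arguments; the `example.com` target and the `subdomain-enum` preset are illustrative choices, not part of the diff.

```python
# Hypothetical usage sketch (assumes secator 0.7.0 and bbot installed via pipx).
from secator.tasks.bbot import bbot

# Target and preset are placeholders; the keyword maps to the 'presets' opt above.
for result in bbot('example.com', presets='subdomain-enum'):
    # Results arrive as secator output types (Url, Port, Ip, Record, Vulnerability, ...).
    print(type(result).__name__, result)
```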
secator/tasks/bup.py (ADDED, +98 lines)

```python
import json
import re

from secator.decorators import task
from secator.output_types import Url, Progress
from secator.definitions import (
    HEADER, DELAY, FOLLOW_REDIRECT, METHOD, PROXY, RATE_LIMIT, RETRIES, THREADS, TIMEOUT, USER_AGENT,
    DEPTH, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, FILTER_REGEX, FILTER_CODES, FILTER_SIZE, FILTER_WORDS,
    MATCH_CODES, OPT_NOT_SUPPORTED, URL
)
from secator.serializers import JSONSerializer
from secator.tasks._categories import Http


@task()
class bup(Http):
    cmd = 'bup'
    input_flag = '-u'
    input_type = URL
    json_flag = '--jsonl'
    opt_prefix = '--'
    opts = {
        'spoofport': {'type': int, 'short': 'sp', 'help': 'Port(s) to inject in port-specific headers'},
        'spoofip': {'type': str, 'short': 'si', 'help': 'IP(s) to inject in ip-specific headers'},
        'mode': {'type': str, 'help': 'Bypass modes.'},
    }
    opt_key_map = {
        HEADER: 'header',
        DELAY: OPT_NOT_SUPPORTED,
        FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
        METHOD: OPT_NOT_SUPPORTED,
        RATE_LIMIT: OPT_NOT_SUPPORTED,
        RETRIES: 'retry',
        THREADS: 'threads',
        TIMEOUT: 'timeout',
        USER_AGENT: OPT_NOT_SUPPORTED,
        DEPTH: OPT_NOT_SUPPORTED,
        MATCH_REGEX: OPT_NOT_SUPPORTED,
        MATCH_SIZE: OPT_NOT_SUPPORTED,
        MATCH_WORDS: OPT_NOT_SUPPORTED,
        FILTER_REGEX: OPT_NOT_SUPPORTED,
        FILTER_CODES: OPT_NOT_SUPPORTED,
        FILTER_SIZE: OPT_NOT_SUPPORTED,
        FILTER_WORDS: OPT_NOT_SUPPORTED,
        MATCH_CODES: OPT_NOT_SUPPORTED,
        PROXY: 'proxy',
    }
    item_loaders = [JSONSerializer()]
    output_types = [Url, Progress]
    output_map = {
        Url: {
            'url': 'request_url',
            'method': lambda x: bup.method_extractor(x),
            'headers': lambda x: bup.headers_extractor(x),
            'status_code': 'response_status_code',
            'content_type': 'response_content_type',
            'content_length': 'response_content_length',
            'title': 'response_title',
            'server': 'response_server_type',
            'lines': 'response_lines_count',
            'words': 'response_words_count',
            'stored_response_path': 'response_html_filename',
        }
    }
    install_cmd = 'pipx install bypass-url-parser && pipx upgrade bypass-url-parser'

    @staticmethod
    def on_init(self):
        self.cmd += f' -o {self.reports_folder}/.outputs/response'

    @staticmethod
    def on_line(self, line):
        if 'Doing' in line:
            progress_indicator = line.split(':')[-1]
            current, total = tuple([int(c.strip()) for c in progress_indicator.split('/')])
            return json.dumps({"duration": "unknown", "percent": int((current / total) * 100)})
        elif 'batcat' in line:  # ignore batcat lines as they're loaded as JSON
            return None
        return line

    @staticmethod
    def method_extractor(item):
        payload = item['request_curl_payload']
        match = re.match(r'-X\s+(\w+)', payload)
        if match:
            return match.group(1)
        return 'GET'

    @staticmethod
    def headers_extractor(item):
        headers_list = item['response_headers'].split('\n')[1:]
        headers = {}
        for header in headers_list:
            split_headers = header.split(':')
            key = split_headers[0]
            value = ':'.join(split_headers[1:])
            headers[key] = value
        return headers
```
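Similarly, a hedged sketch of invoking the new `bup` (bypass-url-parser) task from Python; the target URL and the `mode` value are illustrative assumptions, and the same target-first, options-as-kwargs pattern is assumed.

```python
# Hypothetical usage sketch (assumes bypass-url-parser installed via pipx).
from secator.tasks.bup import bup

# 'mode' is one of the opts declared above; the URL is a placeholder target.
for result in bup('https://example.com/admin', mode='all'):
    # Yields Url items built from bup's JSONL output, plus Progress updates.
    print(result)
```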
secator/tasks/cariddi.py (CHANGED)

```diff
@@ -1,5 +1,3 @@
-import json
-
 from secator.decorators import task
 from secator.definitions import (DELAY, DEPTH, FILTER_CODES, FILTER_REGEX,
                                  FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT,
@@ -8,6 +6,7 @@ from secator.definitions import (DELAY, DEPTH, FILTER_CODES, FILTER_REGEX,
                                  OPT_PIPE_INPUT, PROXY, RATE_LIMIT, RETRIES,
                                  THREADS, TIMEOUT, URL, USER_AGENT)
 from secator.output_types import Tag, Url
+from secator.serializers import JSONSerializer
 from secator.tasks._categories import HttpCrawler
 
 
@@ -41,7 +40,7 @@ class cariddi(HttpCrawler):
         TIMEOUT: 't',
         USER_AGENT: 'ua'
     }
-    item_loaders = []
+    item_loaders = [JSONSerializer()]
     install_cmd = 'go install -v github.com/edoardottt/cariddi/cmd/cariddi@latest'
     install_github_handle = 'edoardottt/cariddi'
     encoding = 'ansi'
@@ -51,53 +50,43 @@ class cariddi(HttpCrawler):
     profile = 'cpu'
 
     @staticmethod
-    def …
-        …
-        errors = matches.get('errors', [])
-        secrets = matches.get('secrets', [])
-        infos = matches.get('infos', [])
-
-        for param in params:
-            param_name = param['name']
-            for attack in param['attacks']:
-                extra_data = {'param': param_name, 'source': 'url'}
-                item = {
-                    'name': attack + ' param',
-                    'match': url,
-                    'extra_data': extra_data
-                }
-                items.append(item)
-
-        for error in errors:
-            match = error['match']
-            match = (match[:1000] + '...TRUNCATED') if len(match) > 1000 else match  # truncate as this can be a very long match
-            error['extra_data'] = {'error': match, 'source': 'body'}
-            error['match'] = url
-            items.append(error)
+    def on_json_loaded(self, item):
+        url_item = {k: v for k, v in item.items() if k != 'matches'}
+        yield Url(**url_item)
+        url = url_item[URL]
+        matches = item.get('matches', {})
+        params = matches.get('parameters', [])
+        errors = matches.get('errors', [])
+        secrets = matches.get('secrets', [])
+        infos = matches.get('infos', [])
 
-        …
+        for param in params:
+            param_name = param['name']
+            for attack in param['attacks']:
+                extra_data = {'param': param_name, 'source': 'url'}
+                yield Tag(
+                    name=f'{attack} param',
+                    match=url,
+                    extra_data=extra_data
+                )
 
-        …
-            info['extra_data'] = {'info': match, 'source': 'body'}
-            info['match'] = url
-            items.append(info)
+        for error in errors:
+            match = error['match']
+            error['extra_data'] = {'error': match, 'source': 'body'}
+            error['match'] = url
+            yield Tag(**error)
 
-        …
+        for secret in secrets:
+            match = secret['match']
+            secret['extra_data'] = {'secret': match, 'source': 'body'}
+            secret['match'] = url
+            yield Tag(**secret)
 
-        …
+        for info in infos:
+            CARIDDI_IGNORE_LIST = ['BTC address']  # TODO: make this a config option
+            if info['name'] in CARIDDI_IGNORE_LIST:
+                continue
+            match = info['match']
+            info['extra_data'] = {'info': match, 'source': 'body'}
+            info['match'] = url
+            yield Tag(**info)
```

(Removed lines marked `…` were collapsed in the original diff view and are not recoverable.)
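To illustrate the switch from the old line-based parser to `JSONSerializer` plus `on_json_loaded`, here is a hedged sketch that drives the new hook directly with a fabricated item; the sample dict's shape is an assumption modeled on cariddi's JSON output, not taken from the diff.

```python
# Hypothetical hook walk-through; the sample item below is an assumption.
from secator.tasks.cariddi import cariddi

sample = {
    'url': 'http://example.com/search?q=1',
    'matches': {'parameters': [{'name': 'q', 'attacks': ['xss']}]},
}

# on_json_loaded is a plain @staticmethod hook that does not use `self`, so it can
# be called directly here; it yields a Url for the crawled page, then one Tag per
# parameter attack.
for output in cariddi.on_json_loaded(None, sample):
    print(output)
```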
secator/tasks/dalfox.py (CHANGED)

```diff
@@ -7,6 +7,7 @@ from secator.definitions import (CONFIDENCE, DELAY, EXTRA_DATA, FOLLOW_REDIRECT,
                                  SEVERITY, TAGS, THREADS, TIMEOUT, URL,
                                  USER_AGENT)
 from secator.output_types import Vulnerability
+from secator.serializers import JSONSerializer
 from secator.tasks._categories import VulnHttp
 
 DALFOX_TYPE_MAP = {
@@ -23,6 +24,7 @@ class dalfox(VulnHttp):
     input_type = URL
     input_flag = 'url'
     file_flag = 'file'
+    # input_chunk_size = 1
     json_flag = '--format json'
     version_flag = 'version'
     opt_prefix = '--'
@@ -37,6 +39,7 @@ class dalfox(VulnHttp):
         TIMEOUT: 'timeout',
         USER_AGENT: 'user-agent'
     }
+    item_loaders = [JSONSerializer()]
     output_map = {
         Vulnerability: {
             ID: lambda x: None,
```
secator/tasks/dirsearch.py (CHANGED)

```diff
@@ -10,7 +10,7 @@ from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, DELAY, DEPTH,
                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, OUTPUT_PATH, PROXY,
                                  RATE_LIMIT, RETRIES, STATUS_CODE,
                                  THREADS, TIMEOUT, USER_AGENT, WORDLIST)
-from secator.output_types import Url
+from secator.output_types import Url, Info, Error
 from secator.tasks._categories import HttpFuzzer
 
 
@@ -58,31 +58,20 @@ class dirsearch(HttpFuzzer):
     proxy_http = True
     profile = 'io'
 
-    def yielder(self):
-        prev = self.print_item_count
-        self.print_item_count = False
-        list(super().yielder())
-        if self.return_code != 0:
-            return
-        self.results = []
-        if not self.output_json:
-            return
-        note = f'dirsearch JSON results saved to {self.output_path}'
-        if self.print_line:
-            self._print(note)
-        if os.path.exists(self.output_path):
-            with open(self.output_path, 'r') as f:
-                results = yaml.safe_load(f.read()).get('results', [])
-                for item in results:
-                    item = self._process_item(item)
-                    if not item:
-                        continue
-                    yield item
-        self.print_item_count = prev
-
     @staticmethod
     def on_init(self):
         self.output_path = self.get_opt_value(OUTPUT_PATH)
         if not self.output_path:
            self.output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
         self.cmd += f' -o {self.output_path}'
+
+    @staticmethod
+    def on_cmd_done(self):
+        if not os.path.exists(self.output_path):
+            yield Error(message=f'Could not find JSON results in {self.output_path}')
+            return
+
+        yield Info(message=f'JSON results saved to {self.output_path}')
+        with open(self.output_path, 'r') as f:
+            results = yaml.safe_load(f.read()).get('results', [])
+            yield from results
```
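A small hedged sketch of the new `on_cmd_done` contract: the hook is a generator that reads the saved JSON report and yields `Info`/`Error` plus raw result dicts, so it can be exercised with a stub object. The stub class, the temp path, and the sample report fields below are assumptions made for illustration; secator normally calls this hook itself after the command exits.

```python
# Hypothetical exercise of the on_cmd_done hook outside a real run (assumptions only).
import json
import os
import tempfile

from secator.tasks.dirsearch import dirsearch

class _Stub:
    output_path = os.path.join(tempfile.gettempdir(), 'dirsearch-demo.json')

# Fake report; the result field names are illustrative, not taken from dirsearch.
with open(_Stub.output_path, 'w') as f:
    json.dump({'results': [{'url': 'http://example.com/admin/', 'status': 200}]}, f)

for item in dirsearch.on_cmd_done(_Stub()):
    print(item)  # Info(...) followed by the raw result dicts from the report
```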
secator/tasks/dnsx.py (CHANGED)

```diff
@@ -1,18 +1,20 @@
 from secator.decorators import task
 from secator.definitions import (OPT_PIPE_INPUT, RATE_LIMIT, RETRIES, THREADS)
-from secator.output_types import Record
+from secator.output_types import Record, Ip, Subdomain
+from secator.output_types.ip import IpProtocol
 from secator.tasks._categories import ReconDns
-import json
+from secator.serializers import JSONSerializer
+from secator.utils import extract_domain_info
 
 
 @task()
 class dnsx(ReconDns):
     """dnsx is a fast and multi-purpose DNS toolkit designed for running various retryabledns library."""
-    cmd = 'dnsx -resp -…
+    cmd = 'dnsx -resp -recon'
     json_flag = '-json'
     input_flag = OPT_PIPE_INPUT
     file_flag = OPT_PIPE_INPUT
-    output_types = [Record]
+    output_types = [Record, Ip, Subdomain]
     opt_key_map = {
         RATE_LIMIT: 'rate-limit',
         RETRIES: 'retry',
@@ -23,35 +25,52 @@ class dnsx(ReconDns):
         'resolver': {'type': str, 'short': 'r', 'help': 'List of resolvers to use (file or comma separated)'},
         'wildcard_domain': {'type': str, 'short': 'wd', 'help': 'Domain name for wildcard filtering'},
     }
-
+    item_loaders = [JSONSerializer()]
     install_cmd = 'go install -v github.com/projectdiscovery/dnsx/cmd/dnsx@latest'
     install_github_handle = 'projectdiscovery/dnsx'
     profile = 'io'
 
     @staticmethod
-    def …
-        …
-            for _type in record_types:
-                values = item.get(_type, [])
-                for value in values:
-                    name = value
-                    extra_data = {}
-                    if isinstance(value, dict):
-                        name = value['name']
-                        extra_data = {k: v for k, v in value.items() if k != 'name'}
-                    items.append({
-                        'host': host,
-                        'name': name,
-                        'type': _type.upper(),
-                        'extra_data': extra_data
-                    })
-        except json.decoder.JSONDecodeError:
-            pass
+    def on_json_loaded(self, item):
+        # Show full DNS response
+        quiet = self.get_opt_value('quiet')
+        if not quiet:
+            all = item['all']
+            for line in all:
+                yield line
+            yield '\n'
 
-
+        # Loop through record types and yield records
+        record_types = ['a', 'aaaa', 'cname', 'mx', 'ns', 'txt', 'srv', 'ptr', 'soa', 'axfr', 'caa']
+        host = item['host']
+        for _type in record_types:
+            values = item.get(_type, [])
+            for value in values:
+                name = value
+                extra_data = {}
+                if isinstance(value, dict):
+                    name = value['name']
+                    extra_data = {k: v for k, v in value.items() if k != 'name'}
+                if _type == 'a':
+                    yield Ip(
+                        host=host,
+                        ip=name,
+                        protocol=IpProtocol.IPv4
+                    )
+                elif _type == 'aaaa':
+                    yield Ip(
+                        host=host,
+                        ip=name,
+                        protocol=IpProtocol.IPv6
+                    )
+                elif _type == 'ptr':
+                    yield Subdomain(
+                        host=name,
+                        domain=extract_domain_info(name, domain_only=True)
+                    )
+                yield Record(
+                    host=host,
                    name=name,
+                    type=_type.upper(),
+                    extra_data=extra_data
+                )
```

(Removed lines marked `…` were collapsed or truncated in the original diff view and are not recoverable; the removed `cmd` line is shown as truncated.)