secator-0.0.1-py3-none-any.whl
- secator/__init__.py +0 -0
- secator/celery.py +482 -0
- secator/cli.py +617 -0
- secator/config.py +137 -0
- secator/configs/__init__.py +0 -0
- secator/configs/profiles/__init__.py +0 -0
- secator/configs/profiles/aggressive.yaml +7 -0
- secator/configs/profiles/default.yaml +9 -0
- secator/configs/profiles/stealth.yaml +7 -0
- secator/configs/scans/__init__.py +0 -0
- secator/configs/scans/domain.yaml +18 -0
- secator/configs/scans/host.yaml +14 -0
- secator/configs/scans/network.yaml +17 -0
- secator/configs/scans/subdomain.yaml +8 -0
- secator/configs/scans/url.yaml +12 -0
- secator/configs/workflows/__init__.py +0 -0
- secator/configs/workflows/cidr_recon.yaml +28 -0
- secator/configs/workflows/code_scan.yaml +11 -0
- secator/configs/workflows/host_recon.yaml +41 -0
- secator/configs/workflows/port_scan.yaml +34 -0
- secator/configs/workflows/subdomain_recon.yaml +33 -0
- secator/configs/workflows/url_crawl.yaml +29 -0
- secator/configs/workflows/url_dirsearch.yaml +29 -0
- secator/configs/workflows/url_fuzz.yaml +35 -0
- secator/configs/workflows/url_nuclei.yaml +11 -0
- secator/configs/workflows/url_vuln.yaml +55 -0
- secator/configs/workflows/user_hunt.yaml +10 -0
- secator/configs/workflows/wordpress.yaml +14 -0
- secator/decorators.py +309 -0
- secator/definitions.py +165 -0
- secator/exporters/__init__.py +12 -0
- secator/exporters/_base.py +3 -0
- secator/exporters/csv.py +30 -0
- secator/exporters/gdrive.py +118 -0
- secator/exporters/json.py +15 -0
- secator/exporters/table.py +7 -0
- secator/exporters/txt.py +25 -0
- secator/hooks/__init__.py +0 -0
- secator/hooks/mongodb.py +212 -0
- secator/output_types/__init__.py +24 -0
- secator/output_types/_base.py +95 -0
- secator/output_types/exploit.py +50 -0
- secator/output_types/ip.py +33 -0
- secator/output_types/port.py +45 -0
- secator/output_types/progress.py +35 -0
- secator/output_types/record.py +34 -0
- secator/output_types/subdomain.py +42 -0
- secator/output_types/tag.py +46 -0
- secator/output_types/target.py +30 -0
- secator/output_types/url.py +76 -0
- secator/output_types/user_account.py +41 -0
- secator/output_types/vulnerability.py +97 -0
- secator/report.py +107 -0
- secator/rich.py +124 -0
- secator/runners/__init__.py +12 -0
- secator/runners/_base.py +833 -0
- secator/runners/_helpers.py +153 -0
- secator/runners/command.py +638 -0
- secator/runners/scan.py +65 -0
- secator/runners/task.py +106 -0
- secator/runners/workflow.py +135 -0
- secator/serializers/__init__.py +8 -0
- secator/serializers/dataclass.py +33 -0
- secator/serializers/json.py +15 -0
- secator/serializers/regex.py +17 -0
- secator/tasks/__init__.py +10 -0
- secator/tasks/_categories.py +304 -0
- secator/tasks/cariddi.py +102 -0
- secator/tasks/dalfox.py +65 -0
- secator/tasks/dirsearch.py +90 -0
- secator/tasks/dnsx.py +56 -0
- secator/tasks/dnsxbrute.py +34 -0
- secator/tasks/feroxbuster.py +91 -0
- secator/tasks/ffuf.py +86 -0
- secator/tasks/fping.py +44 -0
- secator/tasks/gau.py +47 -0
- secator/tasks/gf.py +33 -0
- secator/tasks/gospider.py +71 -0
- secator/tasks/grype.py +79 -0
- secator/tasks/h8mail.py +81 -0
- secator/tasks/httpx.py +99 -0
- secator/tasks/katana.py +133 -0
- secator/tasks/maigret.py +78 -0
- secator/tasks/mapcidr.py +32 -0
- secator/tasks/msfconsole.py +174 -0
- secator/tasks/naabu.py +52 -0
- secator/tasks/nmap.py +344 -0
- secator/tasks/nuclei.py +97 -0
- secator/tasks/searchsploit.py +52 -0
- secator/tasks/subfinder.py +40 -0
- secator/tasks/wpscan.py +179 -0
- secator/utils.py +445 -0
- secator/utils_test.py +183 -0
- secator-0.0.1.dist-info/LICENSE +60 -0
- secator-0.0.1.dist-info/METADATA +199 -0
- secator-0.0.1.dist-info/RECORD +114 -0
- secator-0.0.1.dist-info/WHEEL +5 -0
- secator-0.0.1.dist-info/entry_points.txt +2 -0
- secator-0.0.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/integration/__init__.py +0 -0
- tests/integration/inputs.py +42 -0
- tests/integration/outputs.py +392 -0
- tests/integration/test_scans.py +82 -0
- tests/integration/test_tasks.py +103 -0
- tests/integration/test_workflows.py +163 -0
- tests/performance/__init__.py +0 -0
- tests/performance/loadtester.py +56 -0
- tests/unit/__init__.py +0 -0
- tests/unit/test_celery.py +39 -0
- tests/unit/test_scans.py +0 -0
- tests/unit/test_serializers.py +51 -0
- tests/unit/test_tasks.py +348 -0
- tests/unit/test_workflows.py +96 -0
secator/config.py
ADDED
@@ -0,0 +1,137 @@
import glob
import os
from pathlib import Path

import yaml
from dotmap import DotMap

from secator.rich import console
from secator.definitions import CONFIGS_FOLDER, EXTRA_CONFIGS_FOLDER

CONFIGS_DIR_KEYS = ['workflow', 'scan', 'profile']


def load_config(name):
	"""Load a config by name.

	Args:
		name: Name of the config, for instance profiles/aggressive or workflows/domain_scan.

	Returns:
		dict: Loaded config.
	"""
	path = Path(CONFIGS_FOLDER) / f'{name}.yaml'
	if not path.exists():
		console.log(f'Config "{name}" could not be loaded.')
		return
	with path.open('r') as f:
		return yaml.load(f.read(), Loader=yaml.Loader)


def find_configs():
	"""Find all YAML configs in the default (and optional extra) config folders, grouped by type."""
	results = {'scan': [], 'workflow': [], 'profile': []}
	dirs_type = [CONFIGS_FOLDER]
	if EXTRA_CONFIGS_FOLDER:
		dirs_type.append(EXTRA_CONFIGS_FOLDER)
	paths = []
	for dir in dirs_type:
		dir_paths = [
			os.path.abspath(path)
			for path in glob.glob(dir.rstrip('/') + '/**/*.y*ml', recursive=True)
		]
		paths.extend(dir_paths)
	for path in paths:
		with open(path, 'r') as f:
			try:
				config = yaml.load(f.read(), yaml.Loader)
				type = config.get('type')
				if type:
					results[type].append(path)
			except yaml.YAMLError as exc:
				console.log(f'Unable to load config at {path}')
				console.log(str(exc))
	return results


class ConfigLoader(DotMap):
	"""Dict-like wrapper around a YAML config, loadable from a name, a file path, or a raw dict."""

	def __init__(self, input={}, name=None, **kwargs):
		if name:
			name = name.replace('-', '_')  # so that workflows have a nice '-' in CLI
			config = self._load_from_name(name)
		elif isinstance(input, str):
			config = self._load_from_file(input)
		else:
			config = input
		super().__init__(config)

	def _load_from_file(self, path):
		if not os.path.exists(path):
			console.log(f'Config path {path} does not exist', style='bold red')
			return
		with open(path, 'r') as f:
			return yaml.load(f.read(), Loader=yaml.Loader)

	def _load_from_name(self, name):
		return load_config(name)

	@classmethod
	def load_all(cls):
		configs = find_configs()
		return ConfigLoader({
			key: [ConfigLoader(path) for path in configs[key]]
			for key in CONFIGS_DIR_KEYS
		})

	def get_tasks_class(self):
		from secator.runners import Task
		tasks = []
		for name, conf in self.tasks.items():
			if name == '_group':
				# Flatten grouped tasks recursively.
				group_conf = ConfigLoader(input={'tasks': conf})
				tasks.extend(group_conf.get_tasks_class())
			else:
				tasks.append(Task.get_task_class(name))
		return tasks

	def get_workflows(self):
		return [ConfigLoader(name=f'workflows/{name}') for name, _ in self.workflows.items()]

	def get_workflow_supported_opts(self):
		opts = {}
		tasks = self.get_tasks_class()
		for task_cls in tasks:
			task_opts = task_cls.get_supported_opts()
			for name, conf in task_opts.items():
				# An option is supported if any task in the workflow supports it.
				supported = opts.get(name, {}).get('supported', False)
				opts[name] = conf
				opts[name]['supported'] = conf['supported'] or supported
		return opts

	def get_scan_supported_opts(self):
		opts = {}
		workflows = self.get_workflows()
		for workflow in workflows:
			workflow_opts = workflow.get_workflow_supported_opts()
			for name, conf in workflow_opts.items():
				supported = opts.get(name, {}).get('supported', False)
				opts[name] = conf
				opts[name]['supported'] = conf['supported'] or supported
		return opts

	@property
	def supported_opts(self):
		return self.get_supported_opts()

	def get_supported_opts(self):
		opts = {}
		if self.type == 'workflow':
			opts = self.get_workflow_supported_opts()
		elif self.type == 'scan':
			opts = self.get_scan_supported_opts()
		elif self.type == 'task':
			tasks = self.get_tasks_class()
			if tasks:
				opts = tasks[0].get_supported_opts()
		return dict(sorted(opts.items()))
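For orientation, a minimal usage sketch of the loader above. This snippet is illustrative and not part of the package; it assumes secator 0.0.1 is installed so that CONFIGS_FOLDER points at the bundled configs shown in this diff.

from secator.config import ConfigLoader, load_config

# Load one bundled config by relative name (no .yaml extension);
# returns a plain dict, or None when the file is missing.
raw = load_config('profiles/default')

# Wrap a config by name; '-' from CLI names is mapped back to '_'.
wf = ConfigLoader(name='workflows/host-recon')
print(wf.name, '->', wf.description)  # host_recon -> Host recon

# Discover every scan, workflow, and profile config on disk.
all_configs = ConfigLoader.load_all()
print([w.name for w in all_configs.workflow])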
secator/configs/scans/domain.yaml
ADDED
@@ -0,0 +1,18 @@
type: scan
name: domain
description: Domain scan
profile: default
input_types:
- host
workflows:
  subdomain_recon:
  host_recon:
    targets_:
    - target.name
    - subdomain.host
  url_crawl:
    targets_:
    - url.url
  url_vuln:
    targets_:
    - url.url
secator/configs/scans/network.yaml
ADDED
@@ -0,0 +1,17 @@
type: scan
name: network
description: Internal network scan
profile: default
input_types:
- cidr_range
workflows:
  cidr_recon:
  url_nuclei:
    targets_:
    - url.url
  url_crawl:
    targets_:
    - url.url
  url_vuln:
    targets_:
    - url.url
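The two scan configs above are consumed through get_scan_supported_opts() in config.py: a scan's options are the union of its workflows' options, which are themselves the union of their tasks' options. A hedged sketch of that path (assuming every referenced workflow and task class resolves on the installed system):

from secator.config import ConfigLoader

scan = ConfigLoader(name='scans/domain')
opts = scan.supported_opts  # walks workflows -> tasks, merges, sorts
for opt, conf in opts.items():
    print(opt, conf.get('supported'))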
secator/configs/workflows/cidr_recon.yaml
ADDED
@@ -0,0 +1,28 @@
type: workflow
name: cidr_recon
alias: cidrrec
description: Local network recon
tags: [recon, cidr, network]
input_types:
- cidr_range
tasks:
  mapcidr:
    description: Find CIDR range IPs
  fping:
    description: Check for alive IPs
    targets_: ip.ip
  naabu:
    description: Scan alive IPs' ports
    targets_:
    - type: ip
      field: ip
      condition: item.alive
  httpx:
    description: Probe HTTP services on open ports
    targets_:
    - type: port
      field: '{ip}:{port}'
results:
- type: ip
  condition: item.alive
- type: url
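The condition strings above (for example item.alive) read as Python expressions evaluated against each result item. A toy evaluator to illustrate the idea only; this is an assumption about the semantics, not secator's runner code, which lives under secator/runners/:

from dotmap import DotMap

def item_matches(item: dict, condition: str) -> bool:
    # DotMap provides the attribute-style access (item.alive,
    # item.status_code) that the condition strings rely on.
    return bool(eval(condition, {}, {'item': DotMap(item)}))

ip = {'ip': '192.168.1.10', 'alive': True}
assert item_matches(ip, 'item.alive')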
secator/configs/workflows/host_recon.yaml
ADDED
@@ -0,0 +1,41 @@
type: workflow
name: host_recon
alias: hostrec
description: Host recon
tags: [recon, network, http]
input_types:
- host
- cidr_range
tasks:
  naabu:
    description: Find open ports
  nmap:
    description: Search for vulnerabilities on open ports
    targets_: port.host
    ports_: port.port
  httpx:
    description: Probe HTTP services on open ports
    targets_:
    - type: port
      field: '{host}:{port}'
      condition: item._source == 'nmap'
  _group:
    nuclei/network:
      description: Scan network and SSL vulnerabilities
      tags: [network, ssl]
    nuclei/url:
      description: Search for vulnerabilities on alive HTTP services
      exclude_tags: [network, ssl, file, dns, osint, token-spray, headers]
      targets_:
      - type: url
        field: url
        condition: item.status_code != 0
results:
- type: port
  condition: item._source == 'nmap'

- type: vulnerability
  # condition: item.confidence == 'high'

- type: url
  condition: item.status_code != 0
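This workflow introduces the _group key, which config.py's get_tasks_class() flattens recursively: tasks under _group are grouped together but contribute their Task classes like top-level entries. A sketch (assuming Task.get_task_class maps the 'nuclei/...' variant names to the nuclei class):

from secator.config import ConfigLoader

wf = ConfigLoader(name='workflows/host_recon')
# naabu, nmap, httpx, then the two nuclei entries from _group
for task_cls in wf.get_tasks_class():
    print(task_cls.__name__)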
secator/configs/workflows/port_scan.yaml
ADDED
@@ -0,0 +1,34 @@
type: workflow
name: port_scan
alias: pscan
description: Port scan
tags: [recon, network, http, vuln]
input_types:
- host
tasks:
  naabu:
    description: Find open ports
  nmap:
    description: Search for vulnerabilities on open ports
    targets_: port.host
    ports_: port.port
  _group:
    searchsploit:
      description: Search for related exploits
      targets_:
      - type: port
        field: '{host}~{service_name}'
        condition: item._source == 'nmap' and len(item.service_name.split('/')) > 1
    httpx:
      description: Probe HTTP services on open ports
      targets_:
      - type: port
        field: '{host}:{port}'
        condition: item._source == 'nmap'
results:
- type: port

- type: url
  condition: item.status_code != 0

- type: vulnerability
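The field values such as '{host}:{port}' and '{host}~{service_name}' read like str.format() templates filled from each result item's fields, turning one task's port results into the next task's targets. A toy rendering under that assumption, not the runner's implementation:

port_item = {'host': 'example.com', 'port': 443, 'service_name': 'https'}
print('{host}:{port}'.format(**port_item))          # example.com:443
print('{host}~{service_name}'.format(**port_item))  # example.com~https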
secator/configs/workflows/subdomain_recon.yaml
ADDED
@@ -0,0 +1,33 @@
type: workflow
name: subdomain_recon
alias: subrec
description: Subdomain discovery
tags: [recon, dns, takeovers]
input_types:
- host
tasks:
  subfinder:
    description: List subdomains (passive)
  # TODO: add subdomain bruteforcers
  # gobuster:
  #   input: vhost
  #   domain_:
  #   - target.name
  #   wordlist: /usr/share/seclists/Discovery/DNS/combined_subdomains.txt
  # gobuster:
  #   input: dns
  #   domain_:
  #   - target.name
  #   wordlist: /usr/share/seclists/Discovery/DNS/combined_subdomains.txt
  _group:
    nuclei:
      description: Check for subdomain takeovers
      targets_:
      - target.name
      - subdomain.host
      tags: [takeover, dns]
    httpx:
      description: Run HTTP probes on subdomains
      targets_:
      - target.name
      - subdomain.host
secator/configs/workflows/url_crawl.yaml
ADDED
@@ -0,0 +1,29 @@
type: workflow
name: url_crawl
alias: urlcrawl
description: URL crawl (fast)
tags: [http, crawl]
options:
  match_codes: 200,204,301,302,307,401,403,405,500
input_types:
- url
tasks:
  _group:
    # gau:
    #   description: Search for passive URLs
    # gospider:
    #   description: Crawl URLs
    cariddi:
      description: Hunt URL patterns
    katana:
      description: Crawl URLs
  httpx:
    description: Run HTTP probes on crawled URLs
    targets_:
      type: url
      field: url
results:
- type: url
  condition: item._source == 'httpx'

- type: tag
secator/configs/workflows/url_dirsearch.yaml
ADDED
@@ -0,0 +1,29 @@
type: workflow
name: url_dirsearch
alias: dirfind
description: URL directory search
tags: [http, dir]
input_types:
- url
tasks:
  ffuf:
    description: Search for HTTP directories
    wordlist: /usr/share/seclists/Discovery/Web-Content/directory-list-2.3-small.txt
    targets_:
    - type: target
      field: '{name}/FUZZ'
  cariddi:
    description: Crawl HTTP directories for content
    targets_:
    - target.name
    - url.url
  httpx:
    description: Run HTTP probes on crawled URLs
    follow_redirects: True
    targets_:
    - type: url
      field: url
      condition: item.status_code == 0
results:
- type: url
  condition: item.status_code != 0
secator/configs/workflows/url_fuzz.yaml
ADDED
@@ -0,0 +1,35 @@
type: workflow
name: url_fuzz
alias: urlfuzz
description: URL fuzz (slow)
tags: [http, fuzz]
input_types:
- url
# options:
#   match_codes: 200,204,301,302,307,401,403,405,500
tasks:
  _group:
    # dirsearch:
    #   description: Fuzz URLs
    # feroxbuster:
    #   description: Fuzz URLs
    ffuf:
      description: Fuzz URLs
      targets_:
      - type: target
        field: '{name}/FUZZ'
  httpx:
    description: Run HTTP probes on crawled URLs
    targets_:
      type: url
      field: url
  katana:
    description: Run crawler on found directories
    targets_:
      type: url
      field: url
      condition: "'Index of' in item.title"
results:
- type: url
  condition: item._source == 'httpx'
# TODO: add deduplication based on the 'url' field
secator/configs/workflows/url_nuclei.yaml
ADDED
@@ -0,0 +1,11 @@
type: workflow
name: url_nuclei
alias: url_nuclei
description: URL vulnerability scan (nuclei)
tags: [http, nuclei]
input_types:
- url
tasks:
  nuclei:
    description: Search for HTTP vulns
    exclude_tags: [network, ssl, file, dns, osint, token-spray, headers]
secator/configs/workflows/url_vuln.yaml
ADDED
@@ -0,0 +1,55 @@
type: workflow
name: url_vuln
alias: url_vuln
description: URL vulnerability scan (gf, dalfox)
tags: [http, vulnerability]
input_types:
- url
tasks:
  _group:
    gf/xss:
      description: Hunt XSS params
      pattern: xss
    gf/lfi:
      description: Hunt LFI params
      pattern: lfi
    gf/ssrf:
      description: Hunt SSRF params
      pattern: ssrf
    gf/rce:
      description: Hunt RCE params
      pattern: rce
    gf/interestingparams:
      description: Hunt interesting params
      pattern: interestingparams
    gf/idor:
      description: Hunt IDOR params
      pattern: idor
    gf/debug_logic:
      description: Hunt debug params
      pattern: debug_logic

  dalfox:
    description: Attack XSS vulnerabilities
    targets_:
    - type: tag
      field: match
      condition: item._source == "gf"

  # TODO: Add support for SQLMap
  # sqlmap:
  #   description: Attack SQLI vulnerabilities
  #   targets_:
  #   - type: tag
  #     field: match
  #     condition: item.name in ['sqli']

  # TODO: Make this work, need transform functions to replace a parameter fetched dynamically by the keyword 'FUZZ'
  # ffuf:
  #   description: Attack LFI vulnerabilities
  #   targets_:
  #   - type: tag
  #     field: match
  #     transform:
  #       qsreplace: FUZZ
  #     condition: item.name in ['lfi']
secator/configs/workflows/wordpress.yaml
ADDED
@@ -0,0 +1,14 @@
type: workflow
name: wordpress
alias: wordpress
description: Wordpress vulnerability scan
tags: [http, wordpress, vulnerability]
input_types:
- url
tasks:
  _group:
    nuclei:
      description: Nuclei Wordpress scan
      tags: wordpress
    wpscan:
      description: WPScan
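Taken together, the bundled workflows can be enumerated with load_all() from config.py above. A closing sketch (output depends on the configs actually installed alongside the package):

from secator.config import ConfigLoader

for wf in ConfigLoader.load_all().workflow:
    print(f'{wf.name:<18} alias={wf.alias:<10} tags={wf.tags}')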