secator-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of secator might be problematic.
- secator/__init__.py +0 -0
- secator/celery.py +482 -0
- secator/cli.py +617 -0
- secator/config.py +137 -0
- secator/configs/__init__.py +0 -0
- secator/configs/profiles/__init__.py +0 -0
- secator/configs/profiles/aggressive.yaml +7 -0
- secator/configs/profiles/default.yaml +9 -0
- secator/configs/profiles/stealth.yaml +7 -0
- secator/configs/scans/__init__.py +0 -0
- secator/configs/scans/domain.yaml +18 -0
- secator/configs/scans/host.yaml +14 -0
- secator/configs/scans/network.yaml +17 -0
- secator/configs/scans/subdomain.yaml +8 -0
- secator/configs/scans/url.yaml +12 -0
- secator/configs/workflows/__init__.py +0 -0
- secator/configs/workflows/cidr_recon.yaml +28 -0
- secator/configs/workflows/code_scan.yaml +11 -0
- secator/configs/workflows/host_recon.yaml +41 -0
- secator/configs/workflows/port_scan.yaml +34 -0
- secator/configs/workflows/subdomain_recon.yaml +33 -0
- secator/configs/workflows/url_crawl.yaml +29 -0
- secator/configs/workflows/url_dirsearch.yaml +29 -0
- secator/configs/workflows/url_fuzz.yaml +35 -0
- secator/configs/workflows/url_nuclei.yaml +11 -0
- secator/configs/workflows/url_vuln.yaml +55 -0
- secator/configs/workflows/user_hunt.yaml +10 -0
- secator/configs/workflows/wordpress.yaml +14 -0
- secator/decorators.py +309 -0
- secator/definitions.py +165 -0
- secator/exporters/__init__.py +12 -0
- secator/exporters/_base.py +3 -0
- secator/exporters/csv.py +30 -0
- secator/exporters/gdrive.py +118 -0
- secator/exporters/json.py +15 -0
- secator/exporters/table.py +7 -0
- secator/exporters/txt.py +25 -0
- secator/hooks/__init__.py +0 -0
- secator/hooks/mongodb.py +212 -0
- secator/output_types/__init__.py +24 -0
- secator/output_types/_base.py +95 -0
- secator/output_types/exploit.py +50 -0
- secator/output_types/ip.py +33 -0
- secator/output_types/port.py +45 -0
- secator/output_types/progress.py +35 -0
- secator/output_types/record.py +34 -0
- secator/output_types/subdomain.py +42 -0
- secator/output_types/tag.py +46 -0
- secator/output_types/target.py +30 -0
- secator/output_types/url.py +76 -0
- secator/output_types/user_account.py +41 -0
- secator/output_types/vulnerability.py +97 -0
- secator/report.py +107 -0
- secator/rich.py +124 -0
- secator/runners/__init__.py +12 -0
- secator/runners/_base.py +833 -0
- secator/runners/_helpers.py +153 -0
- secator/runners/command.py +638 -0
- secator/runners/scan.py +65 -0
- secator/runners/task.py +106 -0
- secator/runners/workflow.py +135 -0
- secator/serializers/__init__.py +8 -0
- secator/serializers/dataclass.py +33 -0
- secator/serializers/json.py +15 -0
- secator/serializers/regex.py +17 -0
- secator/tasks/__init__.py +10 -0
- secator/tasks/_categories.py +304 -0
- secator/tasks/cariddi.py +102 -0
- secator/tasks/dalfox.py +65 -0
- secator/tasks/dirsearch.py +90 -0
- secator/tasks/dnsx.py +56 -0
- secator/tasks/dnsxbrute.py +34 -0
- secator/tasks/feroxbuster.py +91 -0
- secator/tasks/ffuf.py +86 -0
- secator/tasks/fping.py +44 -0
- secator/tasks/gau.py +47 -0
- secator/tasks/gf.py +33 -0
- secator/tasks/gospider.py +71 -0
- secator/tasks/grype.py +79 -0
- secator/tasks/h8mail.py +81 -0
- secator/tasks/httpx.py +99 -0
- secator/tasks/katana.py +133 -0
- secator/tasks/maigret.py +78 -0
- secator/tasks/mapcidr.py +32 -0
- secator/tasks/msfconsole.py +174 -0
- secator/tasks/naabu.py +52 -0
- secator/tasks/nmap.py +344 -0
- secator/tasks/nuclei.py +97 -0
- secator/tasks/searchsploit.py +52 -0
- secator/tasks/subfinder.py +40 -0
- secator/tasks/wpscan.py +179 -0
- secator/utils.py +445 -0
- secator/utils_test.py +183 -0
- secator-0.0.1.dist-info/LICENSE +60 -0
- secator-0.0.1.dist-info/METADATA +199 -0
- secator-0.0.1.dist-info/RECORD +114 -0
- secator-0.0.1.dist-info/WHEEL +5 -0
- secator-0.0.1.dist-info/entry_points.txt +2 -0
- secator-0.0.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/integration/__init__.py +0 -0
- tests/integration/inputs.py +42 -0
- tests/integration/outputs.py +392 -0
- tests/integration/test_scans.py +82 -0
- tests/integration/test_tasks.py +103 -0
- tests/integration/test_workflows.py +163 -0
- tests/performance/__init__.py +0 -0
- tests/performance/loadtester.py +56 -0
- tests/unit/__init__.py +0 -0
- tests/unit/test_celery.py +39 -0
- tests/unit/test_scans.py +0 -0
- tests/unit/test_serializers.py +51 -0
- tests/unit/test_tasks.py +348 -0
- tests/unit/test_workflows.py +96 -0
secator/tasks/gospider.py
ADDED

@@ -0,0 +1,71 @@
from furl import furl

from secator.decorators import task
from secator.definitions import (CONTENT_LENGTH, DELAY, DEPTH, FILTER_CODES,
                                 FILTER_REGEX, FILTER_SIZE, FILTER_WORDS,
                                 FOLLOW_REDIRECT, HEADER, MATCH_CODES,
                                 MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD,
                                 OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT, RETRIES,
                                 STATUS_CODE, THREADS, TIMEOUT, URL, USER_AGENT)
from secator.output_types import Url
from secator.tasks._categories import HttpCrawler


@task()
class gospider(HttpCrawler):
    """Fast web spider written in Go."""
    cmd = 'gospider --js'
    file_flag = '-S'
    input_flag = '-s'
    json_flag = '--json'
    opt_prefix = '--'
    opt_key_map = {
        HEADER: 'header',
        DELAY: 'delay',
        DEPTH: 'depth',
        FILTER_CODES: OPT_NOT_SUPPORTED,
        FILTER_REGEX: OPT_NOT_SUPPORTED,
        FILTER_SIZE: OPT_NOT_SUPPORTED,
        FILTER_WORDS: OPT_NOT_SUPPORTED,
        FOLLOW_REDIRECT: 'no-redirect',
        MATCH_CODES: OPT_NOT_SUPPORTED,
        MATCH_REGEX: OPT_NOT_SUPPORTED,
        MATCH_SIZE: OPT_NOT_SUPPORTED,
        MATCH_WORDS: OPT_NOT_SUPPORTED,
        METHOD: OPT_NOT_SUPPORTED,
        PROXY: 'proxy',
        RATE_LIMIT: OPT_NOT_SUPPORTED,
        RETRIES: OPT_NOT_SUPPORTED,
        THREADS: 'threads',
        TIMEOUT: 'timeout',
        USER_AGENT: 'user-agent',
    }
    opt_value_map = {
        FOLLOW_REDIRECT: lambda x: not x,
        DELAY: lambda x: round(x) if isinstance(x, float) else x
    }
    output_map = {
        Url: {
            URL: 'output',
            STATUS_CODE: 'status',
            CONTENT_LENGTH: 'length',
        }
    }
    install_cmd = 'go install -v github.com/jaeles-project/gospider@latest'
    ignore_return_code = True
    proxychains = False
    proxy_socks5 = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
    proxy_http = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
    profile = 'cpu'

    @staticmethod
    def validate_item(self, item):
        """Keep only items that match the same host."""
        try:
            netloc_in = furl(item['input']).netloc
            netloc_out = furl(item['output']).netloc
            if netloc_in != netloc_out:
                return False
        except ValueError:  # gospider returns invalid URLs for output sometimes
            return False
        return True
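
For reference, a minimal standalone sketch of the same-host filter that validate_item applies (furl is the only dependency; the sample items are invented):

from furl import furl

def same_host(item):
    """Return True only when the crawled URL stays on the input host."""
    try:
        return furl(item['input']).netloc == furl(item['output']).netloc
    except ValueError:  # invalid URL in 'output'
        return False

items = [
    {'input': 'https://example.com', 'output': 'https://example.com/login'},
    {'input': 'https://example.com', 'output': 'https://cdn.other.org/app.js'},
]
print([i['output'] for i in items if same_host(i)])  # ['https://example.com/login']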
secator/tasks/grype.py
ADDED
@@ -0,0 +1,79 @@
from secator.decorators import task
from secator.definitions import (DELAY, FOLLOW_REDIRECT, HEADER,
                                 OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT, RETRIES,
                                 THREADS, TIMEOUT, USER_AGENT)
from secator.output_types import Vulnerability
from secator.tasks._categories import VulnCode


def grype_item_loader(self, line):
    """Load vulnerability dicts from grype line output."""
    split = [i for i in line.split(' ') if i]
    if len(split) not in (5, 6) or split[0] == 'NAME':
        return None
    version_fixed = None
    if len(split) == 5:  # no fixed version available
        product, version, product_type, vuln_id, severity = tuple(split)
    elif len(split) == 6:
        product, version, version_fixed, product_type, vuln_id, severity = tuple(split)
    extra_data = {
        'lang': product_type,
        'product': product,
        'version': version,
    }
    if version_fixed:
        extra_data['version_fixed'] = version_fixed
    data = {
        'id': vuln_id,
        'name': vuln_id,
        'matched_at': self.input,
        'confidence': 'medium',
        'severity': severity.lower(),
        'provider': 'grype',
        'cvss_score': -1,
        'tags': [],
    }
    if vuln_id.startswith('GHSA'):
        data['provider'] = 'github.com'
        data['references'] = [f'https://github.com/advisories/{vuln_id}']
        data['tags'].extend(['cve', 'ghsa'])
        vuln = VulnCode.lookup_ghsa(vuln_id)
        if vuln:
            data.update(vuln)
            data['severity'] = data['severity'] or severity.lower()
        extra_data['ghsa_id'] = vuln_id
    elif vuln_id.startswith('CVE'):
        vuln = VulnCode.lookup_cve(vuln_id)
        if vuln:
            vuln['tags'].append('cve')
            data.update(vuln)
            data['severity'] = data['severity'] or severity.lower()
    data['extra_data'] = extra_data
    return data


@task()
class grype(VulnCode):
    """Vulnerability scanner for container images and filesystems."""
    cmd = 'grype --quiet'
    input_flag = ''
    file_flag = OPT_NOT_SUPPORTED
    json_flag = None
    opt_prefix = '--'
    opt_key_map = {
        HEADER: OPT_NOT_SUPPORTED,
        DELAY: OPT_NOT_SUPPORTED,
        FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
        PROXY: OPT_NOT_SUPPORTED,
        RATE_LIMIT: OPT_NOT_SUPPORTED,
        RETRIES: OPT_NOT_SUPPORTED,
        THREADS: OPT_NOT_SUPPORTED,
        TIMEOUT: OPT_NOT_SUPPORTED,
        USER_AGENT: OPT_NOT_SUPPORTED
    }
    output_types = [Vulnerability]
    install_cmd = (
        'curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sudo sh -s -- -b /usr/local/bin'
    )
    item_loaders = [grype_item_loader]
secator/tasks/h8mail.py
ADDED
@@ -0,0 +1,81 @@
import os
import json

from secator.decorators import task
from secator.definitions import (EMAIL, DATA_FOLDER)
from secator.tasks._categories import OSInt
from secator.utils import get_file_timestamp
from secator.output_types import UserAccount


@task()
class h8mail(OSInt):
    """Email information and password lookup tool."""
    cmd = 'h8mail'
    json_flag = '--json '
    input_flag = '--targets'
    input_type = EMAIL
    file_flag = '-domain'
    opt_prefix = '--'
    opt_key_map = {}
    opts = {
        'config': {'type': str, 'help': 'Configuration file for API keys'},
        'local_breach': {'type': str, 'short': 'lb', 'help': 'Local breach file'}
    }
    output_map = {}

    install_cmd = 'pip3 install h8mail'

    @staticmethod
    def on_start(self):
        output_path = self.get_opt_value('output_path')
        if not output_path:
            timestr = get_file_timestamp()
            output_path = f'{DATA_FOLDER}/h8mail_{timestr}.json'
        self.output_path = output_path
        self.cmd = self.cmd.replace('--json', f'--json {self.output_path}')

    def yielder(self):
        prev = self.print_item_count
        self.print_item_count = False
        list(super().yielder())
        if self.return_code != 0:
            return
        self.results = []
        if os.path.exists(self.output_path):
            with open(self.output_path, 'r') as f:
                data = json.load(f)
            if self.orig:  # original h8mail output
                yield data
                return
            targets = data['targets']
            for target in targets:
                email = target['target']
                target_data = target.get('data', [])
                pwn_num = target['pwn_num']
                if pwn_num <= 0:
                    continue
                if len(target_data) > 0:
                    entries = target_data[0]
                    for entry in entries:
                        source, site_name = tuple(entry.split(':'))
                        yield UserAccount(**{
                            "site_name": site_name,
                            "username": email.split('@')[0],
                            "email": email,
                            "extra_data": {
                                'source': source
                            },
                        })
                else:
                    yield UserAccount(**{
                        "username": email.split('@')[0],
                        "email": email,
                        "extra_data": {
                            'source': self.get_opt_value('local_breach')
                        },
                    })
        self.print_item_count = prev
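
For reference, a minimal sketch of the report shape that yielder() consumes, with invented values; the 'source:site_name' entry format is inferred from the split above:

report = {
    'targets': [
        {
            'target': 'alice@example.com',
            'pwn_num': 1,
            'data': [['hibp:LinkedIn']],  # 'source:site_name' entries
        }
    ]
}
for target in report['targets']:
    if target['pwn_num'] <= 0:
        continue
    for entry in target['data'][0]:
        source, site_name = entry.split(':')
        print(target['target'], site_name, source)  # alice@example.com LinkedIn hibp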
secator/tasks/httpx.py
ADDED
@@ -0,0 +1,99 @@
import os
import uuid

from secator.decorators import task
from secator.definitions import (DEFAULT_HTTPX_FLAGS,
                                 DEFAULT_STORE_HTTP_RESPONSES, DELAY, DEPTH,
                                 FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
                                 FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
                                 MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
                                 MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, PROXY,
                                 RATE_LIMIT, RETRIES, TASKS_FOLDER, THREADS,
                                 TIMEOUT, URL, USER_AGENT)
from secator.tasks._categories import Http
from secator.utils import sanitize_url


@task()
class httpx(Http):
    """Fast and multi-purpose HTTP toolkit."""
    cmd = f'httpx {DEFAULT_HTTPX_FLAGS}'
    file_flag = '-l'
    input_flag = '-u'
    json_flag = '-json'
    opts = {
        # 'silent': {'is_flag': True, 'default': False, 'help': 'Silent mode'},
        # 'td': {'is_flag': True, 'default': True, 'help': 'Tech detection'},
        'irr': {'is_flag': True, 'default': False, 'help': 'Include http request / response'},
        'fep': {'is_flag': True, 'default': False, 'help': 'Error Page Classifier and Filtering'},
        'favicon': {'is_flag': True, 'default': False, 'help': 'Favicon hash'},
        'jarm': {'is_flag': True, 'default': False, 'help': 'Jarm fingerprint'},
        'asn': {'is_flag': True, 'default': False, 'help': 'ASN detection'},
        'cdn': {'is_flag': True, 'default': False, 'help': 'CDN detection'},
        'debug_resp': {'is_flag': True, 'default': False, 'help': 'Debug response'},
        'screenshot': {'is_flag': True, 'default': False, 'help': 'Screenshot response'}
    }
    opt_key_map = {
        HEADER: 'header',
        DELAY: 'delay',
        DEPTH: OPT_NOT_SUPPORTED,
        FILTER_CODES: 'filter-code',
        FILTER_REGEX: 'filter-regex',
        FILTER_SIZE: 'filter-length',
        FILTER_WORDS: 'filter-word-count',
        FOLLOW_REDIRECT: 'follow-redirects',
        MATCH_CODES: 'match-code',
        MATCH_REGEX: 'match-regex',
        MATCH_SIZE: 'match-length',
        MATCH_WORDS: 'match-word-count',
        METHOD: 'x',
        PROXY: 'proxy',
        RATE_LIMIT: 'rate-limit',
        RETRIES: 'retries',
        THREADS: 'threads',
        TIMEOUT: 'timeout',
        USER_AGENT: OPT_NOT_SUPPORTED,
    }
    opt_value_map = {
        DELAY: lambda x: str(x) + 's' if x else None,
    }
    install_cmd = 'go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest'
    proxychains = False
    proxy_socks5 = True
    proxy_http = True
    profile = 'cpu'

    @staticmethod
    def on_item_pre_convert(self, item):
        for k, v in item.items():
            if k == 'time':
                # Normalize httpx's 'time' field (e.g. '123.45ms' or '1.2s') to seconds
                response_time = float(''.join(ch for ch in v if not ch.isalpha()))
                if v[-2:] == 'ms':
                    response_time = response_time / 1000
                item[k] = response_time
            elif k == URL:
                item[k] = sanitize_url(v)
        item[URL] = item.get('final_url') or item[URL]
        return item

    @staticmethod
    def on_init(self):
        debug_resp = self.get_opt_value('debug_resp')
        if debug_resp:
            self.cmd = self.cmd.replace('-silent', '')
        if DEFAULT_STORE_HTTP_RESPONSES:
            _id = uuid.uuid4()
            output_path = f'{TASKS_FOLDER}/{_id}'
            self.output_response_path = f'{output_path}/response'
            self.output_screenshot_path = f'{output_path}/screenshot'
            os.makedirs(self.output_response_path, exist_ok=True)
            os.makedirs(self.output_screenshot_path, exist_ok=True)
            self.cmd += f' -sr -srd {output_path}'

    @staticmethod
    def on_end(self):
        if DEFAULT_STORE_HTTP_RESPONSES:
            if os.path.exists(self.output_response_path + '/index.txt'):
                os.remove(self.output_response_path + '/index.txt')
            # Check and remove the same file (the original checked index.txt but
            # removed index_screenshot.txt, which could raise FileNotFoundError)
            if os.path.exists(self.output_screenshot_path + '/index_screenshot.txt'):
                os.remove(self.output_screenshot_path + '/index_screenshot.txt')
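
For reference, the response-time normalization from on_item_pre_convert as a standalone function (the input strings are invented examples of httpx's duration format):

def to_seconds(v):
    """Strip the unit suffix and convert milliseconds to seconds."""
    t = float(''.join(ch for ch in v if not ch.isalpha()))
    return t / 1000 if v[-2:] == 'ms' else t

print(to_seconds('123.45ms'))  # 0.12345
print(to_seconds('1.2s'))      # 1.2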
secator/tasks/katana.py
ADDED
@@ -0,0 +1,133 @@
import os
import json
import uuid
from urllib.parse import urlparse

from secator.decorators import task
from secator.definitions import (CONTENT_TYPE, DEFAULT_KATANA_FLAGS,
                                 DEFAULT_STORE_HTTP_RESPONSES, DELAY, DEPTH,
                                 FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
                                 FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST,
                                 MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
                                 MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, PROXY,
                                 RATE_LIMIT, RETRIES, STATUS_CODE,
                                 STORED_RESPONSE_PATH, TASKS_FOLDER, TECH,
                                 THREADS, TIME, TIMEOUT, URL, USER_AGENT, WEBSERVER, CONTENT_LENGTH)
from secator.output_types import Url, Tag
from secator.tasks._categories import HttpCrawler


@task()
class katana(HttpCrawler):
    """Next-generation crawling and spidering framework."""
    # TODO: add -fx for form detection and extract 'forms' from the output with custom item_loader
    # TODO: add -jsluice for JS parsing
    cmd = f'katana {DEFAULT_KATANA_FLAGS}'
    file_flag = '-list'
    input_flag = '-u'
    json_flag = '-jsonl'
    opts = {
        'headless': {'is_flag': True, 'short': 'hl', 'help': 'Headless mode'},
        'system_chrome': {'is_flag': True, 'short': 'sc', 'help': 'Use local installed chrome browser'},
        'form_extraction': {'is_flag': True, 'short': 'fx', 'help': 'Detect forms'}
    }
    opt_key_map = {
        HEADER: 'headers',
        DELAY: 'delay',
        DEPTH: 'depth',
        FILTER_CODES: OPT_NOT_SUPPORTED,
        FILTER_REGEX: OPT_NOT_SUPPORTED,
        FILTER_SIZE: OPT_NOT_SUPPORTED,
        FILTER_WORDS: OPT_NOT_SUPPORTED,
        FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
        MATCH_CODES: OPT_NOT_SUPPORTED,
        MATCH_REGEX: OPT_NOT_SUPPORTED,
        MATCH_SIZE: OPT_NOT_SUPPORTED,
        MATCH_WORDS: OPT_NOT_SUPPORTED,
        METHOD: OPT_NOT_SUPPORTED,
        PROXY: 'proxy',
        RATE_LIMIT: 'rate-limit',
        RETRIES: 'retry',
        THREADS: 'concurrency',
        TIMEOUT: 'timeout',
        USER_AGENT: OPT_NOT_SUPPORTED
    }
    opt_value_map = {
        DELAY: lambda x: int(x) if isinstance(x, float) else x
    }
    output_map = {
        Url: {
            URL: lambda x: x['request']['endpoint'],
            HOST: lambda x: urlparse(x['request']['endpoint']).netloc,
            TIME: 'timestamp',
            METHOD: lambda x: x['request']['method'],
            STATUS_CODE: lambda x: x['response'].get('status_code'),
            CONTENT_TYPE: lambda x: x['response'].get('headers', {}).get('content_type', ';').split(';')[0],
            CONTENT_LENGTH: lambda x: x['response'].get('headers', {}).get('content_length', 0),
            WEBSERVER: lambda x: x['response'].get('headers', {}).get('server', ''),
            TECH: lambda x: x['response'].get('technologies', []),
            STORED_RESPONSE_PATH: lambda x: x['response'].get('stored_response_path', '')
            # TAGS: lambda x: x['response'].get('server')
        }
    }
    item_loaders = []
    install_cmd = 'go install -v github.com/projectdiscovery/katana/cmd/katana@latest'
    proxychains = False
    proxy_socks5 = True
    proxy_http = True
    profile = 'io'

    @staticmethod
    def item_loader(self, item):
        try:
            item = json.loads(item)
        except json.JSONDecodeError:
            return None

        # form detection
        forms = item.get('response', {}).get('forms', [])
        if forms:
            for form in forms:
                method = form['method']
                yield Url(form['action'], host=urlparse(item['request']['endpoint']).netloc, method=method)
                yield Tag(
                    name='form',
                    match=form['action'],
                    extra_data={
                        'method': form['method'],
                        'enctype': form.get('enctype', ''),
                        'parameters': ','.join(form.get('parameters', []))
                    }
                )
        yield item

    @staticmethod
    def on_init(self):
        debug_resp = self.get_opt_value('debug_resp')
        if debug_resp:
            self.cmd = self.cmd.replace('-silent', '')
        if DEFAULT_STORE_HTTP_RESPONSES:
            _id = uuid.uuid4()
            output_path = f'{TASKS_FOLDER}/{_id}'
            self.output_response_path = output_path
            os.makedirs(self.output_response_path, exist_ok=True)
            self.cmd += f' -sr -srd {output_path}'

    @staticmethod
    def on_end(self):
        if DEFAULT_STORE_HTTP_RESPONSES and os.path.exists(self.output_response_path + '/index.txt'):
            os.remove(self.output_response_path + '/index.txt')

    @staticmethod
    def on_item(self, item):
        if not isinstance(item, Url):
            return item
        if DEFAULT_STORE_HTTP_RESPONSES and os.path.exists(item.stored_response_path):
            # Rotate the first line of the stored response file to the end
            with open(item.stored_response_path, 'r') as fin:
                data = fin.read().splitlines(True)
            first_line = data[0]
            with open(item.stored_response_path, 'w') as fout:
                fout.writelines(data[1:])
                fout.writelines('\n')
                fout.writelines(first_line)
        return item
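
For reference, a katana -jsonl record reduced to the fields the output_map above reads (values invented for illustration), with the same extraction applied standalone:

import json
from urllib.parse import urlparse

line = json.dumps({
    'timestamp': '2023-01-01T00:00:00Z',
    'request': {'method': 'GET', 'endpoint': 'https://example.com/login'},
    'response': {'status_code': 200,
                 'headers': {'content_type': 'text/html; charset=utf-8', 'server': 'nginx'},
                 'technologies': ['Nginx']},
})
item = json.loads(line)
print(item['request']['endpoint'],
      urlparse(item['request']['endpoint']).netloc,
      item['response']['headers']['content_type'].split(';')[0])
# https://example.com/login example.com text/html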
secator/tasks/maigret.py
ADDED
@@ -0,0 +1,78 @@
import json
import logging
import os
import re

from secator.decorators import task
from secator.definitions import (DELAY, EXTRA_DATA, OPT_NOT_SUPPORTED, PROXY,
                                 RATE_LIMIT, RETRIES, SITE_NAME, THREADS,
                                 TIMEOUT, URL, USERNAME)
from secator.output_types import UserAccount
from secator.tasks._categories import ReconUser

logger = logging.getLogger(__name__)


@task()
class maigret(ReconUser):
    """Collect a dossier on a person by username."""
    cmd = 'maigret'
    file_flag = None
    input_flag = None
    json_flag = '--json ndjson'
    opt_prefix = '--'
    opts = {
        'site': {'type': str, 'help': 'Sites to check'},
    }
    opt_key_map = {
        DELAY: OPT_NOT_SUPPORTED,
        PROXY: 'proxy',
        RATE_LIMIT: OPT_NOT_SUPPORTED,
        RETRIES: 'retries',
        TIMEOUT: 'timeout',
        THREADS: OPT_NOT_SUPPORTED
    }
    input_type = USERNAME
    output_types = [UserAccount]
    output_map = {
        UserAccount: {
            SITE_NAME: 'sitename',
            URL: lambda x: x['status']['url'],
            EXTRA_DATA: lambda x: x['status'].get('ids', {})
        }
    }
    install_cmd = 'pip3 install maigret'
    socks5_proxy = True
    profile = 'io'

    def yielder(self):
        prev = self.print_item_count
        self.print_item_count = False
        yield from super().yielder()
        if self.return_code != 0:
            return
        self.results = []
        if not self.output_path:
            match = re.search('JSON ndjson report for .* saved in (.*)', self.output)
            if match is None:
                logger.warning('JSON output file not found in command output.')
                return
            self.output_path = match.group(1)
        note = f'maigret JSON results saved to {self.output_path}'
        if self.print_line:
            self._print(note)
        if os.path.exists(self.output_path):
            with open(self.output_path, 'r') as f:
                data = [json.loads(line) for line in f.read().splitlines()]
            for item in data:
                yield item
        self.print_item_count = prev

    @staticmethod
    def on_init(self):
        output_path = self.get_opt_value('output_path')
        self.output_path = output_path

    @staticmethod
    def validate_item(self, item):
        return item['http_status'] == 200
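
For reference, the report-path recovery regex from yielder() applied to a fabricated example of maigret's console output:

import re

output = 'JSON ndjson report for johndoe saved in /tmp/report_johndoe.ndjson'
match = re.search('JSON ndjson report for .* saved in (.*)', output)
if match:
    print(match.group(1))  # /tmp/report_johndoe.ndjson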
secator/tasks/mapcidr.py
ADDED
@@ -0,0 +1,32 @@
import validators

from secator.decorators import task
from secator.definitions import (CIDR_RANGE, OPT_NOT_SUPPORTED, PROXY,
                                 RATE_LIMIT, RETRIES, THREADS, TIMEOUT)
from secator.output_types import Ip
from secator.tasks._categories import ReconIp


@task()
class mapcidr(ReconIp):
    """Utility to perform multiple operations on given subnet/CIDR ranges."""
    cmd = 'mapcidr -silent'
    input_flag = '-cidr'
    file_flag = '-cl'
    install_cmd = 'go install -v github.com/projectdiscovery/mapcidr/cmd/mapcidr@latest'
    input_type = CIDR_RANGE
    output_types = [Ip]
    opt_key_map = {
        THREADS: OPT_NOT_SUPPORTED,
        PROXY: OPT_NOT_SUPPORTED,
        RATE_LIMIT: OPT_NOT_SUPPORTED,
        RETRIES: OPT_NOT_SUPPORTED,
        TIMEOUT: OPT_NOT_SUPPORTED,
    }

    @staticmethod
    def item_loader(self, line):
        if validators.ipv4(line) or validators.ipv6(line):
            return {'ip': line, 'alive': False}
        return None
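
For reference, a standalone check mirroring item_loader above: mapcidr prints one IP per line, and only syntactically valid IPv4/IPv6 lines become Ip items (the sample lines are invented):

import validators

for line in ['192.168.0.1', '2001:db8::1', 'not-an-ip']:
    if validators.ipv4(line) or validators.ipv6(line):
        print({'ip': line, 'alive': False})
# {'ip': '192.168.0.1', 'alive': False}
# {'ip': '2001:db8::1', 'alive': False}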