secator 0.1.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of secator might be problematic.

Files changed (99)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +421 -0
  4. secator/cli.py +927 -0
  5. secator/config.py +137 -0
  6. secator/configs/__init__.py +0 -0
  7. secator/configs/profiles/__init__.py +0 -0
  8. secator/configs/profiles/aggressive.yaml +7 -0
  9. secator/configs/profiles/default.yaml +9 -0
  10. secator/configs/profiles/stealth.yaml +7 -0
  11. secator/configs/scans/__init__.py +0 -0
  12. secator/configs/scans/domain.yaml +18 -0
  13. secator/configs/scans/host.yaml +14 -0
  14. secator/configs/scans/network.yaml +17 -0
  15. secator/configs/scans/subdomain.yaml +8 -0
  16. secator/configs/scans/url.yaml +12 -0
  17. secator/configs/workflows/__init__.py +0 -0
  18. secator/configs/workflows/cidr_recon.yaml +28 -0
  19. secator/configs/workflows/code_scan.yaml +11 -0
  20. secator/configs/workflows/host_recon.yaml +41 -0
  21. secator/configs/workflows/port_scan.yaml +34 -0
  22. secator/configs/workflows/subdomain_recon.yaml +33 -0
  23. secator/configs/workflows/url_crawl.yaml +29 -0
  24. secator/configs/workflows/url_dirsearch.yaml +29 -0
  25. secator/configs/workflows/url_fuzz.yaml +35 -0
  26. secator/configs/workflows/url_nuclei.yaml +11 -0
  27. secator/configs/workflows/url_vuln.yaml +55 -0
  28. secator/configs/workflows/user_hunt.yaml +10 -0
  29. secator/configs/workflows/wordpress.yaml +14 -0
  30. secator/decorators.py +346 -0
  31. secator/definitions.py +183 -0
  32. secator/exporters/__init__.py +12 -0
  33. secator/exporters/_base.py +3 -0
  34. secator/exporters/csv.py +29 -0
  35. secator/exporters/gdrive.py +118 -0
  36. secator/exporters/json.py +14 -0
  37. secator/exporters/table.py +7 -0
  38. secator/exporters/txt.py +24 -0
  39. secator/hooks/__init__.py +0 -0
  40. secator/hooks/mongodb.py +212 -0
  41. secator/output_types/__init__.py +24 -0
  42. secator/output_types/_base.py +95 -0
  43. secator/output_types/exploit.py +50 -0
  44. secator/output_types/ip.py +33 -0
  45. secator/output_types/port.py +45 -0
  46. secator/output_types/progress.py +35 -0
  47. secator/output_types/record.py +34 -0
  48. secator/output_types/subdomain.py +42 -0
  49. secator/output_types/tag.py +46 -0
  50. secator/output_types/target.py +30 -0
  51. secator/output_types/url.py +76 -0
  52. secator/output_types/user_account.py +41 -0
  53. secator/output_types/vulnerability.py +97 -0
  54. secator/report.py +95 -0
  55. secator/rich.py +123 -0
  56. secator/runners/__init__.py +12 -0
  57. secator/runners/_base.py +873 -0
  58. secator/runners/_helpers.py +154 -0
  59. secator/runners/command.py +674 -0
  60. secator/runners/scan.py +67 -0
  61. secator/runners/task.py +107 -0
  62. secator/runners/workflow.py +137 -0
  63. secator/serializers/__init__.py +8 -0
  64. secator/serializers/dataclass.py +33 -0
  65. secator/serializers/json.py +15 -0
  66. secator/serializers/regex.py +17 -0
  67. secator/tasks/__init__.py +10 -0
  68. secator/tasks/_categories.py +304 -0
  69. secator/tasks/cariddi.py +102 -0
  70. secator/tasks/dalfox.py +66 -0
  71. secator/tasks/dirsearch.py +88 -0
  72. secator/tasks/dnsx.py +56 -0
  73. secator/tasks/dnsxbrute.py +34 -0
  74. secator/tasks/feroxbuster.py +89 -0
  75. secator/tasks/ffuf.py +85 -0
  76. secator/tasks/fping.py +44 -0
  77. secator/tasks/gau.py +43 -0
  78. secator/tasks/gf.py +34 -0
  79. secator/tasks/gospider.py +71 -0
  80. secator/tasks/grype.py +78 -0
  81. secator/tasks/h8mail.py +80 -0
  82. secator/tasks/httpx.py +104 -0
  83. secator/tasks/katana.py +128 -0
  84. secator/tasks/maigret.py +78 -0
  85. secator/tasks/mapcidr.py +32 -0
  86. secator/tasks/msfconsole.py +176 -0
  87. secator/tasks/naabu.py +52 -0
  88. secator/tasks/nmap.py +341 -0
  89. secator/tasks/nuclei.py +97 -0
  90. secator/tasks/searchsploit.py +53 -0
  91. secator/tasks/subfinder.py +40 -0
  92. secator/tasks/wpscan.py +177 -0
  93. secator/utils.py +404 -0
  94. secator/utils_test.py +183 -0
  95. secator-0.1.0.dist-info/METADATA +379 -0
  96. secator-0.1.0.dist-info/RECORD +99 -0
  97. secator-0.1.0.dist-info/WHEEL +5 -0
  98. secator-0.1.0.dist-info/entry_points.txt +2 -0
  99. secator-0.1.0.dist-info/licenses/LICENSE +60 -0
secator/tasks/gospider.py ADDED
@@ -0,0 +1,71 @@
+ from furl import furl
+
+ from secator.decorators import task
+ from secator.definitions import (CONTENT_LENGTH, DELAY, DEPTH, FILTER_CODES,
+                                  FILTER_REGEX, FILTER_SIZE, FILTER_WORDS,
+                                  FOLLOW_REDIRECT, HEADER, MATCH_CODES,
+                                  MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD,
+                                  OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT, RETRIES,
+                                  STATUS_CODE, THREADS, TIMEOUT, URL, USER_AGENT)
+ from secator.output_types import Url
+ from secator.tasks._categories import HttpCrawler
+
+
+ @task()
+ class gospider(HttpCrawler):
+     """Fast web spider written in Go."""
+     cmd = 'gospider --js'
+     file_flag = '-S'
+     input_flag = '-s'
+     json_flag = '--json'
+     opt_prefix = '--'
+     opt_key_map = {
+         HEADER: 'header',
+         DELAY: 'delay',
+         DEPTH: 'depth',
+         FILTER_CODES: OPT_NOT_SUPPORTED,
+         FILTER_REGEX: OPT_NOT_SUPPORTED,
+         FILTER_SIZE: OPT_NOT_SUPPORTED,
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: 'no-redirect',
+         MATCH_CODES: OPT_NOT_SUPPORTED,
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: 'user-agent',
+     }
+     opt_value_map = {
+         FOLLOW_REDIRECT: lambda x: not x,
+         DELAY: lambda x: round(x) if isinstance(x, float) else x
+     }
+     output_map = {
+         Url: {
+             URL: 'output',
+             STATUS_CODE: 'status',
+             CONTENT_LENGTH: 'length',
+         }
+     }
+     install_cmd = 'go install -v github.com/jaeles-project/gospider@latest'
+     ignore_return_code = True
+     proxychains = False
+     proxy_socks5 = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
+     proxy_http = True  # with leaks... https://github.com/jaeles-project/gospider/issues/61
+     profile = 'cpu'
+
+     @staticmethod
+     def validate_item(self, item):
+         """Keep only items that match the same host."""
+         try:
+             netloc_in = furl(item['input']).netloc
+             netloc_out = furl(item['output']).netloc
+             if netloc_in != netloc_out:
+                 return False
+         except ValueError:  # gospider sometimes returns invalid output URLs
+             return False
+         return True
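
The validate_item hook above is what keeps the crawl scoped to the input host. A minimal sketch of that check (hypothetical item data, not actual gospider output):

from furl import furl

# A gospider-style item whose output URL points at another host.
item = {'input': 'https://example.com', 'output': 'https://cdn.example.org/app.js'}
netloc_in = furl(item['input']).netloc    # 'example.com'
netloc_out = furl(item['output']).netloc  # 'cdn.example.org'
print(netloc_in == netloc_out)  # False -> the item would be dropped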
secator/tasks/grype.py ADDED
@@ -0,0 +1,78 @@
+
+ from secator.decorators import task
+ from secator.definitions import (DELAY, FOLLOW_REDIRECT, HEADER,
+                                  OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT, RETRIES,
+                                  THREADS, TIMEOUT, USER_AGENT)
+ from secator.output_types import Vulnerability
+ from secator.tasks._categories import VulnCode
+
+
+ @task()
+ class grype(VulnCode):
+     """Vulnerability scanner for container images and filesystems."""
+     cmd = 'grype --quiet'
+     input_flag = ''
+     file_flag = OPT_NOT_SUPPORTED
+     json_flag = None
+     opt_prefix = '--'
+     opt_key_map = {
+         HEADER: OPT_NOT_SUPPORTED,
+         DELAY: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         PROXY: OPT_NOT_SUPPORTED,
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         THREADS: OPT_NOT_SUPPORTED,
+         TIMEOUT: OPT_NOT_SUPPORTED,
+         USER_AGENT: OPT_NOT_SUPPORTED
+     }
+     output_types = [Vulnerability]
+     install_cmd = (
+         'curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sudo sh -s -- -b /usr/local/bin'
+     )
+
+     @staticmethod
+     def item_loader(self, line):
+         """Load vulnerability dicts from grype line output."""
+         split = [i for i in line.split(' ') if i]
+         if len(split) not in (5, 6) or split[0] == 'NAME':
+             return None
+         version_fixed = None
+         if len(split) == 5:  # no version fixed
+             product, version, product_type, vuln_id, severity = tuple(split)
+         elif len(split) == 6:
+             product, version, version_fixed, product_type, vuln_id, severity = tuple(split)
+         extra_data = {
+             'lang': product_type,
+             'product': product,
+             'version': version,
+         }
+         if version_fixed:
+             extra_data['version_fixed'] = version_fixed
+         data = {
+             'id': vuln_id,
+             'name': vuln_id,
+             'matched_at': self.input,
+             'confidence': 'medium',
+             'severity': severity.lower(),
+             'provider': 'grype',
+             'cvss_score': -1,
+             'tags': [],
+         }
+         if vuln_id.startswith('GHSA'):
+             data['provider'] = 'github.com'
+             data['references'] = [f'https://github.com/advisories/{vuln_id}']
+             data['tags'].extend(['cve', 'ghsa'])
+             vuln = VulnCode.lookup_ghsa(vuln_id)
+             if vuln:
+                 data.update(vuln)
+                 data['severity'] = data['severity'] or severity.lower()
+             extra_data['ghsa_id'] = vuln_id
+         elif vuln_id.startswith('CVE'):
+             vuln = VulnCode.lookup_cve(vuln_id)
+             if vuln:
+                 vuln['tags'].append('cve')
+                 data.update(vuln)
+                 data['severity'] = data['severity'] or severity.lower()
+         data['extra_data'] = extra_data
+         return data
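
The item_loader above splits grype's whitespace-aligned table output into 5 or 6 columns, depending on whether a fixed version is reported. A minimal sketch on a hypothetical line (column layout assumed from the parser above, not from grype's docs):

# Hypothetical 6-column grype line: NAME INSTALLED FIXED-IN TYPE VULNERABILITY SEVERITY
line = 'log4j-core  2.14.1  2.17.1  java-archive  CVE-2021-44228  Critical'
split = [i for i in line.split(' ') if i]
product, version, version_fixed, product_type, vuln_id, severity = tuple(split)
print(vuln_id, severity.lower())  # CVE-2021-44228 critical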
secator/tasks/h8mail.py ADDED
@@ -0,0 +1,80 @@
+ import os
+ import json
+
+ from secator.decorators import task
+ from secator.definitions import EMAIL, OUTPUT_PATH
+ from secator.tasks._categories import OSInt
+ from secator.output_types import UserAccount
+
+
+ @task()
+ class h8mail(OSInt):
+     """Email information and password lookup tool."""
+     cmd = 'h8mail'
+     json_flag = '--json '
+     input_flag = '--targets'
+     input_type = EMAIL
+     file_flag = '-domain'
+     version_flag = '--help'
+     opt_prefix = '--'
+     opt_key_map = {
+
+     }
+     opts = {
+         'config': {'type': str, 'help': 'Configuration file for API keys'},
+         'local_breach': {'type': str, 'short': 'lb', 'help': 'Local breach file'}
+     }
+     output_map = {
+     }
+
+     install_cmd = 'pipx install h8mail'
+
+     @staticmethod
+     def on_start(self):
+         output_path = self.get_opt_value(OUTPUT_PATH)
+         if not output_path:
+             output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
+         self.output_path = output_path
+         self.cmd = self.cmd.replace('--json', f'--json {self.output_path}')
+
+     def yielder(self):
+         prev = self.print_item_count
+         self.print_item_count = False
+         list(super().yielder())
+         if self.return_code != 0:
+             return
+         self.results = []
+         if os.path.exists(self.output_path):
+             with open(self.output_path, 'r') as f:
+                 data = json.load(f)
+                 if self.orig:  # original h8mail output
+                     yield data
+                     return
+                 targets = data['targets']
+                 for target in targets:
+                     email = target['target']
+                     target_data = target.get('data', [])
+                     pwn_num = target['pwn_num']
+                     if not pwn_num > 0:
+                         continue
+                     if len(target_data) > 0:
+                         entries = target_data[0]
+                         for entry in entries:
+                             source, site_name = tuple(entry.split(':'))
+                             yield UserAccount(**{
+                                 "site_name": site_name,
+                                 "username": email.split('@')[0],
+                                 "email": email,
+                                 "extra_data": {
+                                     'source': source
+                                 },
+                             })
+                     else:
+                         yield UserAccount(**{
+                             "username": email.split('@')[0],
+                             "email": email,
+                             "extra_data": {
+                                 'source': self.get_opt_value('local_breach')
+                             },
+                         })
+         self.print_item_count = prev
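
The yielder above reads h8mail's JSON report rather than its stdout. A minimal sketch of the structure it expects (hypothetical report data, shaped after the parsing logic above):

data = {
    'targets': [{
        'target': 'alice@example.com',
        'pwn_num': 1,
        'data': [['hunter.io:example.com']],
    }]
}
for target in data['targets']:
    if target['pwn_num'] > 0:
        for entry in target['data'][0]:
            source, site_name = entry.split(':')
            print(target['target'], source, site_name)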
secator/tasks/httpx.py ADDED
@@ -0,0 +1,104 @@
+ import os
+
+ from secator.decorators import task
+ from secator.definitions import (DEFAULT_HTTPX_FLAGS,
+                                  DEFAULT_STORE_HTTP_RESPONSES, DELAY, DEPTH,
+                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
+                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
+                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, PROXY,
+                                  RATE_LIMIT, RETRIES, THREADS,
+                                  TIMEOUT, URL, USER_AGENT)
+ from secator.tasks._categories import Http
+ from secator.utils import sanitize_url
+
+
+ @task()
+ class httpx(Http):
+     """Fast and multi-purpose HTTP toolkit."""
+     cmd = f'httpx {DEFAULT_HTTPX_FLAGS}'
+     file_flag = '-l'
+     input_flag = '-u'
+     json_flag = '-json'
+     opts = {
+         # 'silent': {'is_flag': True, 'default': False, 'help': 'Silent mode'},
+         # 'td': {'is_flag': True, 'default': True, 'help': 'Tech detection'},
+         # 'irr': {'is_flag': True, 'default': False, 'help': 'Include http request / response'},
+         'fep': {'is_flag': True, 'default': False, 'help': 'Error page classification and filtering'},
+         'favicon': {'is_flag': True, 'default': False, 'help': 'Favicon hash'},
+         'jarm': {'is_flag': True, 'default': False, 'help': 'JARM fingerprint'},
+         'asn': {'is_flag': True, 'default': False, 'help': 'ASN detection'},
+         'cdn': {'is_flag': True, 'default': False, 'help': 'CDN detection'},
+         'debug_resp': {'is_flag': True, 'default': False, 'help': 'Debug response'},
+         'vhost': {'is_flag': True, 'default': False, 'help': 'Probe and display server supporting VHOST'},
+         'screenshot': {'is_flag': True, 'short': 'ss', 'default': False, 'help': 'Screenshot response'},
+         'system_chrome': {'is_flag': True, 'default': False, 'help': 'Use locally installed Chrome for screenshots'},
+         'headless_options': {'is_flag': False, 'short': 'ho', 'default': None, 'help': 'Additional headless Chrome options'},
+     }
+     opt_key_map = {
+         HEADER: 'header',
+         DELAY: 'delay',
+         DEPTH: OPT_NOT_SUPPORTED,
+         FILTER_CODES: 'filter-code',
+         FILTER_REGEX: 'filter-regex',
+         FILTER_SIZE: 'filter-length',
+         FILTER_WORDS: 'filter-word-count',
+         FOLLOW_REDIRECT: 'follow-redirects',
+         MATCH_CODES: 'match-code',
+         MATCH_REGEX: 'match-regex',
+         MATCH_SIZE: 'match-length',
+         MATCH_WORDS: 'match-word-count',
+         METHOD: 'x',
+         PROXY: 'proxy',
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: 'retries',
+         THREADS: 'threads',
+         TIMEOUT: 'timeout',
+         USER_AGENT: OPT_NOT_SUPPORTED,
+     }
+     opt_value_map = {
+         DELAY: lambda x: str(x) + 's' if x else None,
+     }
+     install_cmd = 'go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest'
+     proxychains = False
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'cpu'
+
+     @staticmethod
+     def on_init(self):
+         debug_resp = self.get_opt_value('debug_resp')
+         if debug_resp:
+             self.cmd = self.cmd.replace('-silent', '')
+         if DEFAULT_STORE_HTTP_RESPONSES:
+             self.output_response_path = f'{self.reports_folder}/response'
+             self.output_screenshot_path = f'{self.reports_folder}/screenshot'
+             os.makedirs(self.output_response_path, exist_ok=True)
+             os.makedirs(self.output_screenshot_path, exist_ok=True)
+             self.cmd += f' -sr -srd {self.reports_folder}'
+
+         # Exclude screenshot bytes and body bytes from output when taking screenshots
+         screenshot = self.get_opt_value('screenshot')
+         if screenshot:
+             self.cmd += ' -esb -ehb'
+
+     @staticmethod
+     def on_item_pre_convert(self, item):
+         for k, v in item.items():
+             if k == 'time':
+                 response_time = float(''.join(ch for ch in v if not ch.isalpha()))
+                 if v[-2:] == 'ms':
+                     response_time = response_time / 1000
+                 item[k] = response_time
+             elif k == URL:
+                 item[k] = sanitize_url(v)
+         item[URL] = item.get('final_url') or item[URL]
+         return item
+
+     @staticmethod
+     def on_end(self):
+         if DEFAULT_STORE_HTTP_RESPONSES:
+             if os.path.exists(self.output_response_path + '/index.txt'):
+                 os.remove(self.output_response_path + '/index.txt')
+             if os.path.exists(self.output_screenshot_path + '/index_screenshot.txt'):
+                 os.remove(self.output_screenshot_path + '/index_screenshot.txt')
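
The time handling in on_item_pre_convert strips the unit suffix httpx appends to its response time and normalizes milliseconds to seconds. A quick sketch of that conversion:

for v in ('251ms', '1.2s'):
    t = float(''.join(ch for ch in v if not ch.isalpha()))
    if v[-2:] == 'ms':
        t = t / 1000
    print(v, '->', t)  # 251ms -> 0.251, 1.2s -> 1.2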
secator/tasks/katana.py ADDED
@@ -0,0 +1,128 @@
+ import os
+ import json
+ from urllib.parse import urlparse
+
+ from secator.decorators import task
+ from secator.definitions import (CONTENT_TYPE, DEFAULT_KATANA_FLAGS,
+                                  DEFAULT_STORE_HTTP_RESPONSES, DELAY, DEPTH,
+                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
+                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST,
+                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
+                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, PROXY,
+                                  RATE_LIMIT, RETRIES, STATUS_CODE,
+                                  STORED_RESPONSE_PATH, TECH,
+                                  THREADS, TIME, TIMEOUT, URL, USER_AGENT, WEBSERVER, CONTENT_LENGTH)
+ from secator.output_types import Url, Tag
+ from secator.tasks._categories import HttpCrawler
+
+
+ @task()
+ class katana(HttpCrawler):
+     """Next-generation crawling and spidering framework."""
+     # TODO: add -fx for form detection and extract 'forms' from the output with a custom item_loader
+     # TODO: add -jsluice for JS parsing
+     cmd = f'katana {DEFAULT_KATANA_FLAGS}'
+     file_flag = '-list'
+     input_flag = '-u'
+     json_flag = '-jsonl'
+     opts = {
+         'headless': {'is_flag': True, 'short': 'hl', 'help': 'Headless mode'},
+         'system_chrome': {'is_flag': True, 'short': 'sc', 'help': 'Use locally installed Chrome browser'},
+         'form_extraction': {'is_flag': True, 'short': 'fx', 'help': 'Detect forms'}
+     }
+     opt_key_map = {
+         HEADER: 'headers',
+         DELAY: 'delay',
+         DEPTH: 'depth',
+         FILTER_CODES: OPT_NOT_SUPPORTED,
+         FILTER_REGEX: OPT_NOT_SUPPORTED,
+         FILTER_SIZE: OPT_NOT_SUPPORTED,
+         FILTER_WORDS: OPT_NOT_SUPPORTED,
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         MATCH_CODES: OPT_NOT_SUPPORTED,
+         MATCH_REGEX: OPT_NOT_SUPPORTED,
+         MATCH_SIZE: OPT_NOT_SUPPORTED,
+         MATCH_WORDS: OPT_NOT_SUPPORTED,
+         METHOD: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: 'retry',
+         THREADS: 'concurrency',
+         TIMEOUT: 'timeout',
+         USER_AGENT: OPT_NOT_SUPPORTED
+     }
+     opt_value_map = {
+         DELAY: lambda x: int(x) if isinstance(x, float) else x
+     }
+     output_map = {
+         Url: {
+             URL: lambda x: x['request']['endpoint'],
+             HOST: lambda x: urlparse(x['request']['endpoint']).netloc,
+             TIME: 'timestamp',
+             METHOD: lambda x: x['request']['method'],
+             STATUS_CODE: lambda x: x['response'].get('status_code'),
+             CONTENT_TYPE: lambda x: x['response'].get('headers', {}).get('content_type', ';').split(';')[0],
+             CONTENT_LENGTH: lambda x: x['response'].get('headers', {}).get('content_length', 0),
+             WEBSERVER: lambda x: x['response'].get('headers', {}).get('server', ''),
+             TECH: lambda x: x['response'].get('technologies', []),
+             STORED_RESPONSE_PATH: lambda x: x['response'].get('stored_response_path', '')
+             # TAGS: lambda x: x['response'].get('server')
+         }
+     }
+     item_loaders = []
+     install_cmd = 'go install -v github.com/projectdiscovery/katana/cmd/katana@latest'
+     proxychains = False
+     proxy_socks5 = True
+     proxy_http = True
+     profile = 'io'
+
+     @staticmethod
+     def item_loader(self, item):
+         try:
+             item = json.loads(item)
+         except json.JSONDecodeError:
+             return None
+
+         # Form detection
+         forms = item.get('response', {}).get('forms', [])
+         if forms:
+             for form in forms:
+                 method = form['method']
+                 yield Url(form['action'], host=urlparse(item['request']['endpoint']).netloc, method=method)
+                 yield Tag(
+                     name='form',
+                     match=form['action'],
+                     extra_data={
+                         'method': form['method'],
+                         'enctype': form.get('enctype', ''),
+                         'parameters': ','.join(form.get('parameters', []))
+                     }
+                 )
+         yield item
+
+     @staticmethod
+     def on_init(self):
+         debug_resp = self.get_opt_value('debug_resp')
+         if debug_resp:
+             self.cmd = self.cmd.replace('-silent', '')
+         if DEFAULT_STORE_HTTP_RESPONSES:
+             self.cmd += f' -sr -srd {self.reports_folder}'
+
+     @staticmethod
+     def on_item(self, item):
+         if not isinstance(item, Url):
+             return item
+         if DEFAULT_STORE_HTTP_RESPONSES and os.path.exists(item.stored_response_path):
+             with open(item.stored_response_path, 'r', encoding='latin-1') as fin:
+                 data = fin.read().splitlines(True)
+             first_line = data[0]
+             with open(item.stored_response_path, 'w', encoding='latin-1') as fout:
+                 fout.writelines(data[1:])
+                 fout.writelines('\n')
+                 fout.writelines(first_line)
+         return item
+
+     @staticmethod
+     def on_end(self):
+         if DEFAULT_STORE_HTTP_RESPONSES and os.path.exists(self.reports_folder + '/index.txt'):
+             os.remove(self.reports_folder + '/index.txt')
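
The on_item hook above rewrites each stored response file, moving its first line to the end; this assumes katana writes the request URL as the first line of the stored file, so the rewrite makes the file start with the raw HTTP response. A standalone sketch (hypothetical path and contents):

path = '/tmp/stored_response.txt'  # hypothetical
with open(path, 'w') as f:
    f.write('https://example.com\nHTTP/1.1 200 OK\n')
with open(path, 'r', encoding='latin-1') as fin:
    data = fin.read().splitlines(True)
first_line = data[0]
with open(path, 'w', encoding='latin-1') as fout:
    fout.writelines(data[1:])    # response body first
    fout.writelines('\n')
    fout.writelines(first_line)  # original first line moved to the end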
secator/tasks/maigret.py ADDED
@@ -0,0 +1,78 @@
+ import json
+ import logging
+ import os
+ import re
+
+ from secator.decorators import task
+ from secator.definitions import (DELAY, EXTRA_DATA, OPT_NOT_SUPPORTED, OUTPUT_PATH, PROXY,
+                                  RATE_LIMIT, RETRIES, SITE_NAME, THREADS,
+                                  TIMEOUT, URL, USERNAME)
+ from secator.output_types import UserAccount
+ from secator.tasks._categories import ReconUser
+
+ logger = logging.getLogger(__name__)
+
+
+ @task()
+ class maigret(ReconUser):
+     """Collect a dossier on a person by username."""
+     cmd = 'maigret'
+     file_flag = None
+     input_flag = None
+     json_flag = '--json ndjson'
+     opt_prefix = '--'
+     opts = {
+         'site': {'type': str, 'help': 'Sites to check'},
+     }
+     opt_key_map = {
+         DELAY: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: 'retries',
+         TIMEOUT: 'timeout',
+         THREADS: OPT_NOT_SUPPORTED
+     }
+     input_type = USERNAME
+     output_types = [UserAccount]
+     output_map = {
+         UserAccount: {
+             SITE_NAME: 'sitename',
+             URL: lambda x: x['status']['url'],
+             EXTRA_DATA: lambda x: x['status'].get('ids', {})
+         }
+     }
+     install_cmd = 'pipx install git+https://github.com/soxoj/maigret@6be2f409e58056b1ca8571a8151e53bef107dedc'
+     socks5_proxy = True
+     profile = 'io'
+
+     def yielder(self):
+         prev = self.print_item_count
+         self.print_item_count = False
+         yield from super().yielder()
+         if self.return_code != 0:
+             return
+         self.results = []
+         if not self.output_path:
+             match = re.search('JSON ndjson report for .* saved in (.*)', self.output)
+             if match is None:
+                 logger.warning('JSON output file not found in command output.')
+                 return
+             self.output_path = match.group(1)
+         note = f'maigret JSON results saved to {self.output_path}'
+         if self.print_line:
+             self._print(note)
+         if os.path.exists(self.output_path):
+             with open(self.output_path, 'r') as f:
+                 data = [json.loads(line) for line in f.read().splitlines()]
+             for item in data:
+                 yield item
+         self.print_item_count = prev
+
+     @staticmethod
+     def on_init(self):
+         output_path = self.get_opt_value(OUTPUT_PATH)
+         self.output_path = output_path
+
+     @staticmethod
+     def validate_item(self, item):
+         return item['http_status'] == 200
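
When no explicit output path is set, the yielder above recovers the report path from maigret's console output with a regex. A quick sketch on a hypothetical log line:

import re

output = 'JSON ndjson report for johndoe saved in /tmp/report_johndoe.ndjson'
match = re.search('JSON ndjson report for .* saved in (.*)', output)
print(match.group(1))  # /tmp/report_johndoe.ndjson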
secator/tasks/mapcidr.py ADDED
@@ -0,0 +1,32 @@
+ import validators
+
+ from secator.decorators import task
+ from secator.definitions import (CIDR_RANGE, OPT_NOT_SUPPORTED, PROXY,
+                                  RATE_LIMIT, RETRIES, THREADS, TIMEOUT)
+ from secator.output_types import Ip
+ from secator.tasks._categories import ReconIp
+
+
+ @task()
+ class mapcidr(ReconIp):
+     """Utility program to perform multiple operations on a given subnet/CIDR range."""
+     cmd = 'mapcidr -silent'
+     input_flag = '-cidr'
+     file_flag = '-cl'
+     install_cmd = 'go install -v github.com/projectdiscovery/mapcidr/cmd/mapcidr@latest'
+     input_type = CIDR_RANGE
+     output_types = [Ip]
+     opt_key_map = {
+         THREADS: OPT_NOT_SUPPORTED,
+         PROXY: OPT_NOT_SUPPORTED,
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         TIMEOUT: OPT_NOT_SUPPORTED,
+     }
+
+     @staticmethod
+     def item_loader(self, line):
+         if validators.ipv4(line) or validators.ipv6(line):
+             return {'ip': line, 'alive': False}
+         return None
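
The item_loader above only turns lines that parse as IPv4 or IPv6 addresses into Ip items, which filters out any non-IP output from mapcidr. A quick sketch:

import validators

for line in ('192.168.0.1', 'not-an-ip', '2001:db8::1'):
    if validators.ipv4(line) or validators.ipv6(line):
        print({'ip': line, 'alive': False})  # only the two valid addresses print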