secator 0.0.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of secator might be problematic.

Files changed (114)
  1. secator/__init__.py +0 -0
  2. secator/celery.py +482 -0
  3. secator/cli.py +617 -0
  4. secator/config.py +137 -0
  5. secator/configs/__init__.py +0 -0
  6. secator/configs/profiles/__init__.py +0 -0
  7. secator/configs/profiles/aggressive.yaml +7 -0
  8. secator/configs/profiles/default.yaml +9 -0
  9. secator/configs/profiles/stealth.yaml +7 -0
  10. secator/configs/scans/__init__.py +0 -0
  11. secator/configs/scans/domain.yaml +18 -0
  12. secator/configs/scans/host.yaml +14 -0
  13. secator/configs/scans/network.yaml +17 -0
  14. secator/configs/scans/subdomain.yaml +8 -0
  15. secator/configs/scans/url.yaml +12 -0
  16. secator/configs/workflows/__init__.py +0 -0
  17. secator/configs/workflows/cidr_recon.yaml +28 -0
  18. secator/configs/workflows/code_scan.yaml +11 -0
  19. secator/configs/workflows/host_recon.yaml +41 -0
  20. secator/configs/workflows/port_scan.yaml +34 -0
  21. secator/configs/workflows/subdomain_recon.yaml +33 -0
  22. secator/configs/workflows/url_crawl.yaml +29 -0
  23. secator/configs/workflows/url_dirsearch.yaml +29 -0
  24. secator/configs/workflows/url_fuzz.yaml +35 -0
  25. secator/configs/workflows/url_nuclei.yaml +11 -0
  26. secator/configs/workflows/url_vuln.yaml +55 -0
  27. secator/configs/workflows/user_hunt.yaml +10 -0
  28. secator/configs/workflows/wordpress.yaml +14 -0
  29. secator/decorators.py +309 -0
  30. secator/definitions.py +165 -0
  31. secator/exporters/__init__.py +12 -0
  32. secator/exporters/_base.py +3 -0
  33. secator/exporters/csv.py +30 -0
  34. secator/exporters/gdrive.py +118 -0
  35. secator/exporters/json.py +15 -0
  36. secator/exporters/table.py +7 -0
  37. secator/exporters/txt.py +25 -0
  38. secator/hooks/__init__.py +0 -0
  39. secator/hooks/mongodb.py +212 -0
  40. secator/output_types/__init__.py +24 -0
  41. secator/output_types/_base.py +95 -0
  42. secator/output_types/exploit.py +50 -0
  43. secator/output_types/ip.py +33 -0
  44. secator/output_types/port.py +45 -0
  45. secator/output_types/progress.py +35 -0
  46. secator/output_types/record.py +34 -0
  47. secator/output_types/subdomain.py +42 -0
  48. secator/output_types/tag.py +46 -0
  49. secator/output_types/target.py +30 -0
  50. secator/output_types/url.py +76 -0
  51. secator/output_types/user_account.py +41 -0
  52. secator/output_types/vulnerability.py +97 -0
  53. secator/report.py +107 -0
  54. secator/rich.py +124 -0
  55. secator/runners/__init__.py +12 -0
  56. secator/runners/_base.py +833 -0
  57. secator/runners/_helpers.py +153 -0
  58. secator/runners/command.py +638 -0
  59. secator/runners/scan.py +65 -0
  60. secator/runners/task.py +106 -0
  61. secator/runners/workflow.py +135 -0
  62. secator/serializers/__init__.py +8 -0
  63. secator/serializers/dataclass.py +33 -0
  64. secator/serializers/json.py +15 -0
  65. secator/serializers/regex.py +17 -0
  66. secator/tasks/__init__.py +10 -0
  67. secator/tasks/_categories.py +304 -0
  68. secator/tasks/cariddi.py +102 -0
  69. secator/tasks/dalfox.py +65 -0
  70. secator/tasks/dirsearch.py +90 -0
  71. secator/tasks/dnsx.py +56 -0
  72. secator/tasks/dnsxbrute.py +34 -0
  73. secator/tasks/feroxbuster.py +91 -0
  74. secator/tasks/ffuf.py +86 -0
  75. secator/tasks/fping.py +44 -0
  76. secator/tasks/gau.py +47 -0
  77. secator/tasks/gf.py +33 -0
  78. secator/tasks/gospider.py +71 -0
  79. secator/tasks/grype.py +79 -0
  80. secator/tasks/h8mail.py +81 -0
  81. secator/tasks/httpx.py +99 -0
  82. secator/tasks/katana.py +133 -0
  83. secator/tasks/maigret.py +78 -0
  84. secator/tasks/mapcidr.py +32 -0
  85. secator/tasks/msfconsole.py +174 -0
  86. secator/tasks/naabu.py +52 -0
  87. secator/tasks/nmap.py +344 -0
  88. secator/tasks/nuclei.py +97 -0
  89. secator/tasks/searchsploit.py +52 -0
  90. secator/tasks/subfinder.py +40 -0
  91. secator/tasks/wpscan.py +179 -0
  92. secator/utils.py +445 -0
  93. secator/utils_test.py +183 -0
  94. secator-0.0.1.dist-info/LICENSE +60 -0
  95. secator-0.0.1.dist-info/METADATA +199 -0
  96. secator-0.0.1.dist-info/RECORD +114 -0
  97. secator-0.0.1.dist-info/WHEEL +5 -0
  98. secator-0.0.1.dist-info/entry_points.txt +2 -0
  99. secator-0.0.1.dist-info/top_level.txt +2 -0
  100. tests/__init__.py +0 -0
  101. tests/integration/__init__.py +0 -0
  102. tests/integration/inputs.py +42 -0
  103. tests/integration/outputs.py +392 -0
  104. tests/integration/test_scans.py +82 -0
  105. tests/integration/test_tasks.py +103 -0
  106. tests/integration/test_workflows.py +163 -0
  107. tests/performance/__init__.py +0 -0
  108. tests/performance/loadtester.py +56 -0
  109. tests/unit/__init__.py +0 -0
  110. tests/unit/test_celery.py +39 -0
  111. tests/unit/test_scans.py +0 -0
  112. tests/unit/test_serializers.py +51 -0
  113. tests/unit/test_tasks.py +348 -0
  114. tests/unit/test_workflows.py +96 -0
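Since a wheel is a plain zip archive, the listing above can be reproduced locally. A minimal sketch, assuming the wheel was downloaded as secator-0.0.1-py3-none-any.whl (the exact local filename is an assumption):

# List the files bundled in the wheel (a wheel is just a zip archive).
import zipfile

with zipfile.ZipFile('secator-0.0.1-py3-none-any.whl') as wheel:  # hypothetical local path
    for name in wheel.namelist():
        print(name)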
secator/tasks/searchsploit.py ADDED
@@ -0,0 +1,52 @@
+ from secator.decorators import task
+ from secator.definitions import (CVES, EXTRA_DATA, ID, MATCHED_AT, NAME,
+                                  PROVIDER, REFERENCE, TAGS)
+ from secator.output_types import Exploit
+ from secator.runners import Command
+
+
+ @task()
+ class searchsploit(Command):
+     """Exploit-DB command line search tool."""
+     cmd = 'searchsploit'
+     input_flag = None
+     json_flag = '--json'
+     opts = {
+         'strict': {'short': 's', 'is_flag': True, 'default': False, 'help': 'Strict match'}
+     }
+     opt_key_map = {}
+     output_types = [Exploit]
+     output_map = {
+         Exploit: {
+             NAME: lambda x: '-'.join(x['Title'].split('-')[1:]).strip(),
+             PROVIDER: lambda x: 'EDB',
+             ID: 'EDB-ID',
+             CVES: lambda x: [c for c in x['Codes'].split(';') if c.startswith('CVE-')],
+             REFERENCE: lambda x: f'https://exploit-db.com/exploits/{x["EDB-ID"]}',
+             EXTRA_DATA: lambda x: {'verified': x['Verified']}
+         }
+     }
+     install_cmd = 'sudo snap install searchsploit'
+     proxychains = False
+     proxy_socks5 = False
+     proxy_http = False
+     input_chunk_size = 1
+     profile = 'io'
+
+     @staticmethod
+     def before_init(self):
+         _in = self.input
+         self.matched_at = None
+         if '~' in _in:
+             split = _in.split('~')
+             self.matched_at = split[0]
+             self.input = split[1]
+         if isinstance(self.input, str):
+             self.input = self.input.replace('httpd', '').replace('/', ' ')
+
+     @staticmethod
+     def on_item_pre_convert(self, item):
+         if self.matched_at:
+             item[MATCHED_AT] = self.matched_at
+         item[TAGS] = [self.input.replace('\'', '')]
+         return item
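searchsploit --json emits records with keys such as Title, EDB-ID, Codes and Verified; the output_map above turns each record into an Exploit item field by field (a plain string value like 'EDB-ID' appears to name the source key to copy, while lambdas compute the value). The snippet below is a standalone sketch, not part of the package, applying the same transformations to a hypothetical record:

# Standalone sketch: replicate the Exploit field mapping on a hypothetical searchsploit JSON record.
# The keys mirror searchsploit's --json output; the values are made up for illustration.
record = {
    'Title': 'Apache 2.4.49 - Path Traversal',
    'EDB-ID': '50383',
    'Codes': 'CVE-2021-41773;OSVDB-XXXX',
    'Verified': 1,
}
exploit_fields = {
    'name': '-'.join(record['Title'].split('-')[1:]).strip(),                    # 'Path Traversal'
    'provider': 'EDB',
    'id': record['EDB-ID'],                                                      # copied straight from the record
    'cves': [c for c in record['Codes'].split(';') if c.startswith('CVE-')],     # ['CVE-2021-41773']
    'reference': f"https://exploit-db.com/exploits/{record['EDB-ID']}",
    'extra_data': {'verified': record['Verified']},
}
print(exploit_fields)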
secator/tasks/subfinder.py ADDED
@@ -0,0 +1,40 @@
+ from secator.decorators import task
+ from secator.definitions import (DELAY, DOMAIN, OPT_NOT_SUPPORTED, PROXY,
+                                  RATE_LIMIT, RETRIES, THREADS, TIMEOUT)
+ from secator.output_types import Subdomain
+ from secator.tasks._categories import ReconDns
+
+
+ @task()
+ class subfinder(ReconDns):
+     """Fast passive subdomain enumeration tool."""
+     cmd = 'subfinder -silent -cs'
+     file_flag = '-dL'
+     input_flag = '-d'
+     json_flag = '-json'
+     opt_key_map = {
+         DELAY: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: OPT_NOT_SUPPORTED,
+         TIMEOUT: 'timeout',
+         THREADS: 't'
+     }
+     opt_value_map = {
+         PROXY: lambda x: x.replace('http://', '').replace('https://', '') if x else None
+     }
+     output_map = {
+         Subdomain: {
+             DOMAIN: 'input',
+         }
+     }
+     output_types = [Subdomain]
+     install_cmd = 'go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest'
+     proxychains = False
+     proxy_http = True
+     proxy_socks5 = False
+     profile = 'io'
+
+     @staticmethod
+     def validate_item(self, item):
+         return item['input'] != 'localhost'
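The opt_key_map / opt_value_map pair above is what translates secator's generic option names into subfinder flags: DELAY and RETRIES are declared unsupported, THREADS maps to -t, and proxy URLs lose their scheme since subfinder expects host:port. Below is a simplified, hypothetical sketch of that translation; the real flag assembly happens in the Command runner, not in this file:

# Hypothetical sketch of the option translation expressed by opt_key_map / opt_value_map.
OPT_NOT_SUPPORTED = None

opt_key_map = {'delay': OPT_NOT_SUPPORTED, 'proxy': 'proxy', 'rate_limit': 'rate-limit',
               'retries': OPT_NOT_SUPPORTED, 'timeout': 'timeout', 'threads': 't'}
opt_value_map = {'proxy': lambda x: x.replace('http://', '').replace('https://', '') if x else None}

def build_flags(opts):
    """Translate generic options into subfinder CLI flags (simplified)."""
    flags = []
    for key, value in opts.items():
        mapped_key = opt_key_map.get(key, key)
        if mapped_key is OPT_NOT_SUPPORTED or value is None:
            continue  # silently drop options the tool does not support
        if key in opt_value_map:
            value = opt_value_map[key](value)
        flags.append(f'-{mapped_key} {value}')
    return ' '.join(flags)

print(build_flags({'threads': 50, 'proxy': 'http://127.0.0.1:8080', 'delay': 2}))
# -t 50 -proxy 127.0.0.1:8080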
secator/tasks/wpscan.py ADDED
@@ -0,0 +1,179 @@
+ import json
+ import os
+
+ from secator.decorators import task
+ from secator.definitions import (CONFIDENCE, CVSS_SCORE, DELAY, DESCRIPTION,
+                                  EXTRA_DATA, FOLLOW_REDIRECT, HEADER, ID,
+                                  MATCHED_AT, NAME, OPT_NOT_SUPPORTED, PROVIDER,
+                                  PROXY, RATE_LIMIT, REFERENCES, RETRIES,
+                                  SEVERITY, TAGS, DATA_FOLDER, THREADS, TIMEOUT,
+                                  URL, USER_AGENT)
+ from secator.output_types import Tag, Vulnerability
+ from secator.tasks._categories import VulnHttp
+ from secator.utils import get_file_timestamp
+
+
+ @task()
+ class wpscan(VulnHttp):
+     """Wordpress security scanner."""
+     cmd = 'wpscan --random-user-agent --force --verbose'
+     file_flag = None
+     input_flag = '--url'
+     input_type = URL
+     json_flag = '-f json'
+     opt_prefix = '--'
+     opts = {
+         'cookie_string': {'type': str, 'short': 'cookie', 'help': 'Cookie string, format: cookie1=value1;...'},
+         'api_token': {'type': str, 'short': 'token', 'help': 'WPScan API Token to display vulnerability data'},
+         'wp_content_dir': {'type': str, 'short': 'wcd', 'help': 'wp-content directory if custom or not detected'},
+         'wp_plugins_dir': {'type': str, 'short': 'wpd', 'help': 'wp-plugins directory if custom or not detected'},
+         'passwords': {'type': str, 'help': 'List of passwords to use during the password attack.'},
+         'usernames': {'type': str, 'help': 'List of usernames to use during the password attack.'},
+         'login_uri': {'type': str, 'short': 'lu', 'help': 'URI of the login page if different from /wp-login.php'},
+         'detection_mode': {'type': str, 'short': 'dm', 'help': 'Detection mode between mixed, passive, and aggressive'}
+     }
+     opt_key_map = {
+         HEADER: OPT_NOT_SUPPORTED,
+         DELAY: 'throttle',
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         TIMEOUT: 'request-timeout',
+         THREADS: 'max-threads',
+         USER_AGENT: 'user-agent',
+     }
+     opt_value_map = {
+         DELAY: lambda x: x * 1000
+     }
+     output_map = {
+         Vulnerability: {
+             ID: lambda x: '',
+             NAME: lambda x: x['to_s'].split(':')[0],
+             DESCRIPTION: lambda x: '',
+             SEVERITY: lambda x: 'info',
+             CONFIDENCE: lambda x: 'high' if x.get('confidence', 0) == 100 else 'low',
+             CVSS_SCORE: lambda x: 0,
+             MATCHED_AT: lambda x: x['url'],
+             TAGS: lambda x: [x['type']],
+             REFERENCES: lambda x: x.get('references', {}).get('url', []),
+             EXTRA_DATA: lambda x: {
+                 'data': x.get('interesting_entries', []),
+                 'found_by': x.get('found_by', ''),
+                 'confirmed_by': x.get('confirmed_by', {}),
+                 'metasploit': x.get('references', {}).get('metasploit', [])
+             },
+             PROVIDER: 'wpscan',
+         },
+     }
+     output_types = [Vulnerability, Tag]
+     install_cmd = 'sudo gem install wpscan'
+     proxychains = False
+     proxy_http = True
+     proxy_socks5 = False
+     ignore_return_code = True
+     profile = 'io'
+
+     @staticmethod
+     def on_init(self):
+         output_path = self.get_opt_value('output_path')
+         if not output_path:
+             timestr = get_file_timestamp()
+             output_path = f'{DATA_FOLDER}/wpscan_{timestr}.json'
+         self.output_path = output_path
+         self.cmd += f' -o {self.output_path}'
+
+     def yielder(self):
+         prev = self.print_item_count
+         self.print_item_count = False
+         yield from super().yielder()
+         if self.return_code != 0:
+             return
+         self.results = []
+         if not self.output_json:
+             return
+
+         note = f'wpscan JSON results saved to {self.output_path}'
+         if self.print_line:
+             self._print(note)
+
+         if os.path.exists(self.output_path):
+             with open(self.output_path, 'r') as f:
+                 data = json.load(f)
+
+             if self.orig:
+                 yield data
+                 return
+
+             # Get URL
+             target = data.get('target_url', self.targets)
+
+             # Wordpress version
+             version = data.get('version', {})
+             if version:
+                 wp_version = version['number']
+                 wp_version_status = version['status']
+                 if wp_version_status == 'outdated':
+                     vuln = version
+                     vuln.update({
+                         'url': target,
+                         'to_s': 'Wordpress outdated version',
+                         'type': wp_version,
+                         'references': {},
+                     })
+                     yield vuln
+
+             # Main theme
+             main_theme = data.get('main_theme', {})
+             if main_theme:
+                 version = main_theme.get('version', {})
+                 slug = main_theme['slug']
+                 location = main_theme['location']
+                 if version:
+                     number = version['number']
+                     latest_version = main_theme.get('latest_version')
+                     yield Tag(
+                         name=f'Wordpress theme - {slug} {number}',
+                         match=target,
+                         extra_data={
+                             'url': location,
+                             'latest_version': latest_version
+                         }
+                     )
+                     if (latest_version and number < latest_version):
+                         yield Vulnerability(
+                             matched_at=target,
+                             name=f'Wordpress theme - {slug} {number} outdated',
+                             severity='info'
+                         )
+
+             # Interesting findings
+             interesting_findings = data.get('interesting_findings', [])
+             for item in interesting_findings:
+                 yield item
+
+             # Plugins
+             plugins = data.get('plugins', {})
+             for _, data in plugins.items():
+                 version = data.get('version', {})
+                 slug = data['slug']
+                 location = data['location']
+                 if version:
+                     number = version['number']
+                     latest_version = data.get('latest_version')
+                     yield Tag(
+                         name=f'Wordpress plugin - {slug} {number}',
+                         match=target,
+                         extra_data={
+                             'url': location,
+                             'latest_version': latest_version
+                         }
+                     )
+                     if (latest_version and number < latest_version):
+                         yield Vulnerability(
+                             matched_at=target,
+                             name=f'Wordpress plugin - {slug} {number} outdated',
+                             severity='info'
+                         )
+
+         self.print_item_count = prev
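Once wpscan exits, the yielder above re-reads the JSON report from disk and walks its sections (version, main_theme, interesting_findings, plugins), yielding Tag items for detected components and Vulnerability items when a component looks outdated. The snippet below is a standalone sketch of the plugin branch applied to a hypothetical, trimmed-down report fragment; plain dicts stand in for the real Tag / Vulnerability output types:

# Standalone sketch: mimic the plugin post-processing on a hypothetical wpscan JSON fragment.
report = {
    'target_url': 'http://blog.example.com/',
    'plugins': {
        'contact-form-7': {
            'slug': 'contact-form-7',
            'location': 'http://blog.example.com/wp-content/plugins/contact-form-7/',
            'version': {'number': '5.7'},
            'latest_version': '5.9',
        }
    }
}

items = []
target = report['target_url']
for _, plugin in report['plugins'].items():
    version = plugin.get('version', {})
    if not version:
        continue
    number = version['number']
    latest = plugin.get('latest_version')
    items.append({'type': 'tag', 'name': f"Wordpress plugin - {plugin['slug']} {number}", 'match': target})
    if latest and number < latest:  # naive string comparison, as in the task above
        items.append({'type': 'vulnerability', 'name': f"Wordpress plugin - {plugin['slug']} {number} outdated",
                      'matched_at': target, 'severity': 'info'})

print(items)  # one Tag-like dict plus one Vulnerability-like dict for the outdated plugin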
secator/utils.py ADDED
@@ -0,0 +1,445 @@
+ import importlib
+ import inspect
+ import itertools
+ import logging
+ import mimetypes
+ import operator
+ import os
+ import re
+ import select
+ import sys
+ import warnings
+ from datetime import datetime
+ from importlib import import_module
+ from inspect import isclass
+ from pathlib import Path
+ from pkgutil import iter_modules
+ from urllib.parse import urlparse
+
+ import netifaces
+ import yaml
+ from furl import furl
+ from rich.markdown import Markdown
+
+ from secator.definitions import DEFAULT_STDIN_TIMEOUT, DEBUG, DEBUG_COMPONENT
+ from secator.rich import console
+
+ logger = logging.getLogger(__name__)
+
+
+ class TaskError(ValueError):
+     pass
+
+
+ def setup_logging(level):
+     """Setup logging.
+
+     Args:
+         level: logging level.
+
+     Returns:
+         logging.Logger: logger.
+     """
+     logger = logging.getLogger('secator')
+     logger.setLevel(level)
+     ch = logging.StreamHandler()
+     ch.setLevel(level)
+     formatter = logging.Formatter('%(message)s')
+     ch.setFormatter(formatter)
+     logger.addHandler(ch)
+     return logger
+
+
+ def expand_input(input):
+     """Expand user-provided input on the CLI:
+     - If input is a path, read the file and return the lines.
+     - If it's a comma-separated list, return the list.
+     - Otherwise, return the original input.
+
+     Args:
+         input (str): Input.
+
+     Returns:
+         str: Input.
+     """
+     if input is None:  # read from stdin
+         console.print('Waiting for input on stdin ...', style='bold yellow')
+         rlist, _, _ = select.select([sys.stdin], [], [], DEFAULT_STDIN_TIMEOUT)
+         if rlist:
+             data = sys.stdin.read().splitlines()
+         else:
+             console.print(
+                 'No input passed on stdin. Showing help page.',
+                 style='bold red')
+             return None
+         return data
+     elif os.path.exists(input):
+         if os.path.isfile(input):
+             with open(input, 'r') as f:
+                 data = f.read().splitlines()
+             return data
+         return input
+     elif isinstance(input, str):
+         input = input.split(',')
+
+     # If the list is only one item, return it instead of the list.
+     # Useful for commands that can take only one input at a time.
+     if isinstance(input, list) and len(input) == 1:
+         return input[0]
+
+     return input
+
+
+ def sanitize_url(http_url):
+     """Removes HTTP(s) ports 80 and 443 from HTTP(s) URL because it's ugly.
+
+     Args:
+         http_url (str): Input HTTP URL.
+
+     Returns:
+         str: Stripped HTTP URL.
+     """
+     url = urlparse(http_url)
+     if url.netloc.endswith(':80'):
+         url = url._replace(netloc=url.netloc.replace(':80', ''))
+     elif url.netloc.endswith(':443'):
+         url = url._replace(netloc=url.netloc.replace(':443', ''))
+     return url.geturl().rstrip('/')
+
+
+ def match_extensions(response, allowed_ext=['.html']):
+     """Check if a URL is a file from the HTTP response by looking at the content_type and the URL.
+
+     Args:
+         response (dict): httpx response.
+
+     Returns:
+         bool: True if it is a file, False otherwise.
+     """
+     content_type = response.get('content_type', '').split(';')[0]
+     url = response.get('final_url') or response['url']
+     ext = mimetypes.guess_extension(content_type)
+     ext2 = os.path.splitext(urlparse(url).path)[1]
+     if (ext and ext in allowed_ext) or (ext2 and ext2 in allowed_ext):
+         return True
+     return False
+
+
+ def filter_urls(urls, **remove_parts):
+     """Filter a list of URLs using `furl`.
+
+     Args:
+         urls (list): List of URLs to filter.
+         remove_parts (dict): Dict of URL pieces to remove.
+
+     Example:
+         >>> urls = ['http://localhost/test.js', 'http://localhost/test?a=1&b=2']
+         >>> filter_urls(urls, filter_ext=True)
+         ['http://localhost/test']
+
+     Returns:
+         list: List of filtered URLs.
+     """
+     if not remove_parts:
+         return urls
+     furl_remove_args = {
+         k.replace('remove_', ''): v for k, v in remove_parts.items()
+     }
+     return [
+         sanitize_url(furl(url).remove(**furl_remove_args).url)
+         for url in urls
+     ]
+
+
+ def deduplicate(array, attr=None):
+     """Deduplicate list of OutputType items.
+
+     Args:
+         array (list): Input list.
+
+     Returns:
+         list: Deduplicated list.
+     """
+     from secator.output_types import OUTPUT_TYPES
+     if attr and len(array) > 0 and isinstance(array[0], tuple(OUTPUT_TYPES)):
+         memo = set()
+         res = []
+         for sub in array:
+             if attr in sub.keys() and getattr(sub, attr) not in memo:
+                 res.append(sub)
+                 memo.add(getattr(sub, attr))
+         return sorted(res, key=operator.attrgetter(attr))
+     return sorted(list(dict.fromkeys(array)))
+
+
+ def setup_logger(level='info', format='%(message)s'):
+     logger = logging.getLogger('secator')
+     level = logging.getLevelName(level.upper())
+     logger.setLevel(level)
+     handler = logging.StreamHandler()
+     formatter = logging.Formatter(format)
+     handler.setFormatter(formatter)
+     logger.addHandler(handler)
+     return logger
+
+
+ def discover_internal_tasks():
+     """Find internal secator tasks."""
+     from secator.runners import Runner
+     package_dir = Path(__file__).resolve().parent / 'tasks'
+     task_classes = []
+     for (_, module_name, _) in iter_modules([str(package_dir)]):
+         if module_name.startswith('_'):
+             continue
+         try:
+             module = import_module(f'secator.tasks.{module_name}')
+         except ImportError:
+             continue
+         for attribute_name in dir(module):
+             attribute = getattr(module, attribute_name)
+             if isclass(attribute):
+                 bases = inspect.getmro(attribute)
+                 if Runner in bases and hasattr(attribute, '__task__'):
+                     task_classes.append(attribute)
+
+     # Sort task_classes by category
+     task_classes = sorted(
+         task_classes,
+         key=lambda x: (get_command_category(x), x.__name__))
+
+     return task_classes
+
+
+ def discover_external_tasks():
+     """Find external secator tasks."""
+     if not os.path.exists('config.secator'):
+         return []
+     with open('config.secator', 'r') as f:
+         classes = f.read().splitlines()
+     output = []
+     for cls_path in classes:
+         cls = import_dynamic(cls_path, cls_root='Command')
+         if not cls:
+             continue
+         # logger.warning(f'Added external tool {cls_path}')
+         output.append(cls)
+     return output
+
+
+ def discover_tasks():
+     """Find all secator tasks (internal + external)."""
+     return discover_internal_tasks() + discover_external_tasks()
+
+
+ def import_dynamic(cls_path, cls_root='Command'):
+     """Import class dynamically from class path.
+
+     Args:
+         cls_path (str): Class path.
+         cls_root (str): Root parent class.
+
+     Returns:
+         cls: Class object.
+     """
+     try:
+         package, name = cls_path.rsplit(".", maxsplit=1)
+         cls = getattr(importlib.import_module(package), name)
+         root_cls = inspect.getmro(cls)[-2]
+         if root_cls.__name__ == cls_root:
+             return cls
+         return None
+     except Exception:
+         warnings.warn(f'"{package}.{name}" not found.')
+         return None
+
+
+ def get_command_cls(cls_name):
+     """Get secator command by class name.
+
+     Args:
+         cls_name (str): Class name to load.
+
+     Returns:
+         cls: Class.
+     """
+     tasks_classes = discover_internal_tasks() + discover_external_tasks()
+     for task_cls in tasks_classes:
+         if task_cls.__name__ == cls_name:
+             return task_cls
+     return None
+
+
+ def get_command_category(command):
+     """Get the category of a command.
+
+     Args:
+         command (class): Command class.
+
+     Returns:
+         str: Command category.
+     """
+     base_cls = command.__bases__[0].__name__.replace('Command', '').replace('Runner', 'misc')
+     category = re.sub(r'(?<!^)(?=[A-Z])', '/', base_cls).lower()
+     return category
+
+
+ def merge_opts(*options):
+     """Merge multiple options dicts into a final one, overriding by order.
+
+     Args:
+         options (list): List of options dicts.
+
+     Returns:
+         dict: Options.
+     """
+     all_opts = {}
+     for opts in options:
+         if opts:
+             opts_noemtpy = {k: v for k, v in opts.items() if v is not None}
+             all_opts.update(opts_noemtpy)
+     return all_opts
+
+
+ def flatten(array: list):
+     """Flatten list if it contains multiple sublists.
+
+     Args:
+         array (list): Input list.
+
+     Returns:
+         list: Output list.
+     """
+     if isinstance(array, list) and len(array) > 0 and isinstance(array[0], list):
+         return list(itertools.chain(*array))
+     return array
+
+
+ def pluralize(word):
+     """Pluralize a word.
+
+     Args:
+         word (string): Word.
+
+     Returns:
+         string: Plural word.
+     """
+     if word.endswith('y'):
+         return word.rstrip('y') + 'ies'
+     else:
+         return f'{word}s'
+
+
+ def get_task_name_padding(classes=None):
+     all_tasks = discover_tasks()
+     classes = classes or all_tasks
+     return max([len(cls.__name__) for cls in discover_tasks() if cls in classes]) + 2
+
+
+ def load_fixture(name, fixtures_dir, ext=None, only_path=False):
+     fixture_path = f'{fixtures_dir}/{name}'
+     exts = ['.json', '.txt', '.xml', '.rc']
+     if ext:
+         exts = [ext]
+     for ext in exts:
+         path = f'{fixture_path}{ext}'
+         if os.path.exists(path):
+             if only_path:
+                 return path
+             with open(path) as f:
+                 content = f.read()
+                 if path.endswith(('.json', '.yaml')):
+                     return yaml.load(content, Loader=yaml.Loader)
+                 else:
+                     return content
+
+
+ def get_file_timestamp():
+     return datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%f_%p")
+
+
+ def detect_host(interface=None):
+     ifaces = netifaces.interfaces()
+     host = None
+     for iface in ifaces:
+         addrs = netifaces.ifaddresses(iface)
+         if (interface and iface != interface) or iface == 'lo':
+             continue
+         host = addrs[netifaces.AF_INET][0]['addr']
+         interface = iface
+         if 'tun' in iface:
+             break
+     return host
+
+
+ def find_list_item(array, val, key='id', default=None):
+     return next((item for item in array if item[key] == val), default)
+
+
+ def print_results_table(results, title=None, exclude_fields=[], log=False):
+     from secator.output_types import OUTPUT_TYPES
+     from secator.rich import build_table
+     _print = console.log if log else console.print
+     _print()
+     if title:
+         title = ' '.join(title.capitalize().split('_')) + ' results'
+         h1 = Markdown(f'# {title}')
+         _print(h1, style='bold magenta', width=50)
+         _print()
+     tables = []
+     for output_type in OUTPUT_TYPES:
+         if output_type.__name__ == 'Progress':
+             continue
+         items = [
+             item for item in results if item._type == output_type.get_name() and not item._duplicate
+         ]
+         if items:
+             _table = build_table(
+                 items,
+                 output_fields=output_type._table_fields,
+                 exclude_fields=exclude_fields,
+                 sort_by=output_type._sort_by)
+             tables.append(_table)
+             title = pluralize(items[0]._type).upper()
+             _print(f':wrench: {title}', style='bold gold3', justify='left')
+             _print(_table)
+             _print()
+     return tables
+
+
+ def rich_to_ansi(text):
+     """Convert text formatted with rich markup to standard string."""
+     from rich.console import Console
+     tmp_console = Console(file=None, highlight=False, color_system='truecolor')
+     with tmp_console.capture() as capture:
+         tmp_console.print(text, end='', soft_wrap=True)
+     return capture.get()
+
+
+ def debug(msg, sub='', id='', obj=None, obj_after=True, obj_breaklines=False, level=1):
+     """Print debug log if DEBUG >= level."""
+     if not DEBUG >= level:
+         return
+     if DEBUG_COMPONENT and not any(s.startswith(sub) for s in DEBUG_COMPONENT):
+         return
+     s = ''
+     if sub:
+         s += f'[dim yellow4]{sub:13s}[/] '
+     obj_str = ''
+     if obj:
+         sep = ', '
+         if obj_breaklines:
+             obj_str += '\n '
+             sep = '\n '
+         if isinstance(obj, dict):
+             obj_str += sep.join(f'[dim blue]{k}[/] [dim yellow]->[/] [dim green]{v}[/]' for k, v in obj.items() if v is not None)
+         elif isinstance(obj, list):
+             obj_str += sep.join(obj)
+     if obj_str and not obj_after:
+         s = f'{s} {obj_str} '
+     s += f'[dim yellow]{msg}[/] '
+     if obj_str and obj_after:
+         s = f'{s}: {obj_str}'
+     if id:
+         s += f' [italic dim white]\[{id}][/] '
+     s = rich_to_ansi(f'[dim red]\[debug] {s}[/]')
+     print(s)
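A few of the helpers above are easier to follow with concrete values. The snippet below is a standalone sketch: the inputs are made up, the expand_input / deduplicate / debug calls assume the package is installed, and the debug output shape depends on the DEBUG and DEBUG_COMPONENT settings imported from secator.definitions.

# Category derivation: the regex inserts '/' before interior capitals, so parent
# class names like VulnHttp or ReconDns become 'vuln/http' and 'recon/dns'.
import re
for base_cls in ('VulnHttp', 'ReconDns'):
    print(re.sub(r'(?<!^)(?=[A-Z])', '/', base_cls).lower())

# expand_input / deduplicate quick checks (assuming no file named 'a.com,b.com'
# exists in the working directory).
from secator.utils import deduplicate, expand_input
print(expand_input('a.com,b.com'))   # ['a.com', 'b.com']
print(expand_input('a.com'))         # 'a.com' (a single value collapses back to a scalar)
print(deduplicate(['b.example.com', 'a.example.com', 'b.example.com']))  # ['a.example.com', 'b.example.com']

# debug() only prints when DEBUG >= level and, if DEBUG_COMPONENT is set, when some
# configured component starts with the given sub. Rough shape of the output:
from secator.utils import debug
debug('running command', sub='celery.task', id='a1b2c3', obj={'cmd': 'httpx -u example.com'})
# [debug] celery.task   running command : cmd -> httpx -u example.com [a1b2c3]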