secator 0.1.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of secator might be problematic.

Files changed (99)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +421 -0
  4. secator/cli.py +927 -0
  5. secator/config.py +137 -0
  6. secator/configs/__init__.py +0 -0
  7. secator/configs/profiles/__init__.py +0 -0
  8. secator/configs/profiles/aggressive.yaml +7 -0
  9. secator/configs/profiles/default.yaml +9 -0
  10. secator/configs/profiles/stealth.yaml +7 -0
  11. secator/configs/scans/__init__.py +0 -0
  12. secator/configs/scans/domain.yaml +18 -0
  13. secator/configs/scans/host.yaml +14 -0
  14. secator/configs/scans/network.yaml +17 -0
  15. secator/configs/scans/subdomain.yaml +8 -0
  16. secator/configs/scans/url.yaml +12 -0
  17. secator/configs/workflows/__init__.py +0 -0
  18. secator/configs/workflows/cidr_recon.yaml +28 -0
  19. secator/configs/workflows/code_scan.yaml +11 -0
  20. secator/configs/workflows/host_recon.yaml +41 -0
  21. secator/configs/workflows/port_scan.yaml +34 -0
  22. secator/configs/workflows/subdomain_recon.yaml +33 -0
  23. secator/configs/workflows/url_crawl.yaml +29 -0
  24. secator/configs/workflows/url_dirsearch.yaml +29 -0
  25. secator/configs/workflows/url_fuzz.yaml +35 -0
  26. secator/configs/workflows/url_nuclei.yaml +11 -0
  27. secator/configs/workflows/url_vuln.yaml +55 -0
  28. secator/configs/workflows/user_hunt.yaml +10 -0
  29. secator/configs/workflows/wordpress.yaml +14 -0
  30. secator/decorators.py +346 -0
  31. secator/definitions.py +183 -0
  32. secator/exporters/__init__.py +12 -0
  33. secator/exporters/_base.py +3 -0
  34. secator/exporters/csv.py +29 -0
  35. secator/exporters/gdrive.py +118 -0
  36. secator/exporters/json.py +14 -0
  37. secator/exporters/table.py +7 -0
  38. secator/exporters/txt.py +24 -0
  39. secator/hooks/__init__.py +0 -0
  40. secator/hooks/mongodb.py +212 -0
  41. secator/output_types/__init__.py +24 -0
  42. secator/output_types/_base.py +95 -0
  43. secator/output_types/exploit.py +50 -0
  44. secator/output_types/ip.py +33 -0
  45. secator/output_types/port.py +45 -0
  46. secator/output_types/progress.py +35 -0
  47. secator/output_types/record.py +34 -0
  48. secator/output_types/subdomain.py +42 -0
  49. secator/output_types/tag.py +46 -0
  50. secator/output_types/target.py +30 -0
  51. secator/output_types/url.py +76 -0
  52. secator/output_types/user_account.py +41 -0
  53. secator/output_types/vulnerability.py +97 -0
  54. secator/report.py +95 -0
  55. secator/rich.py +123 -0
  56. secator/runners/__init__.py +12 -0
  57. secator/runners/_base.py +873 -0
  58. secator/runners/_helpers.py +154 -0
  59. secator/runners/command.py +674 -0
  60. secator/runners/scan.py +67 -0
  61. secator/runners/task.py +107 -0
  62. secator/runners/workflow.py +137 -0
  63. secator/serializers/__init__.py +8 -0
  64. secator/serializers/dataclass.py +33 -0
  65. secator/serializers/json.py +15 -0
  66. secator/serializers/regex.py +17 -0
  67. secator/tasks/__init__.py +10 -0
  68. secator/tasks/_categories.py +304 -0
  69. secator/tasks/cariddi.py +102 -0
  70. secator/tasks/dalfox.py +66 -0
  71. secator/tasks/dirsearch.py +88 -0
  72. secator/tasks/dnsx.py +56 -0
  73. secator/tasks/dnsxbrute.py +34 -0
  74. secator/tasks/feroxbuster.py +89 -0
  75. secator/tasks/ffuf.py +85 -0
  76. secator/tasks/fping.py +44 -0
  77. secator/tasks/gau.py +43 -0
  78. secator/tasks/gf.py +34 -0
  79. secator/tasks/gospider.py +71 -0
  80. secator/tasks/grype.py +78 -0
  81. secator/tasks/h8mail.py +80 -0
  82. secator/tasks/httpx.py +104 -0
  83. secator/tasks/katana.py +128 -0
  84. secator/tasks/maigret.py +78 -0
  85. secator/tasks/mapcidr.py +32 -0
  86. secator/tasks/msfconsole.py +176 -0
  87. secator/tasks/naabu.py +52 -0
  88. secator/tasks/nmap.py +341 -0
  89. secator/tasks/nuclei.py +97 -0
  90. secator/tasks/searchsploit.py +53 -0
  91. secator/tasks/subfinder.py +40 -0
  92. secator/tasks/wpscan.py +177 -0
  93. secator/utils.py +404 -0
  94. secator/utils_test.py +183 -0
  95. secator-0.1.0.dist-info/METADATA +379 -0
  96. secator-0.1.0.dist-info/RECORD +99 -0
  97. secator-0.1.0.dist-info/WHEEL +5 -0
  98. secator-0.1.0.dist-info/entry_points.txt +2 -0
  99. secator-0.1.0.dist-info/licenses/LICENSE +60 -0
secator/tasks/searchsploit.py ADDED
@@ -0,0 +1,53 @@
+ from secator.decorators import task
+ from secator.definitions import (CVES, EXTRA_DATA, ID, MATCHED_AT, NAME,
+                                  PROVIDER, REFERENCE, TAGS, OPT_NOT_SUPPORTED)
+ from secator.output_types import Exploit
+ from secator.runners import Command
+
+
+ @task()
+ class searchsploit(Command):
+     """Exploit-DB command line search tool."""
+     cmd = 'searchsploit'
+     input_flag = None
+     json_flag = '--json'
+     version_flag = OPT_NOT_SUPPORTED
+     opts = {
+         'strict': {'short': 's', 'is_flag': True, 'default': False, 'help': 'Strict match'}
+     }
+     opt_key_map = {}
+     output_types = [Exploit]
+     output_map = {
+         Exploit: {
+             NAME: lambda x: '-'.join(x['Title'].split('-')[1:]).strip(),
+             PROVIDER: lambda x: 'EDB',
+             ID: 'EDB-ID',
+             CVES: lambda x: [c for c in x['Codes'].split(';') if c.startswith('CVE-')],
+             REFERENCE: lambda x: f'https://exploit-db.com/exploits/{x["EDB-ID"]}',
+             EXTRA_DATA: lambda x: {'verified': x['Verified']}
+         }
+     }
+     install_cmd = 'sudo git clone https://gitlab.com/exploit-database/exploitdb.git /opt/exploitdb || true && sudo ln -sf /opt/exploitdb/searchsploit /usr/local/bin/searchsploit' # noqa: E501
+     proxychains = False
+     proxy_socks5 = False
+     proxy_http = False
+     input_chunk_size = 1
+     profile = 'io'
+
+     @staticmethod
+     def before_init(self):
+         _in = self.input
+         self.matched_at = None
+         if '~' in _in:
+             split = _in.split('~')
+             self.matched_at = split[0]
+             self.input = split[1]
+         if isinstance(self.input, str):
+             self.input = self.input.replace('httpd', '').replace('/', ' ')
+
+     @staticmethod
+     def on_item_pre_convert(self, item):
+         if self.matched_at:
+             item[MATCHED_AT] = self.matched_at
+         item[TAGS] = [self.input.replace('\'', '')]
+         return item
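
Note on the searchsploit output_map above: each lambda receives one raw record from searchsploit's --json output and maps it onto an Exploit field. A minimal sketch of that mapping, using a hypothetical record (field values invented for illustration; the keys are the ones referenced in output_map):

    # Hypothetical searchsploit JSON record (values invented).
    record = {'Title': 'Apache 2.4.49 - Path Traversal', 'EDB-ID': 50383, 'Codes': 'CVE-2021-41773', 'Verified': 1}
    name = '-'.join(record['Title'].split('-')[1:]).strip()                 # 'Path Traversal'
    cves = [c for c in record['Codes'].split(';') if c.startswith('CVE-')]  # ['CVE-2021-41773']
    reference = f"https://exploit-db.com/exploits/{record['EDB-ID']}"       # 'https://exploit-db.com/exploits/50383'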
secator/tasks/subfinder.py ADDED
@@ -0,0 +1,40 @@
+ from secator.decorators import task
+ from secator.definitions import (DELAY, DOMAIN, OPT_NOT_SUPPORTED, PROXY,
+                                  RATE_LIMIT, RETRIES, THREADS, TIMEOUT)
+ from secator.output_types import Subdomain
+ from secator.tasks._categories import ReconDns
+
+
+ @task()
+ class subfinder(ReconDns):
+     """Fast passive subdomain enumeration tool."""
+     cmd = 'subfinder -silent -cs'
+     file_flag = '-dL'
+     input_flag = '-d'
+     json_flag = '-json'
+     opt_key_map = {
+         DELAY: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: 'rate-limit',
+         RETRIES: OPT_NOT_SUPPORTED,
+         TIMEOUT: 'timeout',
+         THREADS: 't'
+     }
+     opt_value_map = {
+         PROXY: lambda x: x.replace('http://', '').replace('https://', '') if x else None
+     }
+     output_map = {
+         Subdomain: {
+             DOMAIN: 'input',
+         }
+     }
+     output_types = [Subdomain]
+     install_cmd = 'go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest'
+     proxychains = False
+     proxy_http = True
+     proxy_socks5 = False
+     profile = 'io'
+
+     @staticmethod
+     def validate_item(self, item):
+         return item['input'] != 'localhost'
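
Note on subfinder's opt_value_map above: the PROXY entry strips the URL scheme from the proxy value before it is passed to the tool, presumably because the underlying flag expects a bare host:port. A quick sketch (example value invented):

    strip_scheme = lambda x: x.replace('http://', '').replace('https://', '') if x else None
    strip_scheme('http://127.0.0.1:8080')  # -> '127.0.0.1:8080'
    strip_scheme(None)                     # -> None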
secator/tasks/wpscan.py ADDED
@@ -0,0 +1,177 @@
+ import json
+ import os
+
+ from secator.decorators import task
+ from secator.definitions import (CONFIDENCE, CVSS_SCORE, DELAY, DESCRIPTION,
+                                  EXTRA_DATA, FOLLOW_REDIRECT, HEADER, ID,
+                                  MATCHED_AT, NAME, OPT_NOT_SUPPORTED, OUTPUT_PATH, PROVIDER,
+                                  PROXY, RATE_LIMIT, REFERENCES, RETRIES,
+                                  SEVERITY, TAGS, THREADS, TIMEOUT,
+                                  URL, USER_AGENT)
+ from secator.output_types import Tag, Vulnerability
+ from secator.tasks._categories import VulnHttp
+
+
+ @task()
+ class wpscan(VulnHttp):
+     """Wordpress security scanner."""
+     cmd = 'wpscan --random-user-agent --force --verbose'
+     file_flag = None
+     input_flag = '--url'
+     input_type = URL
+     json_flag = '-f json'
+     opt_prefix = '--'
+     opts = {
+         'cookie_string': {'type': str, 'short': 'cookie', 'help': 'Cookie string, format: cookie1=value1;...'},
+         'api_token': {'type': str, 'short': 'token', 'help': 'WPScan API Token to display vulnerability data'},
+         'wp_content_dir': {'type': str, 'short': 'wcd', 'help': 'wp-content directory if custom or not detected'},
+         'wp_plugins_dir': {'type': str, 'short': 'wpd', 'help': 'wp-plugins directory if custom or not detected'},
+         'passwords': {'type': str, 'help': 'List of passwords to use during the password attack.'},
+         'usernames': {'type': str, 'help': 'List of usernames to use during the password attack.'},
+         'login_uri': {'type': str, 'short': 'lu', 'help': 'URI of the login page if different from /wp-login.php'},
+         'detection_mode': {'type': str, 'short': 'dm', 'help': 'Detection mode between mixed, passive, and aggressive'}
+     }
+     opt_key_map = {
+         HEADER: OPT_NOT_SUPPORTED,
+         DELAY: 'throttle',
+         FOLLOW_REDIRECT: OPT_NOT_SUPPORTED,
+         PROXY: 'proxy',
+         RATE_LIMIT: OPT_NOT_SUPPORTED,
+         RETRIES: OPT_NOT_SUPPORTED,
+         TIMEOUT: 'request-timeout',
+         THREADS: 'max-threads',
+         USER_AGENT: 'user-agent',
+     }
+     opt_value_map = {
+         DELAY: lambda x: x * 1000
+     }
+     output_map = {
+         Vulnerability: {
+             ID: lambda x: '',
+             NAME: lambda x: x['to_s'].split(':')[0],
+             DESCRIPTION: lambda x: '',
+             SEVERITY: lambda x: 'info',
+             CONFIDENCE: lambda x: 'high' if x.get('confidence', 0) == 100 else 'low',
+             CVSS_SCORE: lambda x: 0,
+             MATCHED_AT: lambda x: x['url'],
+             TAGS: lambda x: [x['type']],
+             REFERENCES: lambda x: x.get('references', {}).get('url', []),
+             EXTRA_DATA: lambda x: {
+                 'data': x.get('interesting_entries', []),
+                 'found_by': x.get('found_by', ''),
+                 'confirmed_by': x.get('confirmed_by', {}),
+                 'metasploit': x.get('references', {}).get('metasploit', [])
+             },
+             PROVIDER: 'wpscan',
+         },
+     }
+     output_types = [Vulnerability, Tag]
+     install_cmd = 'sudo gem install wpscan'
+     proxychains = False
+     proxy_http = True
+     proxy_socks5 = False
+     ignore_return_code = True
+     profile = 'io'
+
+     def yielder(self):
+         prev = self.print_item_count
+         self.print_item_count = False
+         yield from super().yielder()
+         if self.return_code != 0:
+             return
+         self.results = []
+         if not self.output_json:
+             return
+
+         note = f'wpscan JSON results saved to {self.output_path}'
+         if self.print_line:
+             self._print(note)
+
+         if os.path.exists(self.output_path):
+             with open(self.output_path, 'r') as f:
+                 data = json.load(f)
+
+             if self.orig:
+                 yield data
+                 return
+
+             # Get URL
+             target = data.get('target_url', self.targets)
+
+             # Wordpress version
+             version = data.get('version', {})
+             if version:
+                 wp_version = version['number']
+                 wp_version_status = version['status']
+                 if wp_version_status == 'outdated':
+                     vuln = version
+                     vuln.update({
+                         'url': target,
+                         'to_s': 'Wordpress outdated version',
+                         'type': wp_version,
+                         'references': {},
+                     })
+                     yield vuln
+
+             # Main theme
+             main_theme = data.get('main_theme', {})
+             if main_theme:
+                 version = main_theme.get('version', {})
+                 slug = main_theme['slug']
+                 location = main_theme['location']
+                 if version:
+                     number = version['number']
+                     latest_version = main_theme.get('latest_version')
+                     yield Tag(
+                         name=f'Wordpress theme - {slug} {number}',
+                         match=target,
+                         extra_data={
+                             'url': location,
+                             'latest_version': latest_version
+                         }
+                     )
+                     if (latest_version and number < latest_version):
+                         yield Vulnerability(
+                             matched_at=target,
+                             name=f'Wordpress theme - {slug} {number} outdated',
+                             severity='info'
+                         )
+
+             # Interesting findings
+             interesting_findings = data.get('interesting_findings', [])
+             for item in interesting_findings:
+                 yield item
+
+             # Plugins
+             plugins = data.get('plugins', {})
+             for _, data in plugins.items():
+                 version = data.get('version', {})
+                 slug = data['slug']
+                 location = data['location']
+                 if version:
+                     number = version['number']
+                     latest_version = data.get('latest_version')
+                     yield Tag(
+                         name=f'Wordpress plugin - {slug} {number}',
+                         match=target,
+                         extra_data={
+                             'url': location,
+                             'latest_version': latest_version
+                         }
+                     )
+                     if (latest_version and number < latest_version):
+                         yield Vulnerability(
+                             matched_at=target,
+                             name=f'Wordpress plugin - {slug} {number} outdated',
+                             severity='info'
+                         )
+
+         self.print_item_count = prev
+
+     @staticmethod
+     def on_init(self):
+         output_path = self.get_opt_value(OUTPUT_PATH)
+         if not output_path:
+             output_path = f'{self.reports_folder}/.outputs/{self.unique_name}.json'
+         self.output_path = output_path
+         self.cmd += f' -o {self.output_path}'
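
Note on the wpscan options above: the generic DELAY option (apparently expressed in seconds) is mapped to WPScan's --throttle flag, which takes milliseconds, hence the x * 1000 conversion in opt_value_map; on_init then appends -o <path> so that yielder() can re-read the JSON report from disk. A small sketch of the delay conversion (value invented):

    to_throttle = lambda x: x * 1000
    to_throttle(2)  # a 2-second delay becomes --throttle 2000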
secator/utils.py ADDED
@@ -0,0 +1,404 @@
+ import importlib
+ import inspect
+ import itertools
+ import logging
+ import operator
+ import os
+ import re
+ import select
+ import sys
+ import warnings
+ from datetime import datetime
+ from importlib import import_module
+ from inspect import isclass
+ from pathlib import Path
+ from pkgutil import iter_modules
+ from urllib.parse import urlparse, quote
+
+ import ifaddr
+ import yaml
+ from rich.markdown import Markdown
+
+ from secator.definitions import DEBUG, DEBUG_COMPONENT, DEFAULT_STDIN_TIMEOUT
+ from secator.rich import console
+
+ logger = logging.getLogger(__name__)
+
+ _tasks = []
+
+
+ class TaskError(ValueError):
+     pass
+
+
+ def setup_logging(level):
+     """Setup logging.
+
+     Args:
+         level: logging level.
+
+     Returns:
+         logging.Logger: logger.
+     """
+     logger = logging.getLogger('secator')
+     logger.setLevel(level)
+     ch = logging.StreamHandler()
+     ch.setLevel(level)
+     formatter = logging.Formatter('%(message)s')
+     ch.setFormatter(formatter)
+     logger.addHandler(ch)
+     return logger
+
+
+ def expand_input(input):
+     """Expand user-provided input on the CLI:
+     - If input is a path, read the file and return the lines.
+     - If it's a comma-separated list, return the list.
+     - Otherwise, return the original input.
+
+     Args:
+         input (str): Input.
+
+     Returns:
+         str: Input.
+     """
+     if input is None: # read from stdin
+         console.print('Waiting for input on stdin ...', style='bold yellow')
+         rlist, _, _ = select.select([sys.stdin], [], [], DEFAULT_STDIN_TIMEOUT)
+         if rlist:
+             data = sys.stdin.read().splitlines()
+         else:
+             console.print(
+                 'No input passed on stdin. Showing help page.',
+                 style='bold red')
+             return None
+         return data
+     elif os.path.exists(input):
+         if os.path.isfile(input):
+             with open(input, 'r') as f:
+                 data = f.read().splitlines()
+             return data
+         return input
+     elif isinstance(input, str):
+         input = input.split(',')
+
+     # If the list is only one item, return it instead of the list
+     # Usefull for commands that can take only one input at a time.
+     if isinstance(input, list) and len(input) == 1:
+         return input[0]
+
+     return input
+
+
+ def sanitize_url(http_url):
+     """Removes HTTP(s) ports 80 and 443 from HTTP(s) URL because it's ugly.
+
+     Args:
+         http_url (str): Input HTTP URL.
+
+     Returns:
+         str: Stripped HTTP URL.
+     """
+     url = urlparse(http_url)
+     if url.netloc.endswith(':80'):
+         url = url._replace(netloc=url.netloc.replace(':80', ''))
+     elif url.netloc.endswith(':443'):
+         url = url._replace(netloc=url.netloc.replace(':443', ''))
+     return url.geturl().rstrip('/')
+
+
+ def deduplicate(array, attr=None):
+     """Deduplicate list of OutputType items.
+
+     Args:
+         array (list): Input list.
+
+     Returns:
+         list: Deduplicated list.
+     """
+     from secator.output_types import OUTPUT_TYPES
+     if attr and len(array) > 0 and isinstance(array[0], tuple(OUTPUT_TYPES)):
+         memo = set()
+         res = []
+         for sub in array:
+             if attr in sub.keys() and getattr(sub, attr) not in memo:
+                 res.append(sub)
+                 memo.add(getattr(sub, attr))
+         return sorted(res, key=operator.attrgetter(attr))
+     return sorted(list(dict.fromkeys(array)))
+
+
+ def discover_internal_tasks():
+     """Find internal secator tasks."""
+     from secator.runners import Runner
+     package_dir = Path(__file__).resolve().parent / 'tasks'
+     task_classes = []
+     for (_, module_name, _) in iter_modules([str(package_dir)]):
+         if module_name.startswith('_'):
+             continue
+         try:
+             module = import_module(f'secator.tasks.{module_name}')
+         except ImportError as e:
+             console.print(f'[bold red]Could not import secator.tasks.{module_name}:[/]')
+             console.print(f'\t[bold red]{type(e).__name__}[/]: {str(e)}')
+             continue
+         for attribute_name in dir(module):
+             attribute = getattr(module, attribute_name)
+             if isclass(attribute):
+                 bases = inspect.getmro(attribute)
+                 if Runner in bases and hasattr(attribute, '__task__'):
+                     task_classes.append(attribute)
+
+     # Sort task_classes by category
+     task_classes = sorted(
+         task_classes,
+         key=lambda x: (get_command_category(x), x.__name__))
+
+     return task_classes
+
+
+ def discover_external_tasks():
+     """Find external secator tasks."""
+     if not os.path.exists('config.secator'):
+         return []
+     with open('config.secator', 'r') as f:
+         classes = f.read().splitlines()
+     output = []
+     for cls_path in classes:
+         cls = import_dynamic(cls_path, cls_root='Command')
+         if not cls:
+             continue
+         # logger.warning(f'Added external tool {cls_path}')
+         output.append(cls)
+     return output
+
+
+ def discover_tasks():
+     """Find all secator tasks (internal + external)."""
+     global _tasks
+     if not _tasks:
+         _tasks = discover_internal_tasks() + discover_external_tasks()
+     return _tasks
+
+
+ def import_dynamic(cls_path, cls_root='Command'):
+     """Import class dynamically from class path.
+
+     Args:
+         cls_path (str): Class path.
+         cls_root (str): Root parent class.
+
+     Returns:
+         cls: Class object.
+     """
+     try:
+         package, name = cls_path.rsplit(".", maxsplit=1)
+         cls = getattr(importlib.import_module(package), name)
+         root_cls = inspect.getmro(cls)[-2]
+         if root_cls.__name__ == cls_root:
+             return cls
+         return None
+     except Exception:
+         warnings.warn(f'"{package}.{name}" not found.')
+         return None
+
+
+ def get_command_cls(cls_name):
+     """Get secator command by class name.
+
+     Args:
+         cls_name (str): Class name to load.
+
+     Returns:
+         cls: Class.
+     """
+     tasks_classes = discover_tasks()
+     for task_cls in tasks_classes:
+         if task_cls.__name__ == cls_name:
+             return task_cls
+     return None
+
+
+ def get_command_category(command):
+     """Get the category of a command.
+
+     Args:
+         command (class): Command class.
+
+     Returns:
+         str: Command category.
+     """
+     base_cls = command.__bases__[0].__name__.replace('Command', '').replace('Runner', 'misc')
+     category = re.sub(r'(?<!^)(?=[A-Z])', '/', base_cls).lower()
+     return category
+
+
+ def merge_opts(*options):
+     """Merge multiple options dict into a final one, overriding by order.
+
+     Args:
+         list: List of options dict.
+
+     Returns:
+         dict: Options.
+     """
+     all_opts = {}
+     for opts in options:
+         if opts:
+             opts_noemtpy = {k: v for k, v in opts.items() if v is not None}
+             all_opts.update(opts_noemtpy)
+     return all_opts
+
+
+ def flatten(array: list):
+     """Flatten list if it contains multiple sublists.
+
+     Args:
+         l (list): Input list.
+
+     Returns:
+         list: Output list.
+     """
+     if isinstance(array, list) and len(array) > 0 and isinstance(array[0], list):
+         return list(itertools.chain(*array))
+     return array
+
+
+ def pluralize(word):
+     """Pluralize a word.
+
+     Args:
+         word (string): Word.
+
+     Returns:
+         string: Plural word.
+     """
+     if word.endswith('y'):
+         return word.rstrip('y') + 'ies'
+     else:
+         return f'{word}s'
+
+
+ def load_fixture(name, fixtures_dir, ext=None, only_path=False):
+     fixture_path = f'{fixtures_dir}/{name}'
+     exts = ['.json', '.txt', '.xml', '.rc']
+     if ext:
+         exts = [ext]
+     for ext in exts:
+         path = f'{fixture_path}{ext}'
+         if os.path.exists(path):
+             if only_path:
+                 return path
+             with open(path) as f:
+                 content = f.read()
+                 if path.endswith(('.json', '.yaml')):
+                     return yaml.load(content, Loader=yaml.Loader)
+                 else:
+                     return content
+
+
+ def get_file_timestamp():
+     return datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%f_%p")
+
+
+ def detect_host(interface=None):
+     adapters = ifaddr.get_adapters()
+     for adapter in adapters:
+         iface = adapter.name
+         if (interface and iface != interface) or iface == 'lo':
+             continue
+         return adapter.ips[0].ip
+     return None
+
+
+ def find_list_item(array, val, key='id', default=None):
+     return next((item for item in array if item[key] == val), default)
+
+
+ def print_results_table(results, title=None, exclude_fields=[], log=False):
+     from secator.output_types import OUTPUT_TYPES
+     from secator.rich import build_table
+     _print = console.log if log else console.print
+     _print()
+     if title:
+         title = ' '.join(title.capitalize().split('_')) + ' results'
+         h1 = Markdown(f'# {title}')
+         _print(h1, style='bold magenta', width=50)
+         _print()
+     tables = []
+     for output_type in OUTPUT_TYPES:
+         if output_type.__name__ == 'Progress':
+             continue
+         items = [
+             item for item in results if item._type == output_type.get_name() and not item._duplicate
+         ]
+         if items:
+             _table = build_table(
+                 items,
+                 output_fields=output_type._table_fields,
+                 exclude_fields=exclude_fields,
+                 sort_by=output_type._sort_by)
+             tables.append(_table)
+             title = pluralize(items[0]._type).upper()
+             _print(f':wrench: {title}', style='bold gold3', justify='left')
+             _print(_table)
+             _print()
+     return tables
+
+
+ def rich_to_ansi(text):
+     """Convert text formatted with rich markup to standard string."""
+     from rich.console import Console
+     tmp_console = Console(file=None, highlight=False, color_system='truecolor')
+     with tmp_console.capture() as capture:
+         tmp_console.print(text, end='', soft_wrap=True)
+     return capture.get()
+
+
+ def debug(msg, sub='', id='', obj=None, obj_after=True, obj_breaklines=False, level=1):
+     """Print debug log if DEBUG >= level."""
+     debug_comp_empty = DEBUG_COMPONENT == [""] or not DEBUG_COMPONENT
+     if not debug_comp_empty and not any(sub.startswith(s) for s in DEBUG_COMPONENT):
+         return
+     elif debug_comp_empty and not DEBUG >= level:
+         return
+     s = ''
+     if sub:
+         s += f'[dim yellow4]{sub:13s}[/] '
+     obj_str = ''
+     if obj:
+         sep = ', '
+         if obj_breaklines:
+             obj_str += '\n '
+             sep = '\n '
+         if isinstance(obj, dict):
+             obj_str += sep.join(f'[dim blue]{k}[/] [dim yellow]->[/] [dim green]{v}[/]' for k, v in obj.items() if v is not None)
+         elif isinstance(obj, list):
+             obj_str += sep.join(obj)
+     if obj_str and not obj_after:
+         s = f'{s} {obj_str} '
+     s += f'[dim yellow]{msg}[/] '
+     if obj_str and obj_after:
+         s = f'{s}: {obj_str}'
+     if id:
+         s += f' [italic dim white]\[{id}][/] '
+     s = rich_to_ansi(f'[dim red]\[debug] {s}[/]')
+     print(s)
+
+
+ def escape_mongodb_url(url):
+     """Escape username / password from MongoDB URL if any.
+
+     Args:
+         url (str): Full MongoDB URL string.
+
+     Returns:
+         str: Escaped MongoDB URL string.
+     """
+     match = re.search('mongodb://(?P<userpass>.*)@(?P<url>.*)', url)
+     if match:
+         url = match.group('url')
+         user, password = tuple(match.group('userpass').split(':'))
+         user, password = quote(user), quote(password)
+         return f'mongodb://{user}:{password}@{url}'
+     return url
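
Note on a few of the utils.py helpers above: they are small pure functions that are easiest to read from example inputs and outputs (sample values invented):

    sanitize_url('http://example.com:80/')   # -> 'http://example.com'
    pluralize('vulnerability')               # -> 'vulnerabilities'
    escape_mongodb_url('mongodb://user:p@ss@localhost:27017')
    # -> 'mongodb://user:p%40ss@localhost:27017'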