secator 0.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- secator/.gitignore +162 -0
- secator/__init__.py +0 -0
- secator/celery.py +453 -0
- secator/celery_signals.py +138 -0
- secator/celery_utils.py +320 -0
- secator/cli.py +2035 -0
- secator/cli_helper.py +395 -0
- secator/click.py +87 -0
- secator/config.py +670 -0
- secator/configs/__init__.py +0 -0
- secator/configs/profiles/__init__.py +0 -0
- secator/configs/profiles/aggressive.yaml +8 -0
- secator/configs/profiles/all_ports.yaml +7 -0
- secator/configs/profiles/full.yaml +31 -0
- secator/configs/profiles/http_headless.yaml +7 -0
- secator/configs/profiles/http_record.yaml +8 -0
- secator/configs/profiles/insane.yaml +8 -0
- secator/configs/profiles/paranoid.yaml +8 -0
- secator/configs/profiles/passive.yaml +11 -0
- secator/configs/profiles/polite.yaml +8 -0
- secator/configs/profiles/sneaky.yaml +8 -0
- secator/configs/profiles/tor.yaml +5 -0
- secator/configs/scans/__init__.py +0 -0
- secator/configs/scans/domain.yaml +31 -0
- secator/configs/scans/host.yaml +23 -0
- secator/configs/scans/network.yaml +30 -0
- secator/configs/scans/subdomain.yaml +27 -0
- secator/configs/scans/url.yaml +19 -0
- secator/configs/workflows/__init__.py +0 -0
- secator/configs/workflows/cidr_recon.yaml +48 -0
- secator/configs/workflows/code_scan.yaml +29 -0
- secator/configs/workflows/domain_recon.yaml +46 -0
- secator/configs/workflows/host_recon.yaml +95 -0
- secator/configs/workflows/subdomain_recon.yaml +120 -0
- secator/configs/workflows/url_bypass.yaml +15 -0
- secator/configs/workflows/url_crawl.yaml +98 -0
- secator/configs/workflows/url_dirsearch.yaml +62 -0
- secator/configs/workflows/url_fuzz.yaml +68 -0
- secator/configs/workflows/url_params_fuzz.yaml +66 -0
- secator/configs/workflows/url_secrets_hunt.yaml +23 -0
- secator/configs/workflows/url_vuln.yaml +91 -0
- secator/configs/workflows/user_hunt.yaml +29 -0
- secator/configs/workflows/wordpress.yaml +38 -0
- secator/cve.py +718 -0
- secator/decorators.py +7 -0
- secator/definitions.py +168 -0
- secator/exporters/__init__.py +14 -0
- secator/exporters/_base.py +3 -0
- secator/exporters/console.py +10 -0
- secator/exporters/csv.py +37 -0
- secator/exporters/gdrive.py +123 -0
- secator/exporters/json.py +16 -0
- secator/exporters/table.py +36 -0
- secator/exporters/txt.py +28 -0
- secator/hooks/__init__.py +0 -0
- secator/hooks/gcs.py +80 -0
- secator/hooks/mongodb.py +281 -0
- secator/installer.py +694 -0
- secator/loader.py +128 -0
- secator/output_types/__init__.py +49 -0
- secator/output_types/_base.py +108 -0
- secator/output_types/certificate.py +78 -0
- secator/output_types/domain.py +50 -0
- secator/output_types/error.py +42 -0
- secator/output_types/exploit.py +58 -0
- secator/output_types/info.py +24 -0
- secator/output_types/ip.py +47 -0
- secator/output_types/port.py +55 -0
- secator/output_types/progress.py +36 -0
- secator/output_types/record.py +36 -0
- secator/output_types/stat.py +41 -0
- secator/output_types/state.py +29 -0
- secator/output_types/subdomain.py +45 -0
- secator/output_types/tag.py +69 -0
- secator/output_types/target.py +38 -0
- secator/output_types/url.py +112 -0
- secator/output_types/user_account.py +41 -0
- secator/output_types/vulnerability.py +101 -0
- secator/output_types/warning.py +30 -0
- secator/report.py +140 -0
- secator/rich.py +130 -0
- secator/runners/__init__.py +14 -0
- secator/runners/_base.py +1240 -0
- secator/runners/_helpers.py +218 -0
- secator/runners/celery.py +18 -0
- secator/runners/command.py +1178 -0
- secator/runners/python.py +126 -0
- secator/runners/scan.py +87 -0
- secator/runners/task.py +81 -0
- secator/runners/workflow.py +168 -0
- secator/scans/__init__.py +29 -0
- secator/serializers/__init__.py +8 -0
- secator/serializers/dataclass.py +39 -0
- secator/serializers/json.py +45 -0
- secator/serializers/regex.py +25 -0
- secator/tasks/__init__.py +8 -0
- secator/tasks/_categories.py +487 -0
- secator/tasks/arjun.py +113 -0
- secator/tasks/arp.py +53 -0
- secator/tasks/arpscan.py +70 -0
- secator/tasks/bbot.py +372 -0
- secator/tasks/bup.py +118 -0
- secator/tasks/cariddi.py +193 -0
- secator/tasks/dalfox.py +87 -0
- secator/tasks/dirsearch.py +84 -0
- secator/tasks/dnsx.py +186 -0
- secator/tasks/feroxbuster.py +93 -0
- secator/tasks/ffuf.py +135 -0
- secator/tasks/fping.py +85 -0
- secator/tasks/gau.py +102 -0
- secator/tasks/getasn.py +60 -0
- secator/tasks/gf.py +36 -0
- secator/tasks/gitleaks.py +96 -0
- secator/tasks/gospider.py +84 -0
- secator/tasks/grype.py +109 -0
- secator/tasks/h8mail.py +75 -0
- secator/tasks/httpx.py +167 -0
- secator/tasks/jswhois.py +36 -0
- secator/tasks/katana.py +203 -0
- secator/tasks/maigret.py +87 -0
- secator/tasks/mapcidr.py +42 -0
- secator/tasks/msfconsole.py +179 -0
- secator/tasks/naabu.py +85 -0
- secator/tasks/nmap.py +487 -0
- secator/tasks/nuclei.py +151 -0
- secator/tasks/search_vulns.py +225 -0
- secator/tasks/searchsploit.py +109 -0
- secator/tasks/sshaudit.py +299 -0
- secator/tasks/subfinder.py +48 -0
- secator/tasks/testssl.py +283 -0
- secator/tasks/trivy.py +130 -0
- secator/tasks/trufflehog.py +240 -0
- secator/tasks/urlfinder.py +100 -0
- secator/tasks/wafw00f.py +106 -0
- secator/tasks/whois.py +34 -0
- secator/tasks/wpprobe.py +116 -0
- secator/tasks/wpscan.py +202 -0
- secator/tasks/x8.py +94 -0
- secator/tasks/xurlfind3r.py +83 -0
- secator/template.py +294 -0
- secator/thread.py +24 -0
- secator/tree.py +196 -0
- secator/utils.py +922 -0
- secator/utils_test.py +297 -0
- secator/workflows/__init__.py +29 -0
- secator-0.22.0.dist-info/METADATA +447 -0
- secator-0.22.0.dist-info/RECORD +150 -0
- secator-0.22.0.dist-info/WHEEL +4 -0
- secator-0.22.0.dist-info/entry_points.txt +2 -0
- secator-0.22.0.dist-info/licenses/LICENSE +60 -0
secator/utils.py
ADDED
|
@@ -0,0 +1,922 @@
|
|
|
1
|
+
import fnmatch
|
|
2
|
+
import importlib
|
|
3
|
+
import ipaddress
|
|
4
|
+
import itertools
|
|
5
|
+
import json
|
|
6
|
+
import logging
|
|
7
|
+
import operator
|
|
8
|
+
import os
|
|
9
|
+
import re
|
|
10
|
+
import select
|
|
11
|
+
import signal
|
|
12
|
+
import sys
|
|
13
|
+
import tldextract
|
|
14
|
+
import traceback
|
|
15
|
+
import validators
|
|
16
|
+
import warnings
|
|
17
|
+
|
|
18
|
+
from datetime import datetime, timedelta
|
|
19
|
+
from functools import reduce
|
|
20
|
+
from pathlib import Path, PurePath
|
|
21
|
+
from time import time
|
|
22
|
+
from urllib.parse import urlparse, quote
|
|
23
|
+
|
|
24
|
+
import humanize
|
|
25
|
+
import ifaddr
|
|
26
|
+
import yaml
|
|
27
|
+
|
|
28
|
+
from secator.definitions import (DEBUG, VERSION, DEV_PACKAGE, IP, HOST, CIDR_RANGE,
|
|
29
|
+
MAC_ADDRESS, SLUG, UUID, EMAIL, IBAN, URL, PATH, HOST_PORT, GCS_URL)
|
|
30
|
+
from secator.config import CONFIG, ROOT_FOLDER, LIB_FOLDER, download_file
|
|
31
|
+
from secator.rich import console
|
|
32
|
+
|
|
33
|
+
logger = logging.getLogger(__name__)
|
|
34
|
+
|
|
35
|
+
_tasks = []
|
|
36
|
+
|
|
37
|
+
TIMEDELTA_REGEX = re.compile(r'((?P<years>\d+?)y)?((?P<months>\d+?)M)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?') # noqa: E501
|
|
38
|
+
CAMEL_TO_SNAKE_REGEX = re.compile(r"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class TaskError(ValueError):
    """Raised when a secator task fails with an unrecoverable error."""
    pass
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def setup_logging(level):
    """Setup logging for the 'secator' logger.

    Args:
        level: logging level.

    Returns:
        logging.Logger: logger.
    """
    logger = logging.getLogger('secator')
    logger.setLevel(level)
    # Attach the stream handler only once: calling this function repeatedly
    # used to stack duplicate handlers, printing every message N times.
    if not logger.handlers:
        ch = logging.StreamHandler()
        formatter = logging.Formatter('%(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    # Keep existing handlers in sync with the requested level.
    for handler in logger.handlers:
        handler.setLevel(level)
    return logger
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def expand_input(input, ctx):
    """Expand user-provided input on the CLI:
    - If input is a path, read the file and return the lines.
    - If it's a comma-separated list, return the list.
    - Otherwise, return the original input.

    Args:
        input (str): Input.
        ctx (click.Context): Click context.

    Returns:
        str | list: Expanded input.
    """
    # NOTE(review): assumes ctx.obj carries 'piped_input', 'dry_run',
    # 'default_inputs', 'input_required' and 'input_types' keys, set by the
    # CLI entry point — confirm against the caller.
    piped_input = ctx.obj['piped_input']
    dry_run = ctx.obj['dry_run']
    default_inputs = ctx.obj['default_inputs']
    input_required = ctx.obj['input_required']
    if input is None:  # read from stdin
        # No stdin, input is required and no fallback: show help and bail out.
        if not piped_input and input_required and not default_inputs and not dry_run:
            console.print('No input passed on stdin. Showing help page.', style='bold red')
            ctx.get_help()
            sys.exit(1)
        elif piped_input:
            # Wait up to CONFIG.cli.stdin_timeout seconds for piped data.
            rlist, _, _ = select.select([sys.stdin], [], [], CONFIG.cli.stdin_timeout)
            if rlist:
                data = sys.stdin.read().splitlines()
                return data
            else:
                console.print('No input passed on stdin.', style='bold red')
                sys.exit(1)
        elif default_inputs:
            # Fall back to configured default inputs when nothing was passed.
            console.print('[bold yellow]No inputs provided, using default inputs:[/]')
            for inp in default_inputs:
                console.print(f' • {inp}')
            return default_inputs
        elif not dry_run:
            return []
    elif os.path.exists(input):
        input_types = ctx.obj['input_types']
        # When the command accepts paths directly, keep the path as-is
        # instead of expanding the file's content.
        if not input_types or 'path' in input_types:
            return input
        elif os.path.isfile(input):
            with open(input, 'r') as f:
                data = f.read().splitlines()
                return data
        return input
    elif isinstance(input, str):
        input = input.split(',')

    # If the list is only one item, return it instead of the list.
    # Useful for commands that can take only one input at a time.
    if isinstance(input, list) and len(input) == 1:
        return input[0]

    # Dry runs still need a placeholder target to render the command line.
    if ctx.obj['dry_run'] and not input:
        return ['TARGET']

    return input
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def sanitize_url(http_url):
    """Removes HTTP(s) ports 80 and 443 from HTTP(s) URL because it's ugly.

    Args:
        http_url (str): Input HTTP URL.

    Returns:
        str: Stripped HTTP URL (also stripped of any trailing slash).
    """
    url = urlparse(http_url)
    # Strip the port as a suffix only. The previous str.replace(':80', '')
    # could also corrupt an identical substring earlier in the netloc,
    # e.g. in the userinfo part of 'http://user:80@host:80'.
    if url.netloc.endswith(':80'):
        url = url._replace(netloc=url.netloc[:-len(':80')])
    elif url.netloc.endswith(':443'):
        url = url._replace(netloc=url.netloc[:-len(':443')])
    return url.geturl().rstrip('/')
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def deduplicate(array, attr=None):
    """Deduplicate a list of OutputType items.

    Args:
        array (list): Input list.
        attr (str, Optional): Attribute name to deduplicate OutputType items on.

    Returns:
        list: Deduplicated, sorted list.
    """
    from secator.output_types import OUTPUT_TYPES
    dedupe_by_attr = attr and len(array) > 0 and isinstance(array[0], tuple(OUTPUT_TYPES))
    if not dedupe_by_attr:
        # Plain values: dict.fromkeys keeps first occurrences in order.
        return sorted(list(dict.fromkeys(array)))
    seen = set()
    unique = []
    for item in array:
        if attr not in item.keys():
            continue
        value = getattr(item, attr)
        if value in seen:
            continue
        unique.append(item)
        seen.add(value)
    return sorted(unique, key=operator.attrgetter(attr))
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def import_dynamic(path, name=None):
    """Import class or module dynamically from path.

    Args:
        path (str): Dotted path to the module.
        name (str): If specified, does a getattr() on the module to get this attribute.

    Examples:
        >>> import_dynamic('secator.exporters', name='CsvExporter')
        >>> import_dynamic('secator.hooks.mongodb', name='HOOKS')

    Returns:
        module | object | None: Imported object, or None (with a warning) on failure.
    """
    try:
        target = importlib.import_module(path)
        if name:
            target = getattr(target, name)
            if target is None:
                # A None attribute is treated like a missing one.
                raise ValueError(f'{name} is None')
        return target
    except Exception:
        full_path = f'{path}.{name}' if name else path
        warnings.warn(f'"{full_path}" not found.', category=UserWarning, stacklevel=2)
        return None
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def get_command_category(command):
    """Get the category of a command.

    Args:
        command (class): Command class exposing a `tags` attribute.

    Returns:
        str: Slash-joined tags, or 'misc' when the command has no tags.
    """
    return '/'.join(command.tags) if command.tags else 'misc'
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def merge_opts(*options):
    """Merge multiple options dicts into one, later dicts overriding earlier ones.

    Args:
        *options (dict): Options dicts; None entries are skipped.

    Returns:
        dict: Merged options with None values filtered out.
    """
    merged = {}
    for opts in options:
        if not opts:
            continue
        merged.update({key: value for key, value in opts.items() if value is not None})
    return merged
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def flatten(array: list):
    """Flatten a list of lists into a single list.

    Flattening only happens when the first element is itself a list (a shallow
    heuristic: mixed lists are returned unchanged).

    Args:
        array (list): Input list.

    Returns:
        list: Flattened list, or the input unchanged.
    """
    is_nested = isinstance(array, list) and bool(array) and isinstance(array[0], list)
    return list(itertools.chain(*array)) if is_nested else array
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def pluralize(word):
    """Pluralize a word.

    Args:
        word (str): Word.

    Returns:
        str: Plural word.
    """
    if word.endswith('y'):
        # word[:-1] drops exactly one trailing 'y'; the previous
        # rstrip('y') stripped ALL trailing 'y' characters.
        return word[:-1] + 'ies'
    elif word.endswith('s'):
        return word + 'es'
    return f'{word}s'
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
def load_fixture(name, fixtures_dir, ext=None, only_path=False):
    """Load a fixture from a fixtures dir. Optionally parse its content if it's JSON / YAML.

    Args:
        name (str): Fixture name (without extension).
        fixtures_dir (str): Fixture parent directory.
        ext (str, Optional): Restrict lookup to this extension.
        only_path (bool, Optional): Return the fixture path instead of its content.

    Returns:
        str | dict | list | None: Fixture path or content, None when not found.
    """
    candidate_exts = [ext] if ext else ['.json', '.txt', '.xml', '.rc']
    for candidate_ext in candidate_exts:
        path = f'{fixtures_dir}/{name}{candidate_ext}'
        if not os.path.exists(path):
            continue
        if only_path:
            return path
        with open(path) as f:
            content = f.read()
        # Structured fixtures are parsed; everything else returned raw.
        if path.endswith(('.json', '.yaml')):
            return yaml.load(content, Loader=yaml.Loader)
        return content
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
def get_file_timestamp():
    """Return the current time as a filename-friendly formatted string."""
    now = datetime.now()
    return now.strftime("%Y_%m_%d-%I_%M_%S_%f_%p")
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def detect_host(interface=None):
    """Detect host IP from ethernet adapters.

    Args:
        interface (str): Interface name to get the address from.

    Returns:
        str | None: First IP of the first matching adapter, or None if not found.
    """
    adapters = ifaddr.get_adapters()
    for adapter in adapters:
        iface = adapter.name
        if (interface and iface != interface) or iface == 'lo':
            continue
        # Skip adapters that expose no address: the previous adapter.ips[0]
        # raised IndexError on such interfaces.
        if not adapter.ips:
            continue
        return adapter.ips[0].ip
    return None
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
def rich_to_ansi(text):
    """Convert text formatted with rich markup to standard string.

    Args:
        text (str): Text possibly containing rich markup.

    Returns:
        str: ANSI-converted text, or the input unchanged on failure.
    """
    try:
        from rich.console import Console
        capture_console = Console(file=None, highlight=False)
        with capture_console.capture() as capture:
            capture_console.print(text, end='', soft_wrap=True)
        return capture.get()
    except Exception:
        print(f'Could not convert rich text to ansi: {text}[/]', file=sys.stderr)
        return text
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def strip_rich_markup(text):
    """Strip rich markup from text.

    Args:
        text (str): Text.

    Returns:
        str: Plain text without rich markup.
    """
    from rich.text import Text
    parsed = Text.from_markup(text)
    return parsed.plain
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
def rich_escape(obj):
    """Escape object for rich printing.

    Args:
        obj (any): Input object.

    Returns:
        any: Escaped Rich string for str inputs, otherwise the object unchanged.
    """
    if not isinstance(obj, str):
        return obj
    escaped = obj.replace('[', r'\[')
    escaped = escaped.replace(']', r'\]')
    return escaped.replace(r'\[/', r'\[\/')
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
def format_debug_object(obj, obj_breaklines=False):
    """Format the debug object for printing.

    Args:
        obj (dict | list): Input object.
        obj_breaklines (bool): Separate items with newlines instead of commas.

    Returns:
        str: Rich-formatted string ('' for unsupported types).
    """
    sep = '\n ' if obj_breaklines else ', '
    if isinstance(obj, dict):
        pairs = (f'[bold blue]{k}[/] [yellow]->[/] [blue]{v}[/]' for k, v in obj.items() if v is not None)
        return sep.join(pairs)
    if isinstance(obj, list):
        return f'[dim green]{sep.join(obj)}[/]'
    return ''
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
def debug(msg, sub='', id='', obj=None, lazy=None, obj_after=True, obj_breaklines=False, verbose=False):
    """Print debug log if DEBUG >= level."""
    # DEBUG is a list of enabled debug scopes; ['all'] or ['1'] enable everything
    # and bypass the scope filtering below.
    if not DEBUG == ['all'] and not DEBUG == ['1']:
        if not DEBUG or DEBUG == [""]:
            return
        if sub:
            # Keep the message only when its sub-scope matches one of the
            # enabled scopes: wildcard regex match, prefix match (non-verbose),
            # or exact match (verbose).
            for s in DEBUG:
                if '*' in s and re.match(s + '$', sub):
                    break
                elif not verbose and sub.startswith(s):
                    break
                elif verbose and sub == s:
                    break
            else:
                # for/else: no scope matched, drop the message.
                return

    if lazy:
        # lazy is a callable evaluated only when the message will actually print.
        msg = lazy(msg)

    formatted_msg = f'[yellow4]{sub:13s}[/] ' if sub else ''
    obj_str = format_debug_object(obj, obj_breaklines) if obj else ''

    # Constructing the message string based on object position
    if obj_str and not obj_after:
        formatted_msg += f'{obj_str} '
    formatted_msg += f'[yellow]{msg}[/]'
    if obj_str and obj_after:
        formatted_msg += f': {obj_str}'
    if id:
        formatted_msg += rf' [italic gray11]\[{id}][/]'

    try:
        console.print(rf'[dim]\[[magenta4]DBG[/]] {formatted_msg}[/]', highlight=False)
    except Exception:
        # Fallback when the message contains broken rich markup: print escaped.
        console.print(rf'[dim]\[[magenta4]DBG[/]] <MARKUP_DISABLED>{rich_escape(formatted_msg)}</MARKUP_DISABLED>[/]', highlight=False)  # noqa: E501
        # With the 'rich' debug scope enabled, surface the markup error instead.
        if 'rich' in DEBUG:
            raise
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
def escape_mongodb_url(url):
    """Escape username / password from MongoDB URL if any.

    Args:
        url (str): Full MongoDB URL string.

    Returns:
        str: Escaped MongoDB URL string.
    """
    match = re.search('mongodb://(?P<userpass>.*)@(?P<url>.*)', url)
    if match:
        url = match.group('url')
        # Split on the FIRST ':' only: passwords may themselves contain ':',
        # which used to raise ValueError on the 2-tuple unpacking. A missing
        # ':' (no password) now yields an empty password instead of crashing.
        user, _, password = match.group('userpass').partition(':')
        user, password = quote(user), quote(password)
        return f'mongodb://{user}:{password}@{url}'
    return url
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def caml_to_snake(name):
    """Convert CamelCase string to snake_case, handling acronyms properly.

    Examples:
        >>> caml_to_snake("MongoDB")
        'mongo_db'
        >>> caml_to_snake("MONGODB")
        'mongodb'
        >>> caml_to_snake("getHTTPResponseCode")
        'get_http_response_code'
        >>> caml_to_snake("XMLHttpRequest")
        'xml_http_request'
        >>> caml_to_snake("HTMLElement")
        'html_element'
    """
    if not name:
        return ""
    # Insert underscores at lower->upper and acronym->word boundaries.
    snaked = CAMEL_TO_SNAKE_REGEX.sub(r'_', name)
    return snaked.lower().replace('__', '_')
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
def print_version():
    """Print secator version information (current / latest version, install location)."""
    from secator.installer import get_version_info
    console.print(f'[bold gold3]Current version[/]: {VERSION}', highlight=False, end='')
    info = get_version_info('secator', github_handle='freelabz/secator', version=VERSION)
    latest_version = info['latest_version']
    location = info['location']
    is_outdated = info['status'] == 'outdated'
    console.print('[bold red] (outdated)[/]' if is_outdated else '')
    console.print(f'[bold gold3]Latest version[/]: {latest_version}', highlight=False)
    console.print(f'[bold gold3]Location[/]: {location}')
    console.print(f'[bold gold3]Python binary[/]: {sys.executable}')
    if DEV_PACKAGE:
        # Extra paths shown only for development (editable) installs.
        console.print(f'[bold gold3]Root folder[/]: {ROOT_FOLDER}')
        console.print(f'[bold gold3]Lib folder[/]: {LIB_FOLDER}')
    if is_outdated:
        console.print('[bold red]secator is outdated, run "secator update" to install the latest version.')
|
|
468
|
+
|
|
469
|
+
|
|
470
|
+
def extract_domain_info(input, domain_only=False):
    """Extracts domain info from any URL or FQDN.

    Args:
        input (str): An URL or FQDN.
        domain_only (bool): Return only the registered domain name.

    Returns:
        tldextract.ExtractResult: Extracted info.
        str | None: Registered domain name or None if invalid domain (only if domain_only is set).
    """
    extracted = tldextract.extract(input)
    if not (extracted and extracted.domain and extracted.suffix):
        return None
    if not domain_only:
        return extracted
    registered = extracted.top_domain_under_public_suffix
    return registered if validators.domain(registered) else None
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def extract_subdomains_from_fqdn(fqdn, domain, suffix):
    """Generates a list of subdomains up to the root domain from a fully qualified domain name (FQDN).

    Args:
        fqdn (str): The full domain name, e.g., 'console.cloud.google.com'.
        domain (str): The main domain, e.g., 'google'.
        suffix (str): The top-level domain (TLD), e.g., 'com'.

    Returns:
        List[str]: A list containing the FQDN and all its subdomains down to the root domain.
    """
    parts = fqdn.split('.')
    subdomains = [fqdn]
    base_domain = f"{domain}.{suffix}"
    current = fqdn

    # Strip the leftmost label until the base domain is reached. The extra
    # len(parts) > 1 guard prevents an infinite loop when the FQDN does not
    # actually end with base_domain (the old loop spun forever on '').
    while current != base_domain and len(parts) > 1:
        parts = parts[1:]
        current = '.'.join(parts)
        subdomains.append(current)

    return subdomains
|
|
519
|
+
|
|
520
|
+
|
|
521
|
+
def match_file_by_pattern(paths, pattern, type='both'):
    """Match pattern on a set of paths.

    Args:
        paths (iterable): An iterable of Path objects to be searched.
        pattern (str): The pattern to search for in file names or directory names, supports Unix shell-style wildcards.
        type (str): Specifies the type to search for; 'file', 'directory', or 'both'.

    Returns:
        list of Path: A list of Path objects that match the given pattern.
    """
    glob_pattern = f'*{pattern}*'
    matched = []
    for candidate in paths:
        resolved = str(candidate.resolve())
        if not fnmatch.fnmatch(resolved, glob_pattern):
            continue
        if candidate.is_dir() and type in ['directory', 'both']:
            matched.append(candidate)
        elif candidate.is_file() and type in ['file', 'both']:
            matched.append(candidate)
    return matched
|
|
541
|
+
|
|
542
|
+
|
|
543
|
+
def get_file_date(file_path):
    """Retrieves the last modification date of the file and returns it in a human-readable format.

    Args:
        file_path (Path): Path object pointing to the file.

    Returns:
        str: Human-readable time format.
    """
    # Get the last modified time of the file
    mod_timestamp = file_path.stat().st_mtime
    mod_date = datetime.fromtimestamp(mod_timestamp)

    # NOTE: use %H:%M (minutes). The previous format string used %H:%m,
    # which printed the MONTH number in place of minutes.
    now = datetime.now()
    if (now - mod_date).days < 7:
        # If the modification was less than a week ago, use natural time
        return humanize.naturaltime(now - mod_date) + mod_date.strftime(" @ %H:%M")
    else:
        # Otherwise, return the date in "on %B %d" format
        return f"{mod_date.strftime('%B %d @ %H:%M')}"
|
|
564
|
+
|
|
565
|
+
|
|
566
|
+
def trim_string(s, max_length=30):
    """Trims a long string to include the beginning and the end, with an ellipsis in the middle. The output string
    will not exceed the specified maximum length.

    Args:
        s (str): The string to be trimmed.
        max_length (int): The maximum allowed length of the trimmed string.

    Returns:
        str: The trimmed string.
    """
    if len(s) <= max_length:
        return s  # Return the original string if it's short enough

    sep = ' [...] '
    # Budget for actual content once the separator is accounted for.
    # The previous version under-counted the separator length (5 instead
    # of len(' [...] ') == 7), so outputs could exceed max_length by 2.
    keep = max(max_length - len(sep), 1)
    end_length = min(30, keep)  # favor the end of the string, as before
    start_length = keep - end_length
    return f"{s[:start_length]}{sep}{s[-end_length:]}"
|
|
590
|
+
|
|
591
|
+
|
|
592
|
+
def sort_files_by_date(file_list):
    """Sorts a list of file paths by their modification date (oldest first), in place.

    Args:
        file_list (list): A list of file paths (strings or Path objects).

    Returns:
        list: The same list, sorted by modification date.
    """
    file_list.sort(key=lambda path: path.stat().st_mtime)
    return file_list
|
|
603
|
+
|
|
604
|
+
|
|
605
|
+
def traceback_as_string(exc):
    """Format an exception's traceback as a readable string.

    Args:
        exc (Exception): An exception.

    Returns:
        str: Readable traceback.
    """
    frames = traceback.format_exception(exc, value=exc, tb=exc.__traceback__)
    return ' '.join(frames)
|
|
615
|
+
|
|
616
|
+
|
|
617
|
+
def should_update(update_frequency, last_updated=None, timestamp=None):
    """Determine if an object should be updated based on the update frequency and the last updated UNIX timestamp.

    Args:
        update_frequency (int): Update frequency in seconds (-1 disables updates).
        last_updated (Union[int, None]): UNIX timestamp or None if unset.
        timestamp (int): Item timestamp (defaults to now).

    Returns:
        bool: Whether the object should be updated.
    """
    timestamp = timestamp or time()
    if update_frequency == -1:
        return False
    if not last_updated:
        return True
    return (timestamp - last_updated) >= update_frequency
|
|
635
|
+
|
|
636
|
+
|
|
637
|
+
def list_reports(workspace=None, type=None, timedelta=None):
    """List all reports in secator reports dir.

    Args:
        workspace (str): Filter by workspace name.
        type (str): Filter by runner type.
        timedelta (None | datetime.timedelta): Keep results newer than timedelta.

    Returns:
        list: List of all JSON report paths.
    """
    if type and not type.endswith('s'):
        type += 's'
    reports = []
    for root, _, files in os.walk(CONFIG.dirs.reports):
        for file in files:
            path = Path(root) / file
            # Expected layout: <reports>/<workspace>/<runner_type>/<id>/report.json
            if path.parts[-1] != 'report.json':
                continue
            if workspace and path.parts[-4] != workspace:
                continue
            if type and path.parts[-3] != type:
                continue
            if timedelta and (datetime.now() - datetime.fromtimestamp(path.stat().st_mtime)) > timedelta:
                continue
            reports.append(path)
    return reports
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
def get_info_from_report_path(path):
    """Get some info from the report path, like workspace, run type and id.

    Expects the layout <...>/<workspace>/<type>/<id>/report.json.

    Args:
        path (pathlib.Path): Report path.

    Returns:
        dict: Info dict ({} when the path is too shallow).
    """
    try:
        return {
            'workspace': path.parts[-4],
            'workspace_path': '/'.join(path.parts[:-3]),
            'type': path.parts[-3],
            'id': path.parts[-2]
        }
    except IndexError:
        return {}
|
|
686
|
+
|
|
687
|
+
|
|
688
|
+
def human_to_timedelta(time_str):
    """Convert human time to a timedelta object.

    Args:
        time_str (str): Time string in human format (e.g. '2y3M1d4h').

    Returns:
        datetime.timedelta | None: Timedelta object, or None for empty input.
    """
    if not time_str:
        return None
    match = TIMEDELTA_REGEX.match(time_str)
    if not match:
        return
    fields = match.groupdict()
    # Fold years and months into days (365 / 30 day approximations),
    # since timedelta has no native year/month support.
    years = int(fields.pop('years') or 0)
    months = int(fields.pop('months') or 0)
    total_days = int(fields.get('days') or 0) + years * 365 + months * 30
    fields['days'] = total_days
    kwargs = {unit: int(value) for unit, value in fields.items() if value}
    return timedelta(**kwargs)
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def deep_merge_dicts(*dicts):
	"""Recursively merges multiple dictionaries by concatenating lists and merging nested dictionaries.

	None of the input dictionaries (nor the lists they contain) are modified.

	Args:
		dicts (tuple): A tuple of dictionary objects to merge.

	Returns:
		dict: A new dictionary containing merged keys and values from all input dictionaries.
	"""
	def merge_two_dicts(dict1, dict2):
		"""Helper function that merges two dictionaries without mutating either input.

		Args:
			dict1 (dict): First dict.
			dict2 (dict): Second dict.

		Returns:
			dict: Merged dict.
		"""
		result = dict(dict1)  # Create a copy of dict1 to avoid modifying it.
		for key, value in dict2.items():
			if key in result:
				if isinstance(result[key], dict) and isinstance(value, dict):
					result[key] = merge_two_dicts(result[key], value)
				elif isinstance(result[key], list) and isinstance(value, list):
					# Build a new list: '+=' would mutate the list object still
					# referenced by the input dict (dict(dict1) is shallow).
					result[key] = result[key] + value
				else:
					result[key] = value  # Overwrite if not both lists or both dicts
			else:
				result[key] = value
		return result

	# Use reduce to apply merge_two_dicts to all dictionaries in dicts
	return reduce(merge_two_dicts, dicts, {})
|
|
749
|
+
|
|
750
|
+
|
|
751
|
+
def process_wordlist(val):
	"""Pre-process wordlist option value to allow referencing wordlists from remote URLs or from config keys.

	Args:
		val (str): Can be a config value in CONFIG.wordlists.defaults or CONFIG.wordlists.templates, or a local path,
			or a URL.
	"""
	# Resolve config aliases: first a default-wordlist name, then a template name.
	resolved = getattr(CONFIG.wordlists.defaults, val) or val
	resolved = getattr(CONFIG.wordlists.templates, resolved) or resolved
	return download_file(
		resolved,
		target_folder=CONFIG.dirs.wordlists,
		offline_mode=CONFIG.offline_mode,
		type='wordlist'
	)
|
|
771
|
+
|
|
772
|
+
|
|
773
|
+
def convert_functions_to_strings(data):
	"""Recursively convert functions to strings in a dict.

	Args:
		data (dict): Dictionary to convert.

	Returns:
		dict: Converted dictionary.
	"""
	if isinstance(data, dict):
		return {key: convert_functions_to_strings(value) for key, value in data.items()}
	if isinstance(data, list):
		return [convert_functions_to_strings(item) for item in data]
	if callable(data):
		# Serialize only the function name (not its source code).
		return json.dumps(data.__name__)
	return data
|
|
790
|
+
|
|
791
|
+
|
|
792
|
+
def headers_to_dict(header_opt):
	"""Parse a ';;'-separated header option string into a dict.

	Args:
		header_opt (str): Headers in 'Key: value;; Key2: value2' format.

	Returns:
		dict: Mapping of header names to header values.
	"""
	headers = {}
	for raw_header in header_opt.split(';;'):
		# Split on the first ':' only, so values may themselves contain colons.
		name, _, value = raw_header.strip().partition(':')
		headers[name.strip()] = value.strip()
	return headers
|
|
800
|
+
|
|
801
|
+
|
|
802
|
+
def format_object(obj, color='magenta', skip_keys=()):
	"""Format a list or dict as a rich-markup string.

	Note: the previous mutable default ``skip_keys=[]`` was replaced by a tuple;
	only membership is tested on it, so list arguments from callers still work.

	Args:
		obj (list | dict): Object to format.
		color (str): Rich color name used for values.
		skip_keys (Iterable[str]): Keys to exclude, compared lowercased with '-' replaced by '_'.

	Returns:
		str: Rich-formatted string (with a leading space), or '' when there is nothing to show.
	"""
	if isinstance(obj, list) and obj:
		return ' [' + ', '.join([f'[{color}]{rich_escape(item)}[/]' for item in obj]) + ']'
	elif isinstance(obj, dict) and obj:
		# Normalize keys before comparing against skip_keys.
		obj = {k: v for k, v in obj.items() if k.lower().replace('-', '_') not in skip_keys}
		if obj:
			return ' [' + ', '.join([f'[bold {color}]{rich_escape(k)}[/]: [{color}]{rich_escape(v)}[/]' for k, v in obj.items()]) + ']'  # noqa: E501
	return ''
|
|
810
|
+
|
|
811
|
+
|
|
812
|
+
def is_host_port(target):
	"""Check if a target is a host:port.

	Fixes an uncaught IndexError in the previous version: a target without any
	':' (e.g. 'example.com') crashed on the port lookup instead of returning
	False. A target with more than one ':' (e.g. 'host:80:x') is now rejected
	since it is not a valid host:port pair.

	Args:
		target (str): The target to check.

	Returns:
		bool: True if the target is a host:port, False otherwise.
	"""
	host, _, port_str = target.partition(':')
	if not (validators.domain(host) or validators.ipv4(host) or validators.ipv6(host) or host == 'localhost'):
		return False
	try:
		# int() fails on an empty string (no ':' present) and on any extra ':' parts.
		port = int(port_str)
	except ValueError:
		return False
	return 1 <= port <= 65535
|
|
831
|
+
|
|
832
|
+
|
|
833
|
+
def autodetect_type(target):
	"""Autodetect the type of a target.

	Args:
		target (str): The target to autodetect the type of.

	Returns:
		str: The type of the target.
	"""
	# Ordered checks: more specific detectors must run before more generic ones.
	detectors = [
		(URL, lambda t: validators.url(t, simple_host=True)),
		(GCS_URL, lambda t: t.startswith('gs://')),
		(CIDR_RANGE, validate_cidr_range),
		(IP, lambda t: validators.ipv4(t) or validators.ipv6(t) or t == 'localhost'),
		(HOST, validators.domain),
		(HOST_PORT, is_host_port),
		(MAC_ADDRESS, validators.mac_address),
		(EMAIL, validators.email),
		(IBAN, validators.iban),
		(UUID, validators.uuid),
		(PATH, lambda t: Path(t).exists()),
		(SLUG, validators.slug),
	]
	for target_type, detector in detectors:
		if detector(target):
			return target_type

	# Fallback: the lowercased Python type name of the target.
	return str(type(target).__name__).lower()
|
|
868
|
+
|
|
869
|
+
|
|
870
|
+
def validate_cidr_range(target):
	"""Check whether a target is a CIDR range (e.g. '10.0.0.0/8').

	Args:
		target (str): Target to check.

	Returns:
		bool: True if the target parses as a CIDR network, False otherwise.
	"""
	# Require an explicit prefix so plain IPs are not treated as ranges.
	if '/' not in target:
		return False
	try:
		ipaddress.ip_network(target, False)  # strict=False: host bits allowed
	except ValueError:
		return False
	return True
|
|
878
|
+
|
|
879
|
+
|
|
880
|
+
def get_versions_from_string(string):
	"""Get versions from a string.

	Args:
		string (str): String to get versions from.

	Returns:
		list[str]: List of version strings (empty when none are found).
	"""
	# re.findall already returns [] when nothing matches, so no extra check is needed.
	regex = r'v?[0-9]+\.[0-9]+\.?[0-9]*\.?[a-zA-Z]*'
	return re.findall(regex, string)
|
|
894
|
+
|
|
895
|
+
|
|
896
|
+
def signal_to_name(signum):
	"""Convert a signal number to its name"""
	# First SIG* constant (excluding SIG_* handlers) whose value matches wins;
	# fall back to the stringified number for unknown signals.
	matches = (
		name for name, value in vars(signal).items()
		if name.startswith('SIG') and not name.startswith('SIG_') and value == signum
	)
	return next(matches, str(signum))
|
|
902
|
+
|
|
903
|
+
|
|
904
|
+
def is_valid_path(path):
	"""Check if a path is valid.

	Args:
		path (str): Path to check.

	Returns:
		bool: True if the path is valid, False otherwise.
	"""
	# PurePath only parses the string — it never touches the filesystem.
	try:
		PurePath(path)
	except (TypeError, ValueError):
		return False
	return True
|
|
918
|
+
|
|
919
|
+
|
|
920
|
+
def is_terminal_interactive():
	"""Check if the terminal is interactive (even if stdin is piped)."""
	# Treat CI environments (CI=true/1) as non-interactive even with a tty.
	ci_flag = os.getenv('CI', '').lower()
	return sys.stdout.isatty() and ci_flag not in ('true', '1')
|