secator 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of secator has been flagged as possibly problematic.
- secator/celery.py +160 -185
- secator/celery_utils.py +268 -0
- secator/cli.py +327 -106
- secator/config.py +27 -11
- secator/configs/workflows/host_recon.yaml +5 -3
- secator/configs/workflows/port_scan.yaml +7 -3
- secator/configs/workflows/url_bypass.yaml +10 -0
- secator/configs/workflows/url_vuln.yaml +1 -1
- secator/decorators.py +169 -92
- secator/definitions.py +10 -3
- secator/exporters/__init__.py +7 -5
- secator/exporters/console.py +10 -0
- secator/exporters/csv.py +27 -19
- secator/exporters/gdrive.py +16 -11
- secator/exporters/json.py +3 -1
- secator/exporters/table.py +30 -2
- secator/exporters/txt.py +20 -16
- secator/hooks/gcs.py +53 -0
- secator/hooks/mongodb.py +53 -27
- secator/output_types/__init__.py +29 -11
- secator/output_types/_base.py +11 -1
- secator/output_types/error.py +36 -0
- secator/output_types/exploit.py +1 -1
- secator/output_types/info.py +24 -0
- secator/output_types/ip.py +7 -0
- secator/output_types/port.py +8 -1
- secator/output_types/progress.py +5 -0
- secator/output_types/record.py +3 -1
- secator/output_types/stat.py +33 -0
- secator/output_types/tag.py +6 -4
- secator/output_types/url.py +6 -3
- secator/output_types/vulnerability.py +3 -2
- secator/output_types/warning.py +24 -0
- secator/report.py +55 -23
- secator/rich.py +44 -39
- secator/runners/_base.py +622 -635
- secator/runners/_helpers.py +5 -91
- secator/runners/celery.py +18 -0
- secator/runners/command.py +364 -211
- secator/runners/scan.py +8 -24
- secator/runners/task.py +21 -55
- secator/runners/workflow.py +41 -40
- secator/scans/__init__.py +28 -0
- secator/serializers/dataclass.py +6 -0
- secator/serializers/json.py +10 -5
- secator/serializers/regex.py +12 -4
- secator/tasks/_categories.py +5 -2
- secator/tasks/bbot.py +293 -0
- secator/tasks/bup.py +98 -0
- secator/tasks/cariddi.py +38 -49
- secator/tasks/dalfox.py +3 -0
- secator/tasks/dirsearch.py +12 -23
- secator/tasks/dnsx.py +49 -30
- secator/tasks/dnsxbrute.py +2 -0
- secator/tasks/feroxbuster.py +8 -17
- secator/tasks/ffuf.py +3 -2
- secator/tasks/fping.py +3 -3
- secator/tasks/gau.py +5 -0
- secator/tasks/gf.py +2 -2
- secator/tasks/gospider.py +4 -0
- secator/tasks/grype.py +9 -9
- secator/tasks/h8mail.py +31 -41
- secator/tasks/httpx.py +58 -21
- secator/tasks/katana.py +18 -22
- secator/tasks/maigret.py +26 -24
- secator/tasks/mapcidr.py +2 -3
- secator/tasks/msfconsole.py +4 -16
- secator/tasks/naabu.py +3 -1
- secator/tasks/nmap.py +50 -35
- secator/tasks/nuclei.py +9 -2
- secator/tasks/searchsploit.py +17 -9
- secator/tasks/subfinder.py +5 -1
- secator/tasks/wpscan.py +79 -93
- secator/template.py +61 -45
- secator/thread.py +24 -0
- secator/utils.py +330 -80
- secator/utils_test.py +48 -23
- secator/workflows/__init__.py +28 -0
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/METADATA +11 -5
- secator-0.7.0.dist-info/RECORD +115 -0
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/WHEEL +1 -1
- secator-0.6.0.dist-info/RECORD +0 -101
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/entry_points.txt +0 -0
- {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/licenses/LICENSE +0 -0
secator/template.py
CHANGED

@@ -1,4 +1,6 @@
 import glob
+
+from collections import OrderedDict
 from pathlib import Path
 
 import yaml
@@ -84,54 +86,68 @@ class TemplateLoader(DotMap):
 			for key in TEMPLATES_DIR_KEYS
 		})
 
-	[… 9 removed lines not rendered in the diff view …]
-		return tasks
-
-	def get_workflows(self):
-		return [TemplateLoader(name=f'workflows/{name}') for name, _ in self.workflows.items()]
-
-	def get_workflow_supported_opts(self):
-		opts = {}
-		tasks = self.get_tasks_class()
-		for task_cls in tasks:
-			task_opts = task_cls.get_supported_opts()
-			for name, conf in task_opts.items():
-				supported = opts.get(name, {}).get('supported', False)
-				opts[name] = conf
-				opts[name]['supported'] = conf['supported'] or supported
-		return opts
+	@property
+	def supported_opts(self):
+		"""Property to access supported options easily."""
+		return self._collect_supported_opts()
+
+	@property
+	def flat_tasks(self):
+		"""Property to access tasks easily."""
+		return self._extract_tasks()
 
-	def
+	def _collect_supported_opts(self):
+		"""Collect supported options from the tasks extracted from the config."""
+		tasks = self._extract_tasks()
 		opts = {}
-	[… 7 removed lines not rendered in the diff view …]
+		for _, task_info in tasks.items():
+			task_class = task_info['class']
+			if task_class:
+				task_opts = task_class.get_supported_opts()
+				for name, conf in task_opts.items():
+					if name not in opts or not opts[name].get('supported', False):
+						opts[name] = conf
 		return opts
 
-	[… 3 removed lines not rendered in the diff view …]
+	def _extract_tasks(self):
+		"""Extract tasks from any workflow or scan config.
+
+		Returns:
+			dict: A dict of task full name to task configuration containing the keyts keys ['name', 'class', 'opts']).
+		"""
+		from secator.runners import Task
+		tasks = OrderedDict()
+
+		def parse_config(config, prefix=''):
+			for key, value in config.items():
+				if key == '_group':
+					parse_config(value, prefix)
+				elif value:
+					task_name = f'{prefix}/{key}' if prefix else key
+					name = key.split('/')[0]
+					if task_name not in tasks:
+						tasks[task_name] = {'name': name, 'class': Task.get_task_class(name), 'opts': {}}
+					tasks[task_name]['opts'] = value.toDict()
+
+		if not self.type:
+			return tasks
 
-	def get_supported_opts(self):
-		opts = {}
-		if self.type == 'workflow':
-			opts = self.get_workflow_supported_opts()
-		elif self.type == 'scan':
-			opts = self.get_scan_supported_opts()
 		elif self.type == 'task':
-			tasks = self.
-	[… 3 removed lines not rendered in the diff view …]
+			tasks[self.name] = {'name': self.name, 'class': Task.get_task_class(self.name)}
+
+		elif self.type == 'scan':
+			# For each workflow in the scan, load it and incorporate it with a unique prefix
+			for wf_name, _ in self.workflows.items():
+				name = wf_name.split('/')[0]
+				config = TemplateLoader(name=f'workflows/{name}')
+				wf_tasks = config.flat_tasks
+				# Prefix tasks from this workflow with its name to prevent collision
+				for task_key, task_val in wf_tasks.items():
+					unique_task_key = f"{wf_name}/{task_key}"  # Append workflow name to task key
+					tasks[unique_task_key] = task_val
+
+		elif self.type == 'workflow':
+			# Normal parsing of a workflow
+			parse_config(self.tasks)
+
+		return dict(tasks)
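A minimal sketch of how the new properties might be consumed, assuming a workflow template such as workflows/host_recon (present in this release) is available locally:

from secator.template import TemplateLoader

config = TemplateLoader(name='workflows/host_recon')
tasks = config.flat_tasks        # dict of full task name -> {'name', 'class', 'opts'}
opts = config.supported_opts     # options merged across all extracted task classes
for task_name, info in tasks.items():
	print(task_name, info['class'], info['opts'])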
secator/thread.py
ADDED

@@ -0,0 +1,24 @@
+import threading
+
+from secator.output_types import Error
+
+
+class Thread(threading.Thread):
+	"""A thread that returns errors in their join() method as secator.output_types.Error."""
+
+	def __init__(self, *args, **kwargs):
+		super().__init__(*args, **kwargs)
+		self.error = None
+
+	def run(self):
+		try:
+			if hasattr(self, '_target'):
+				self._target(*self._args, **self._kwargs)
+		except Exception as e:
+			self.error = Error.from_exception(e)
+
+	def join(self, *args, **kwargs):
+		super().join(*args, **kwargs)
+		if self.error:
+			return self.error
+		return None
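A small usage sketch of the new Thread wrapper: instead of an exception dying silently inside the thread, join() hands back a secator.output_types.Error (the failing target below is illustrative):

from secator.thread import Thread

def boom():
	raise ValueError('task failed')

t = Thread(target=boom)
t.start()
error = t.join()  # an Error object if the target raised, else None
if error:
	print(error)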
secator/utils.py
CHANGED

@@ -1,26 +1,31 @@
+import fnmatch
 import inspect
 import importlib
 import itertools
 import logging
 import operator
 import os
+import tldextract
 import re
 import select
 import sys
+import validators
 import warnings
-from datetime import datetime
 
+from datetime import datetime, timedelta
+from functools import reduce
 from inspect import isclass
 from pathlib import Path
 from pkgutil import iter_modules
+from time import time
+import traceback
 from urllib.parse import urlparse, quote
 
-
+import humanize
 import ifaddr
 import yaml
-from rich.markdown import Markdown
 
-from secator.definitions import (
+from secator.definitions import (DEBUG_COMPONENT, VERSION, DEV_PACKAGE)
 from secator.config import CONFIG, ROOT_FOLDER, LIB_FOLDER
 from secator.rich import console
 
@@ -28,6 +33,8 @@ logger = logging.getLogger(__name__)
 
 _tasks = []
 
+TIMEDELTA_REGEX = re.compile(r'((?P<years>\d+?)y)?((?P<months>\d+?)M)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')  # noqa: E501
+
 
 class TaskError(ValueError):
 	pass
@@ -52,7 +59,7 @@ def setup_logging(level):
 	return logger
 
 
-def expand_input(input):
+def expand_input(input, ctx):
 	"""Expand user-provided input on the CLI:
 	- If input is a path, read the file and return the lines.
 	- If it's a comma-separated list, return the list.
@@ -60,12 +67,14 @@ def expand_input(input):
 
 	Args:
 		input (str): Input.
+		ctx (click.Context): Click context.
 
 	Returns:
 		str: Input.
 	"""
 	if input is None:  # read from stdin
-		[… 1 removed line not rendered in the diff view …]
+		if not ctx.obj['piped_input']:
+			console.print('Waiting for input on stdin ...', style='bold yellow')
 		rlist, _, _ = select.select([sys.stdin], [], [], CONFIG.cli.stdin_timeout)
 		if rlist:
 			data = sys.stdin.read().splitlines()
@@ -198,25 +207,32 @@ def discover_tasks():
 	return _tasks
 
 
-def import_dynamic(
-	"""Import class dynamically from
+def import_dynamic(path, name=None):
+	"""Import class or module dynamically from path.
 
 	Args:
-		[… 1 removed line not rendered in the diff view …]
+		path (str): Path to class or module.
+		name (str): If specified, does a getattr() on the package to get this attribute.
 		cls_root (str): Root parent class.
 
+	Examples:
+		>>> import_dynamic('secator.exporters', name='CsvExporter')
+		>>> import_dynamic('secator.hooks.mongodb', name='HOOKS')
+
 	Returns:
 		cls: Class object.
 	"""
 	try:
-		[… 5 removed lines not rendered in the diff view …]
-		return
+		res = importlib.import_module(path)
+		if name:
+			res = getattr(res, name)
+		if res is None:
+			raise
+		return res
 	except Exception:
-		[… 1 removed line not rendered in the diff view …]
+		if name:
+			path += f'.{name}'
+		warnings.warn(f'"{path}" not found.', category=UserWarning, stacklevel=2)
 		return None
 
 
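As the docstring examples show, the rewritten helper resolves a module first, then an optional attribute, and returns None (after a UserWarning) on failure instead of raising. A sketch of both paths, using a deliberately nonexistent module name:

from secator.utils import import_dynamic

exporter = import_dynamic('secator.exporters', name='CsvExporter')  # the exporter class
missing = import_dynamic('secator.hooks.nope', name='HOOKS')        # warns '"secator.hooks.nope.HOOKS" not found.' and returns None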
@@ -262,8 +278,8 @@ def merge_opts(*options):
 	all_opts = {}
 	for opts in options:
 		if opts:
-			[… 1 removed line not rendered in the diff view …]
-			all_opts.update(
+			opts_noempty = {k: v for k, v in opts.items() if v is not None}
+			all_opts.update(opts_noempty)
 	return all_opts
 
 
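The change makes merge_opts skip None values, so a later option dict can no longer erase an earlier one with unset keys. A quick sketch based on the hunk above (option names are illustrative):

from secator.utils import merge_opts

merge_opts({'rate_limit': 100, 'proxy': 'socks5://localhost:9050'}, {'rate_limit': None, 'threads': 50})
# -> {'rate_limit': 100, 'proxy': 'socks5://localhost:9050', 'threads': 50}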
@@ -328,39 +344,6 @@ def detect_host(interface=None):
 	return None
 
 
-def print_results_table(results, title=None, exclude_fields=[], log=False):
-	from secator.output_types import OUTPUT_TYPES
-	from secator.rich import build_table
-	_print = console.log if log else console.print
-	_print()
-	if title:
-		title = ' '.join(title.capitalize().split('_')) + ' results'
-		h1 = Markdown(f'# {title}')
-		_print(h1, style='bold magenta', width=50)
-		_print()
-	tables = []
-	for output_type in OUTPUT_TYPES:
-		if output_type.__name__ == 'Progress':
-			continue
-		items = [
-			item for item in results if item._type == output_type.get_name()
-		]
-		if CONFIG.runners.remove_duplicates:
-			items = [item for item in items if not item._duplicate]
-		if items:
-			_table = build_table(
-				items,
-				output_fields=output_type._table_fields,
-				exclude_fields=exclude_fields,
-				sort_by=output_type._sort_by)
-			tables.append(_table)
-			title = pluralize(items[0]._type).upper()
-			_print(f':wrench: {title}', style='bold gold3', justify='left')
-			_print(_table)
-			_print()
-	return tables
-
-
 def rich_to_ansi(text):
 	"""Convert text formatted with rich markup to standard string."""
 	from rich.console import Console
@@ -370,35 +353,43 @@ def rich_to_ansi(text):
 	return capture.get()
 
 
-def
-[… 28 removed lines not rendered in the diff view …]
+def format_object(obj, obj_breaklines=False):
+	"""Format the debug object for printing."""
+	sep = '\n ' if obj_breaklines else ', '
+	if isinstance(obj, dict):
+		return sep.join(f'[dim cyan]{k}[/] [dim yellow]->[/] [dim green]{v}[/]' for k, v in obj.items() if v is not None)  # noqa: E501
+	elif isinstance(obj, list):
+		return f'[dim green]{sep.join(obj)}[/]'
+	return ''
+
+
+def debug(msg, sub='', id='', obj=None, lazy=None, obj_after=True, obj_breaklines=False, verbose=False):
+	"""Print debug log if DEBUG >= level."""
+	if not DEBUG_COMPONENT or DEBUG_COMPONENT == [""]:
+		return
+
+	if sub:
+		if verbose and sub not in DEBUG_COMPONENT:
+			sub = f'debug.{sub}'
+		if not any(sub.startswith(s) for s in DEBUG_COMPONENT):
+			return
+
+	if lazy:
+		msg = lazy(msg)
+
+	formatted_msg = f'[dim yellow4]{sub:13s}[/] ' if sub else ''
+	obj_str = format_object(obj, obj_breaklines) if obj else ''
+
+	# Constructing the message string based on object position
+	if obj_str and not obj_after:
+		formatted_msg += f'{obj_str} '
+	formatted_msg += f'[dim yellow]{msg}[/]'
+	if obj_str and obj_after:
+		formatted_msg += f': {obj_str}'
+	if id:
+		formatted_msg += f' [italic dim gray11]\[{id}][/]'
+
+	console.print(f'[dim red]🐛 {formatted_msg}[/]', style='red')
 
 
 def escape_mongodb_url(url):
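A sketch of how the new debug helper is driven by the DEBUG_COMPONENT filter (a list of component names read from secator.definitions; the values below are illustrative):

from secator.utils import debug

# Assuming DEBUG_COMPONENT contains 'celery', this prints a dim line with the
# sub-component, the message, the formatted object, and the id; with an empty
# filter the function returns early and prints nothing.
debug('task state updated', sub='celery', id='a1b2c3', obj={'state': 'RUNNING', 'count': 3})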
@@ -439,3 +430,262 @@ def print_version():
 	console.print(f'[bold gold3]Lib folder[/]: {LIB_FOLDER}')
 	if status == 'outdated':
 		console.print('[bold red]secator is outdated, run "secator update" to install the latest version.')
+
+
+def extract_domain_info(input, domain_only=False):
+	"""Extracts domain info from a given any URL or FQDN.
+
+	Args:
+		input (str): An URL or FQDN.
+
+	Returns:
+		tldextract.ExtractResult: Extracted info.
+		str | None: Registered domain name or None if invalid domain (only if domain_only is set).
+	"""
+	result = tldextract.extract(input)
+	if not result or not result.domain or not result.suffix:
+		return None
+	if domain_only:
+		if not validators.domain(result.registered_domain):
+			return None
+		return result.registered_domain
+	return result
+
+
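Behaviour sketch for the new helper (tldextract does the parsing; validators guards the domain_only path):

from secator.utils import extract_domain_info

extract_domain_info('https://sub.example.co.uk/path')                    # ExtractResult with subdomain='sub', domain='example', suffix='co.uk'
extract_domain_info('https://sub.example.co.uk/path', domain_only=True)  # 'example.co.uk'
extract_domain_info('not_a_domain')                                      # None (no suffix)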
+def extract_subdomains_from_fqdn(fqdn, domain, suffix):
+	"""Generates a list of subdomains up to the root domain from a fully qualified domain name (FQDN).
+
+	Args:
+		fqdn (str): The full domain name, e.g., 'console.cloud.google.com'.
+		domain (str): The main domain, e.g., 'google'.
+		suffix (str): The top-level domain (TLD), e.g., 'com'.
+
+	Returns:
+		List[str]: A list containing the FQDN and all its subdomains down to the root domain.
+	"""
+	# Start with the full domain and prepare to break it down
+	parts = fqdn.split('.')
+
+	# Initialize the list of subdomains with the full domain
+	subdomains = [fqdn]
+
+	# Continue stripping subdomains until reaching the base domain (domain + suffix)
+	base_domain = f"{domain}.{suffix}"
+	current = fqdn
+
+	while current != base_domain:
+		# Remove the leftmost part of the domain
+		parts = parts[1:]
+		current = '.'.join(parts)
+		subdomains.append(current)
+
+	return subdomains
+
+
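A worked example of the subdomain expansion, using the docstring's own values:

from secator.utils import extract_subdomains_from_fqdn

extract_subdomains_from_fqdn('console.cloud.google.com', 'google', 'com')
# -> ['console.cloud.google.com', 'cloud.google.com', 'google.com']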
+def match_file_by_pattern(paths, pattern, type='both'):
+	"""Match pattern on a set of paths.
+
+	Args:
+		paths (iterable): An iterable of Path objects to be searched.
+		pattern (str): The pattern to search for in file names or directory names, supports Unix shell-style wildcards.
+		type (str): Specifies the type to search for; 'file', 'directory', or 'both'.
+
+	Returns:
+		list of Path: A list of Path objects that match the given pattern.
+	"""
+	matches = []
+	for path in paths:
+		full_path = str(path.resolve())
+		if path.is_dir() and type in ['directory', 'both'] and fnmatch.fnmatch(full_path, f'*{pattern}*'):
+			matches.append(path)
+		elif path.is_file() and type in ['file', 'both'] and fnmatch.fnmatch(full_path, f'*{pattern}*'):
+			matches.append(path)
+
+	return matches
+
+
+def get_file_date(file_path):
+	"""Retrieves the last modification date of the file and returns it in a human-readable format.
+
+	Args:
+		file_path (Path): Path object pointing to the file.
+
+	Returns:
+		str: Human-readable time format.
+	"""
+	# Get the last modified time of the file
+	mod_timestamp = file_path.stat().st_mtime
+	mod_date = datetime.fromtimestamp(mod_timestamp)
+
+	# Determine how to display the date based on how long ago it was modified
+	now = datetime.now()
+	if (now - mod_date).days < 7:
+		# If the modification was less than a week ago, use natural time
+		return humanize.naturaltime(now - mod_date) + mod_date.strftime(" @ %H:%m")
+	else:
+		# Otherwise, return the date in "on %B %d" format
+		return f"{mod_date.strftime('%B %d @ %H:%m')}"
+
+
+def trim_string(s, max_length=30):
+	"""
+	Trims a long string to include the beginning and the end, with an ellipsis in the middle.
+	The output string will not exceed the specified maximum length.
+
+	Args:
+		s (str): The string to be trimmed.
+		max_length (int): The maximum allowed length of the trimmed string.
+
+	Returns:
+		str: The trimmed string.
+	"""
+	if len(s) <= max_length:
+		return s  # Return the original string if it's short enough
+
+	# Calculate the lengths of the start and end parts
+	end_length = 30  # Default end length
+	if max_length - end_length - 5 < 0:  # 5 accounts for the length of '[...] '
+		end_length = max_length - 5  # Adjust end length if total max_length is too small
+	start_length = max_length - end_length - 5  # Subtract the space for '[...] '
+
+	# Build the trimmed string
+	start_part = s[:start_length]
+	end_part = s[-end_length:]
+	return f"{start_part} [...] {end_part}"
+
+
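A quick example of the trimming rule: the tail is kept whole (up to the default end length of 30) and a '[...]' marker replaces the middle:

from secator.utils import trim_string

trim_string('a' * 10 + '/very/long/path/to/some/report.json', max_length=50)
# -> 'aaaaaaaaaa/very [...] /long/path/to/some/report.json'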
+def sort_files_by_date(file_list):
+	"""Sorts a list of file paths by their modification date.
+
+	Args:
+		file_list (list): A list of file paths (strings or Path objects).
+
+	Returns:
+		list: The list of file paths sorted by modification date.
+	"""
+	file_list.sort(key=lambda x: x.stat().st_mtime)
+	return file_list
+
+
+def traceback_as_string(exc):
+	"""Format an exception's traceback as a readable string.
+
+	Args:
+		Exception: an exception.
+
+	Returns:
+		string: readable traceback.
+	"""
+	return ' '.join(traceback.format_exception(exc, value=exc, tb=exc.__traceback__))
+
+
+def should_update(update_frequency, last_updated=None, timestamp=None):
+	"""Determine if an object should be updated based on the update frequency and the last updated UNIX timestamp.
+
+	Args:
+		update_frequency (int): Update frequency in seconds.
+		last_updated (Union[int, None]): UNIX timestamp or None if unset.
+		timestamp (int): Item timestamp.
+
+	Returns:
+		bool: Whether the object should be updated.
+	"""
+	if not timestamp:
+		timestamp = time()
+	if last_updated and (timestamp - last_updated) < update_frequency:
+		return False
+	return True
+
+
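A small sketch of the refresh check, e.g. for an item cached 10 minutes ago against a 1 hour update frequency:

from time import time
from secator.utils import should_update

should_update(update_frequency=3600, last_updated=time() - 600)   # False: updated 10 min ago, frequency is 1 h
should_update(update_frequency=3600, last_updated=time() - 7200)  # True: last update is older than the frequency
should_update(update_frequency=3600)                              # True: never updated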
+def list_reports(workspace=None, type=None, timedelta=None):
+	"""List all reports in secator reports dir.
+
+	Args:
+		workspace (str): Filter by workspace name.
+		type (str): Filter by runner type.
+		timedelta (None | datetime.timedelta): Keep results newer than timedelta.
+
+	Returns:
+		list: List all JSON reports.
+	"""
+	if type and not type.endswith('s'):
+		type += 's'
+	json_reports = []
+	for root, _, files in os.walk(CONFIG.dirs.reports):
+		for file in files:
+			path = Path(root) / file
+			if not path.parts[-1] == 'report.json':
+				continue
+			if workspace and path.parts[-4] != workspace:
+				continue
+			if type and path.parts[-3] != type:
+				continue
+			if timedelta and (datetime.now() - datetime.fromtimestamp(path.stat().st_mtime)) > timedelta:
+				continue
+			json_reports.append(path)
+	return json_reports
+
+
+def get_info_from_report_path(path):
+	try:
+		ws, runner_type, number = path.parts[-4], path.parts[-3], path.parts[-2]
+		workspace_path = '/'.join(path.parts[:-3])
+		return {
+			'workspace': ws,
+			'workspace_path': workspace_path,
+			'type': runner_type,
+			'id': number
+		}
+	except IndexError:
+		return {}
+
+
+def human_to_timedelta(time_str):
+	if not time_str:
+		return None
+	parts = TIMEDELTA_REGEX.match(time_str)
+	if not parts:
+		return
+	parts = parts.groupdict()
+	years = int(parts.pop('years') or 0)
+	months = int(parts.pop('months') or 0)
+	days = int(parts.get('days') or 0)
+	days += years * 365
+	days += months * 30
+	parts['days'] = days
+	time_params = {}
+	for name, param in parts.items():
+		if param:
+			time_params[name] = int(param)
+	return timedelta(**time_params)
+
+
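The new TIMEDELTA_REGEX drives this parser; years and months are normalized to days (365 and 30 respectively), so for example:

from secator.utils import human_to_timedelta

human_to_timedelta('1y2M3d')    # timedelta(days=428), i.e. 365 + 60 + 3
human_to_timedelta('5h30m10s')  # timedelta(hours=5, minutes=30, seconds=10)
human_to_timedelta('')          # None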
+def deep_merge_dicts(*dicts):
+	"""
+	Recursively merges multiple dictionaries by concatenating lists and merging nested dictionaries.
+
+	Args:
+		dicts (tuple): A tuple of dictionary objects to merge.
+
+	Returns:
+		dict: A new dictionary containing merged keys and values from all input dictionaries.
+	"""
+	def merge_two_dicts(dict1, dict2):
+		"""
+		Helper function that merges two dictionaries.
+		"""
+		result = dict(dict1)  # Create a copy of dict1 to avoid modifying it.
+		for key, value in dict2.items():
+			if key in result:
+				if isinstance(result[key], dict) and isinstance(value, dict):
+					result[key] = merge_two_dicts(result[key], value)
+				elif isinstance(result[key], list) and isinstance(value, list):
+					result[key] += value  # Concatenating lists
+				else:
+					result[key] = value  # Overwrite if not both lists or both dicts
+			else:
+				result[key] = value
+		return result
+
+	# Use reduce to apply merge_two_dicts to all dictionaries in dicts
+	return reduce(merge_two_dicts, dicts, {})