secator 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of secator might be problematic.

Files changed (90)
  1. secator/celery.py +160 -185
  2. secator/celery_utils.py +268 -0
  3. secator/cli.py +427 -176
  4. secator/config.py +114 -68
  5. secator/configs/workflows/host_recon.yaml +5 -3
  6. secator/configs/workflows/port_scan.yaml +7 -3
  7. secator/configs/workflows/subdomain_recon.yaml +2 -2
  8. secator/configs/workflows/url_bypass.yaml +10 -0
  9. secator/configs/workflows/url_dirsearch.yaml +1 -1
  10. secator/configs/workflows/url_vuln.yaml +1 -1
  11. secator/decorators.py +170 -92
  12. secator/definitions.py +11 -4
  13. secator/exporters/__init__.py +7 -5
  14. secator/exporters/console.py +10 -0
  15. secator/exporters/csv.py +27 -19
  16. secator/exporters/gdrive.py +16 -11
  17. secator/exporters/json.py +3 -1
  18. secator/exporters/table.py +30 -2
  19. secator/exporters/txt.py +20 -16
  20. secator/hooks/gcs.py +53 -0
  21. secator/hooks/mongodb.py +53 -27
  22. secator/installer.py +277 -60
  23. secator/output_types/__init__.py +29 -11
  24. secator/output_types/_base.py +11 -1
  25. secator/output_types/error.py +36 -0
  26. secator/output_types/exploit.py +12 -8
  27. secator/output_types/info.py +24 -0
  28. secator/output_types/ip.py +8 -1
  29. secator/output_types/port.py +9 -2
  30. secator/output_types/progress.py +5 -0
  31. secator/output_types/record.py +5 -3
  32. secator/output_types/stat.py +33 -0
  33. secator/output_types/subdomain.py +1 -1
  34. secator/output_types/tag.py +8 -6
  35. secator/output_types/target.py +2 -2
  36. secator/output_types/url.py +14 -11
  37. secator/output_types/user_account.py +6 -6
  38. secator/output_types/vulnerability.py +8 -6
  39. secator/output_types/warning.py +24 -0
  40. secator/report.py +56 -23
  41. secator/rich.py +44 -39
  42. secator/runners/_base.py +629 -638
  43. secator/runners/_helpers.py +5 -91
  44. secator/runners/celery.py +18 -0
  45. secator/runners/command.py +404 -214
  46. secator/runners/scan.py +8 -24
  47. secator/runners/task.py +21 -55
  48. secator/runners/workflow.py +41 -40
  49. secator/scans/__init__.py +28 -0
  50. secator/serializers/dataclass.py +6 -0
  51. secator/serializers/json.py +10 -5
  52. secator/serializers/regex.py +12 -4
  53. secator/tasks/_categories.py +147 -42
  54. secator/tasks/bbot.py +295 -0
  55. secator/tasks/bup.py +99 -0
  56. secator/tasks/cariddi.py +38 -49
  57. secator/tasks/dalfox.py +3 -0
  58. secator/tasks/dirsearch.py +14 -25
  59. secator/tasks/dnsx.py +49 -30
  60. secator/tasks/dnsxbrute.py +4 -1
  61. secator/tasks/feroxbuster.py +10 -20
  62. secator/tasks/ffuf.py +3 -2
  63. secator/tasks/fping.py +4 -4
  64. secator/tasks/gau.py +5 -0
  65. secator/tasks/gf.py +2 -2
  66. secator/tasks/gospider.py +4 -0
  67. secator/tasks/grype.py +11 -13
  68. secator/tasks/h8mail.py +32 -42
  69. secator/tasks/httpx.py +58 -21
  70. secator/tasks/katana.py +19 -23
  71. secator/tasks/maigret.py +27 -25
  72. secator/tasks/mapcidr.py +2 -3
  73. secator/tasks/msfconsole.py +22 -19
  74. secator/tasks/naabu.py +18 -2
  75. secator/tasks/nmap.py +82 -55
  76. secator/tasks/nuclei.py +13 -3
  77. secator/tasks/searchsploit.py +26 -11
  78. secator/tasks/subfinder.py +5 -1
  79. secator/tasks/wpscan.py +91 -94
  80. secator/template.py +61 -45
  81. secator/thread.py +24 -0
  82. secator/utils.py +417 -78
  83. secator/utils_test.py +48 -23
  84. secator/workflows/__init__.py +28 -0
  85. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/METADATA +59 -48
  86. secator-0.8.0.dist-info/RECORD +115 -0
  87. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/WHEEL +1 -1
  88. secator-0.6.0.dist-info/RECORD +0 -101
  89. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/entry_points.txt +0 -0
  90. {secator-0.6.0.dist-info → secator-0.8.0.dist-info}/licenses/LICENSE +0 -0
secator/utils.py CHANGED
@@ -1,33 +1,40 @@
+import fnmatch
 import inspect
 import importlib
 import itertools
 import logging
 import operator
 import os
+import tldextract
 import re
 import select
 import sys
+import validators
 import warnings
-from datetime import datetime

+from datetime import datetime, timedelta
+from functools import reduce
 from inspect import isclass
 from pathlib import Path
 from pkgutil import iter_modules
+from time import time
+import traceback
 from urllib.parse import urlparse, quote

-
+import humanize
 import ifaddr
 import yaml
-from rich.markdown import Markdown

-from secator.definitions import (DEBUG, DEBUG_COMPONENT, VERSION, DEV_PACKAGE)
-from secator.config import CONFIG, ROOT_FOLDER, LIB_FOLDER
+from secator.definitions import (DEBUG_COMPONENT, VERSION, DEV_PACKAGE)
+from secator.config import CONFIG, ROOT_FOLDER, LIB_FOLDER, download_file
 from secator.rich import console

 logger = logging.getLogger(__name__)

 _tasks = []

+TIMEDELTA_REGEX = re.compile(r'((?P<years>\d+?)y)?((?P<months>\d+?)M)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?') # noqa: E501
+

 class TaskError(ValueError):
 	pass
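
Note (illustrative, not part of the diff): the new TIMEDELTA_REGEX feeds the human_to_timedelta() helper added further down in this file. A minimal sketch of what the pattern captures, using the same regex as above:

import re

TIMEDELTA_REGEX = re.compile(r'((?P<years>\d+?)y)?((?P<months>\d+?)M)?((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')

# Each unit is optional; unmatched groups come back as None.
match = TIMEDELTA_REGEX.match('1y2M3d4h5m6s')
print({k: v for k, v in match.groupdict().items() if v})
# -> {'years': '1', 'months': '2', 'days': '3', 'hours': '4', 'minutes': '5', 'seconds': '6'}
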
@@ -52,7 +59,7 @@ def setup_logging(level):
 	return logger


-def expand_input(input):
+def expand_input(input, ctx):
 	"""Expand user-provided input on the CLI:
 	- If input is a path, read the file and return the lines.
 	- If it's a comma-separated list, return the list.
@@ -60,12 +67,14 @@ def expand_input(input):

 	Args:
 		input (str): Input.
+		ctx (click.Context): Click context.

 	Returns:
 		str: Input.
 	"""
 	if input is None: # read from stdin
-		console.print('Waiting for input on stdin ...', style='bold yellow')
+		if not ctx.obj['piped_input']:
+			console.print('Waiting for input on stdin ...', style='bold yellow')
 		rlist, _, _ = select.select([sys.stdin], [], [], CONFIG.cli.stdin_timeout)
 		if rlist:
 			data = sys.stdin.read().splitlines()
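
A hypothetical caller for the new expand_input(input, ctx) signature. The click wiring and the way 'piped_input' gets set here are illustrative assumptions, not code from this release:

import sys

import click

from secator.utils import expand_input


@click.command()
@click.argument('targets', required=False)
@click.pass_context
def scan(ctx, targets):
	ctx.ensure_object(dict)
	# Assumed flag: when data is piped in, skip the 'Waiting for input on stdin ...' notice.
	ctx.obj.setdefault('piped_input', not sys.stdin.isatty())
	targets = expand_input(targets, ctx)
	click.echo(targets)
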
@@ -198,25 +207,32 @@ def discover_tasks():
 	return _tasks


-def import_dynamic(cls_path, cls_root='Command'):
-	"""Import class dynamically from class path.
+def import_dynamic(path, name=None):
+	"""Import class or module dynamically from path.

 	Args:
-		cls_path (str): Class path.
+		path (str): Path to class or module.
+		name (str): If specified, does a getattr() on the package to get this attribute.
 		cls_root (str): Root parent class.

+	Examples:
+		>>> import_dynamic('secator.exporters', name='CsvExporter')
+		>>> import_dynamic('secator.hooks.mongodb', name='HOOKS')
+
 	Returns:
 		cls: Class object.
 	"""
 	try:
-		package, name = cls_path.rsplit(".", maxsplit=1)
-		cls = getattr(importlib.import_module(package), name)
-		root_cls = inspect.getmro(cls)[-2]
-		if root_cls.__name__ == cls_root:
-			return cls
-		return None
+		res = importlib.import_module(path)
+		if name:
+			res = getattr(res, name)
+		if res is None:
+			raise
+		return res
 	except Exception:
-		warnings.warn(f'"{package}.{name}" not found.')
+		if name:
+			path += f'.{name}'
+		warnings.warn(f'"{path}" not found.', category=UserWarning, stacklevel=2)
 		return None

@@ -262,8 +278,8 @@ def merge_opts(*options):
 	all_opts = {}
 	for opts in options:
 		if opts:
-			opts_noemtpy = {k: v for k, v in opts.items() if v is not None}
-			all_opts.update(opts_noemtpy)
+			opts_noempty = {k: v for k, v in opts.items() if v is not None}
+			all_opts.update(opts_noempty)
 	return all_opts

@@ -292,11 +308,21 @@ def pluralize(word):
 	"""
 	if word.endswith('y'):
 		return word.rstrip('y') + 'ies'
-	else:
-		return f'{word}s'
+	return f'{word}s'


 def load_fixture(name, fixtures_dir, ext=None, only_path=False):
+	"""Load fixture a fixture dir. Optionally load it's content if it's JSON / YAML.
+
+	Args:
+		name (str): Fixture name.
+		fixtures_dir (str): Fixture parent directory.
+		ext (str, Optional): Extension to load.
+		only_path (bool, Optional): Return fixture path instead of fixture content.
+
+	Returns:
+		str: Fixture path or content.
+	"""
 	fixture_path = f'{fixtures_dir}/{name}'
 	exts = ['.json', '.txt', '.xml', '.rc']
 	if ext:
@@ -315,10 +341,19 @@ def load_fixture(name, fixtures_dir, ext=None, only_path=False):


 def get_file_timestamp():
+	"""Get current timestamp into a formatted string."""
 	return datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%f_%p")


 def detect_host(interface=None):
+	"""Detect hostname from ethernet adapters.
+
+	Args:
+		interface (str): Interface name to get hostname from.
+
+	Returns:
+		str | None: hostname or ip address, or None if not found.
+	"""
 	adapters = ifaddr.get_adapters()
 	for adapter in adapters:
 		iface = adapter.name
@@ -328,41 +363,15 @@ def detect_host(interface=None):
 	return None


-def print_results_table(results, title=None, exclude_fields=[], log=False):
-	from secator.output_types import OUTPUT_TYPES
-	from secator.rich import build_table
-	_print = console.log if log else console.print
-	_print()
-	if title:
-		title = ' '.join(title.capitalize().split('_')) + ' results'
-		h1 = Markdown(f'# {title}')
-		_print(h1, style='bold magenta', width=50)
-		_print()
-	tables = []
-	for output_type in OUTPUT_TYPES:
-		if output_type.__name__ == 'Progress':
-			continue
-		items = [
-			item for item in results if item._type == output_type.get_name()
-		]
-		if CONFIG.runners.remove_duplicates:
-			items = [item for item in items if not item._duplicate]
-		if items:
-			_table = build_table(
-				items,
-				output_fields=output_type._table_fields,
-				exclude_fields=exclude_fields,
-				sort_by=output_type._sort_by)
-			tables.append(_table)
-			title = pluralize(items[0]._type).upper()
-			_print(f':wrench: {title}', style='bold gold3', justify='left')
-			_print(_table)
-			_print()
-	return tables
+def rich_to_ansi(text):
+	"""Convert text formatted with rich markup to standard string.

+	Args:
+		text (str): Text.

-def rich_to_ansi(text):
-	"""Convert text formatted with rich markup to standard string."""
+	Returns:
+		str: Converted text (ANSI).
+	"""
 	from rich.console import Console
 	tmp_console = Console(file=None, highlight=False, color_system='truecolor')
 	with tmp_console.capture() as capture:
@@ -370,35 +379,65 @@ def rich_to_ansi(text):
 	return capture.get()


-def debug(msg, sub='', id='', obj=None, obj_after=True, obj_breaklines=False, level=1):
+def rich_escape(obj):
+	"""Escape object for rich printing.
+
+	Args:
+		obj (any): Input object.
+
+	Returns:
+		any: Initial object, or escaped Rich string.
+	"""
+	if isinstance(obj, str):
+		return obj.replace('[', r'\[').replace(']', r'\]')
+	return obj
+
+
+def format_object(obj, obj_breaklines=False):
+	"""Format the debug object for printing.
+
+	Args:
+		obj (dict | list): Input object.
+		obj_breaklines (bool): Split output with newlines for each item in input object.
+
+	Returns:
+		str: Rich-formatted string.
+	"""
+	sep = '\n ' if obj_breaklines else ', '
+	if isinstance(obj, dict):
+		return sep.join(f'[dim cyan]{k}[/] [dim yellow]->[/] [dim green]{v}[/]' for k, v in obj.items() if v is not None) # noqa: E501
+	elif isinstance(obj, list):
+		return f'[dim green]{sep.join(obj)}[/]'
+	return ''
+
+
+def debug(msg, sub='', id='', obj=None, lazy=None, obj_after=True, obj_breaklines=False, verbose=False):
 	"""Print debug log if DEBUG >= level."""
-	debug_comp_empty = DEBUG_COMPONENT == [""] or not DEBUG_COMPONENT
-	if not debug_comp_empty and not any(sub.startswith(s) for s in DEBUG_COMPONENT):
+	if not DEBUG_COMPONENT or DEBUG_COMPONENT == [""]:
 		return
-	elif debug_comp_empty and not DEBUG >= level:
-		return
-	s = ''
+
 	if sub:
-		s += f'[dim yellow4]{sub:13s}[/] '
-	obj_str = ''
-	if obj:
-		sep = ', '
-		if obj_breaklines:
-			obj_str += '\n '
-			sep = '\n '
-		if isinstance(obj, dict):
-			obj_str += sep.join(f'[dim blue]{k}[/] [dim yellow]->[/] [dim green]{v}[/]' for k, v in obj.items() if v is not None)
-		elif isinstance(obj, list):
-			obj_str += sep.join(obj)
+		if verbose and sub not in DEBUG_COMPONENT:
+			sub = f'debug.{sub}'
+		if not any(sub.startswith(s) for s in DEBUG_COMPONENT):
+			return
+
+	if lazy:
+		msg = lazy(msg)
+
+	formatted_msg = f'[yellow4]{sub:13s}[/] ' if sub else ''
+	obj_str = format_object(obj, obj_breaklines) if obj else ''
+
+	# Constructing the message string based on object position
 	if obj_str and not obj_after:
-		s = f'{s} {obj_str} '
-	s += f'[dim yellow]{msg}[/] '
+		formatted_msg += f'{obj_str} '
+	formatted_msg += f'[yellow]{msg}[/]'
 	if obj_str and obj_after:
-		s = f'{s}: {obj_str}'
+		formatted_msg += f': {obj_str}'
 	if id:
-		s += f' [italic dim white]\[{id}][/] '
-	s = rich_to_ansi(f'[dim red]\[debug] {s}[/]')
-	print(s)
+		formatted_msg += rf' [italic gray11]\[{id}][/]'
+
+	console.print(rf'[dim]\[[magenta4]DBG[/]] {formatted_msg}[/]')


 def escape_mongodb_url(url):
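
A hypothetical usage sketch for the reworked debug() helper (signature and behavior taken from the hunk above; whether anything is printed depends on the DEBUG_COMPONENT setting from secator.definitions):

from secator.utils import debug

# Printed only if DEBUG_COMPONENT enables a prefix of 'celery.task'.
debug('task started', sub='celery.task', id='abc123', obj={'targets': 3, 'workers': 2})

# verbose=True namespaces the component under 'debug.' unless it is explicitly enabled,
# and lazy defers formatting work until the message has passed the component filter.
payload = {'results': list(range(1000))}
debug(payload, sub='runner', verbose=True, lazy=lambda msg: str(msg)[:80])
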
@@ -423,7 +462,7 @@ def print_version():
 	"""Print secator version information."""
 	from secator.installer import get_version_info
 	console.print(f'[bold gold3]Current version[/]: {VERSION}', highlight=False, end='')
-	info = get_version_info('secator', github_handle='freelabz/secator', version=VERSION)
+	info = get_version_info('secator', install_github_handle='freelabz/secator', version=VERSION)
 	latest_version = info['latest_version']
 	status = info['status']
 	location = info['location']
@@ -439,3 +478,303 @@ def print_version():
 	console.print(f'[bold gold3]Lib folder[/]: {LIB_FOLDER}')
 	if status == 'outdated':
 		console.print('[bold red]secator is outdated, run "secator update" to install the latest version.')
+
+
+def extract_domain_info(input, domain_only=False):
+	"""Extracts domain info from a given any URL or FQDN.
+
+	Args:
+		input (str): An URL or FQDN.
+
+	Returns:
+		tldextract.ExtractResult: Extracted info.
+		str | None: Registered domain name or None if invalid domain (only if domain_only is set).
+	"""
+	result = tldextract.extract(input)
+	if not result or not result.domain or not result.suffix:
+		return None
+	if domain_only:
+		if not validators.domain(result.registered_domain):
+			return None
+		return result.registered_domain
+	return result
+
+
+def extract_subdomains_from_fqdn(fqdn, domain, suffix):
+	"""Generates a list of subdomains up to the root domain from a fully qualified domain name (FQDN).
+
+	Args:
+		fqdn (str): The full domain name, e.g., 'console.cloud.google.com'.
+		domain (str): The main domain, e.g., 'google'.
+		suffix (str): The top-level domain (TLD), e.g., 'com'.
+
+	Returns:
+		List[str]: A list containing the FQDN and all its subdomains down to the root domain.
+	"""
+	# Start with the full domain and prepare to break it down
+	parts = fqdn.split('.')
+
+	# Initialize the list of subdomains with the full domain
+	subdomains = [fqdn]
+
+	# Continue stripping subdomains until reaching the base domain (domain + suffix)
+	base_domain = f"{domain}.{suffix}"
+	current = fqdn
+
+	while current != base_domain:
+		# Remove the leftmost part of the domain
+		parts = parts[1:]
+		current = '.'.join(parts)
+		subdomains.append(current)
+
+	return subdomains
+
+
+def match_file_by_pattern(paths, pattern, type='both'):
+	"""Match pattern on a set of paths.
+
+	Args:
+		paths (iterable): An iterable of Path objects to be searched.
+		pattern (str): The pattern to search for in file names or directory names, supports Unix shell-style wildcards.
+		type (str): Specifies the type to search for; 'file', 'directory', or 'both'.
+
+	Returns:
+		list of Path: A list of Path objects that match the given pattern.
+	"""
+	matches = []
+	for path in paths:
+		full_path = str(path.resolve())
+		if path.is_dir() and type in ['directory', 'both'] and fnmatch.fnmatch(full_path, f'*{pattern}*'):
+			matches.append(path)
+		elif path.is_file() and type in ['file', 'both'] and fnmatch.fnmatch(full_path, f'*{pattern}*'):
+			matches.append(path)
+
+	return matches
+
+
+def get_file_date(file_path):
+	"""Retrieves the last modification date of the file and returns it in a human-readable format.
+
+	Args:
+		file_path (Path): Path object pointing to the file.
+
+	Returns:
+		str: Human-readable time format.
+	"""
+	# Get the last modified time of the file
+	mod_timestamp = file_path.stat().st_mtime
+	mod_date = datetime.fromtimestamp(mod_timestamp)
+
+	# Determine how to display the date based on how long ago it was modified
+	now = datetime.now()
+	if (now - mod_date).days < 7:
+		# If the modification was less than a week ago, use natural time
+		return humanize.naturaltime(now - mod_date) + mod_date.strftime(" @ %H:%m")
+	else:
+		# Otherwise, return the date in "on %B %d" format
+		return f"{mod_date.strftime('%B %d @ %H:%m')}"
+
+
+def trim_string(s, max_length=30):
+	"""Trims a long string to include the beginning and the end, with an ellipsis in the middle. The output string will
+	not exceed the specified maximum length.
+
+	Args:
+		s (str): The string to be trimmed.
+		max_length (int): The maximum allowed length of the trimmed string.
+
+	Returns:
+		str: The trimmed string.
+	"""
+	if len(s) <= max_length:
+		return s # Return the original string if it's short enough
+
+	# Calculate the lengths of the start and end parts
+	end_length = 30 # Default end length
+	if max_length - end_length - 5 < 0: # 5 accounts for the length of '[...] '
+		end_length = max_length - 5 # Adjust end length if total max_length is too small
+	start_length = max_length - end_length - 5 # Subtract the space for '[...] '
+
+	# Build the trimmed string
+	start_part = s[:start_length]
+	end_part = s[-end_length:]
+	return f"{start_part} [...] {end_part}"
+
+
+def sort_files_by_date(file_list):
+	"""Sorts a list of file paths by their modification date.
+
+	Args:
+		file_list (list): A list of file paths (strings or Path objects).
+
+	Returns:
+		list: The list of file paths sorted by modification date.
+	"""
+	file_list.sort(key=lambda x: x.stat().st_mtime)
+	return file_list
+
+
+def traceback_as_string(exc):
+	"""Format an exception's traceback as a readable string.
+
+	Args:
+		Exception: an exception.
+
+	Returns:
+		string: readable traceback.
+	"""
+	return ' '.join(traceback.format_exception(exc, value=exc, tb=exc.__traceback__))
+
+
+def should_update(update_frequency, last_updated=None, timestamp=None):
+	"""Determine if an object should be updated based on the update frequency and the last updated UNIX timestamp.
+
+	Args:
+		update_frequency (int): Update frequency in seconds.
+		last_updated (Union[int, None]): UNIX timestamp or None if unset.
+		timestamp (int): Item timestamp.
+
+	Returns:
+		bool: Whether the object should be updated.
+	"""
+	if not timestamp:
+		timestamp = time()
+	if last_updated and (timestamp - last_updated) < update_frequency:
+		return False
+	return True
+
+
+def list_reports(workspace=None, type=None, timedelta=None):
+	"""List all reports in secator reports dir.
+
+	Args:
+		workspace (str): Filter by workspace name.
+		type (str): Filter by runner type.
+		timedelta (None | datetime.timedelta): Keep results newer than timedelta.
+
+	Returns:
+		list: List all JSON reports.
+	"""
+	if type and not type.endswith('s'):
+		type += 's'
+	json_reports = []
+	for root, _, files in os.walk(CONFIG.dirs.reports):
+		for file in files:
+			path = Path(root) / file
+			if not path.parts[-1] == 'report.json':
+				continue
+			if workspace and path.parts[-4] != workspace:
+				continue
+			if type and path.parts[-3] != type:
+				continue
+			if timedelta and (datetime.now() - datetime.fromtimestamp(path.stat().st_mtime)) > timedelta:
+				continue
+			json_reports.append(path)
+	return json_reports
+
+
+def get_info_from_report_path(path):
+	"""Get some info from the report path, like workspace, run type and id.
+
+	Args:
+		path (pathlib.Path): Report path.
+
+	Returns:
+		dict: Info dict.
+	"""
+	try:
+		ws, runner_type, number = path.parts[-4], path.parts[-3], path.parts[-2]
+		workspace_path = '/'.join(path.parts[:-3])
+		return {
+			'workspace': ws,
+			'workspace_path': workspace_path,
+			'type': runner_type,
+			'id': number
+		}
+	except IndexError:
+		return {}
+
+
+def human_to_timedelta(time_str):
+	"""Convert human time to a timedelta object.
+
+	Args:
+		str: Time string in human format (like 2 years)
+
+	Returns:
+		datetime.TimeDelta: TimeDelta object.
+	"""
+	if not time_str:
+		return None
+	parts = TIMEDELTA_REGEX.match(time_str)
+	if not parts:
+		return
+	parts = parts.groupdict()
+	years = int(parts.pop('years') or 0)
+	months = int(parts.pop('months') or 0)
+	days = int(parts.get('days') or 0)
+	days += years * 365
+	days += months * 30
+	parts['days'] = days
+	time_params = {}
+	for name, param in parts.items():
+		if param:
+			time_params[name] = int(param)
+	return timedelta(**time_params)
+
+
+def deep_merge_dicts(*dicts):
+	"""Recursively merges multiple dictionaries by concatenating lists and merging nested dictionaries.
+
+	Args:
+		dicts (tuple): A tuple of dictionary objects to merge.
+
+	Returns:
+		dict: A new dictionary containing merged keys and values from all input dictionaries.
+	"""
+	def merge_two_dicts(dict1, dict2):
+		"""Helper function that merges two dictionaries.
+
+		Args:
+			dict1 (dict): First dict.
+			dict2 (dict): Second dict.
+		Returns:
+			dict: Merged dict.
+		"""
+		result = dict(dict1) # Create a copy of dict1 to avoid modifying it.
+		for key, value in dict2.items():
+			if key in result:
+				if isinstance(result[key], dict) and isinstance(value, dict):
+					result[key] = merge_two_dicts(result[key], value)
+				elif isinstance(result[key], list) and isinstance(value, list):
+					result[key] += value # Concatenating lists
+				else:
+					result[key] = value # Overwrite if not both lists or both dicts
+			else:
+				result[key] = value
+		return result
+
+	# Use reduce to apply merge_two_dicts to all dictionaries in dicts
+	return reduce(merge_two_dicts, dicts, {})
+
+
+def process_wordlist(val):
+	"""Pre-process wordlist option value to allow referencing wordlists from remote URLs or from config keys.
+
+	Args:
+		val (str): Can be a config value in CONFIG.wordlists.defaults or CONFIG.wordlists.templates, or a local path,
+			or a URL.
+	"""
+	default_wordlist = getattr(CONFIG.wordlists.defaults, val)
+	if default_wordlist:
+		val = default_wordlist
+	template_wordlist = getattr(CONFIG.wordlists.templates, val)
+	if template_wordlist:
+		return template_wordlist
+	else:
+		return download_file(
+			val,
+			target_folder=CONFIG.dirs.wordlists,
+			offline_mode=CONFIG.offline_mode,
+			type='wordlist'
+		)
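
A short illustrative sketch (not from the secator docs) exercising a few of the helpers added in this hunk; the expected values follow directly from the code above:

from secator.utils import (
	deep_merge_dicts, extract_subdomains_from_fqdn, human_to_timedelta, trim_string
)

# 2 years and 10 days, using the fixed 365-day year and 30-day month above
assert human_to_timedelta('2y10d').days == 2 * 365 + 10

# Walks the FQDN up to the registered domain
assert extract_subdomains_from_fqdn('console.cloud.google.com', 'google', 'com') == [
	'console.cloud.google.com', 'cloud.google.com', 'google.com'
]

# Lists are concatenated, nested dicts are merged key by key
assert deep_merge_dicts({'a': [1], 'b': {'c': 1}}, {'a': [2], 'b': {'d': 2}}) == {
	'a': [1, 2], 'b': {'c': 1, 'd': 2}
}

# Keeps the head of the string plus its last 30 characters, joined by ' [...] '
print(trim_string('A' * 100, max_length=40))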