secator 0.15.1__py3-none-any.whl → 0.16.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (106)
  1. secator/celery.py +40 -24
  2. secator/celery_signals.py +71 -68
  3. secator/celery_utils.py +43 -27
  4. secator/cli.py +520 -280
  5. secator/cli_helper.py +394 -0
  6. secator/click.py +87 -0
  7. secator/config.py +67 -39
  8. secator/configs/profiles/http_headless.yaml +6 -0
  9. secator/configs/profiles/http_record.yaml +6 -0
  10. secator/configs/profiles/tor.yaml +1 -1
  11. secator/configs/scans/domain.yaml +4 -2
  12. secator/configs/scans/host.yaml +1 -1
  13. secator/configs/scans/network.yaml +1 -4
  14. secator/configs/scans/subdomain.yaml +13 -1
  15. secator/configs/scans/url.yaml +1 -2
  16. secator/configs/workflows/cidr_recon.yaml +6 -4
  17. secator/configs/workflows/code_scan.yaml +1 -1
  18. secator/configs/workflows/host_recon.yaml +29 -3
  19. secator/configs/workflows/subdomain_recon.yaml +67 -16
  20. secator/configs/workflows/url_crawl.yaml +44 -15
  21. secator/configs/workflows/url_dirsearch.yaml +4 -4
  22. secator/configs/workflows/url_fuzz.yaml +25 -17
  23. secator/configs/workflows/url_params_fuzz.yaml +7 -0
  24. secator/configs/workflows/url_vuln.yaml +33 -8
  25. secator/configs/workflows/user_hunt.yaml +4 -2
  26. secator/configs/workflows/wordpress.yaml +5 -3
  27. secator/cve.py +718 -0
  28. secator/decorators.py +0 -454
  29. secator/definitions.py +49 -30
  30. secator/exporters/_base.py +2 -2
  31. secator/exporters/console.py +2 -2
  32. secator/exporters/table.py +4 -3
  33. secator/exporters/txt.py +1 -1
  34. secator/hooks/mongodb.py +2 -4
  35. secator/installer.py +77 -49
  36. secator/loader.py +116 -0
  37. secator/output_types/_base.py +3 -0
  38. secator/output_types/certificate.py +63 -63
  39. secator/output_types/error.py +4 -5
  40. secator/output_types/info.py +2 -2
  41. secator/output_types/ip.py +3 -1
  42. secator/output_types/progress.py +5 -9
  43. secator/output_types/state.py +17 -17
  44. secator/output_types/tag.py +3 -0
  45. secator/output_types/target.py +10 -2
  46. secator/output_types/url.py +19 -7
  47. secator/output_types/vulnerability.py +11 -7
  48. secator/output_types/warning.py +2 -2
  49. secator/report.py +27 -15
  50. secator/rich.py +18 -10
  51. secator/runners/_base.py +446 -233
  52. secator/runners/_helpers.py +133 -24
  53. secator/runners/command.py +182 -102
  54. secator/runners/scan.py +33 -5
  55. secator/runners/task.py +13 -7
  56. secator/runners/workflow.py +105 -72
  57. secator/scans/__init__.py +2 -2
  58. secator/serializers/dataclass.py +20 -20
  59. secator/tasks/__init__.py +4 -4
  60. secator/tasks/_categories.py +39 -27
  61. secator/tasks/arjun.py +9 -5
  62. secator/tasks/bbot.py +53 -21
  63. secator/tasks/bup.py +19 -5
  64. secator/tasks/cariddi.py +24 -3
  65. secator/tasks/dalfox.py +26 -7
  66. secator/tasks/dirsearch.py +10 -4
  67. secator/tasks/dnsx.py +70 -25
  68. secator/tasks/feroxbuster.py +11 -3
  69. secator/tasks/ffuf.py +42 -6
  70. secator/tasks/fping.py +20 -8
  71. secator/tasks/gau.py +3 -1
  72. secator/tasks/gf.py +3 -3
  73. secator/tasks/gitleaks.py +2 -2
  74. secator/tasks/gospider.py +7 -1
  75. secator/tasks/grype.py +5 -4
  76. secator/tasks/h8mail.py +2 -1
  77. secator/tasks/httpx.py +18 -5
  78. secator/tasks/katana.py +35 -15
  79. secator/tasks/maigret.py +4 -4
  80. secator/tasks/mapcidr.py +3 -3
  81. secator/tasks/msfconsole.py +4 -4
  82. secator/tasks/naabu.py +2 -2
  83. secator/tasks/nmap.py +12 -14
  84. secator/tasks/nuclei.py +3 -3
  85. secator/tasks/searchsploit.py +4 -5
  86. secator/tasks/subfinder.py +2 -2
  87. secator/tasks/testssl.py +264 -263
  88. secator/tasks/trivy.py +5 -5
  89. secator/tasks/wafw00f.py +21 -3
  90. secator/tasks/wpprobe.py +90 -83
  91. secator/tasks/wpscan.py +6 -5
  92. secator/template.py +218 -104
  93. secator/thread.py +15 -15
  94. secator/tree.py +196 -0
  95. secator/utils.py +131 -123
  96. secator/utils_test.py +60 -19
  97. secator/workflows/__init__.py +2 -2
  98. {secator-0.15.1.dist-info → secator-0.16.1.dist-info}/METADATA +36 -36
  99. secator-0.16.1.dist-info/RECORD +132 -0
  100. secator/configs/profiles/default.yaml +0 -8
  101. secator/configs/workflows/url_nuclei.yaml +0 -11
  102. secator/tasks/dnsxbrute.py +0 -42
  103. secator-0.15.1.dist-info/RECORD +0 -128
  104. {secator-0.15.1.dist-info → secator-0.16.1.dist-info}/WHEEL +0 -0
  105. {secator-0.15.1.dist-info → secator-0.16.1.dist-info}/entry_points.txt +0 -0
  106. {secator-0.15.1.dist-info → secator-0.16.1.dist-info}/licenses/LICENSE +0 -0
@@ -4,35 +4,59 @@ from secator.output_types import Error
 from secator.utils import deduplicate, debug
 
 
-def run_extractors(results, opts, inputs=[], dry_run=False):
+def run_extractors(results, opts, inputs=None, ctx=None, dry_run=False):
     """Run extractors and merge extracted values with option dict.
 
     Args:
         results (list): List of results.
         opts (dict): Options.
         inputs (list): Original inputs.
+        ctx (dict): Context.
         dry_run (bool): Dry run.
 
     Returns:
         tuple: inputs, options, errors.
     """
+    if inputs is None:
+        inputs = []
+    if ctx is None:
+        ctx = {}
     extractors = {k: v for k, v in opts.items() if k.endswith('_')}
+    if dry_run:
+        input_extractors = {k: v for k, v in extractors.items() if k.rstrip('_') == 'targets'}
+        opts_extractors = {k: v for k, v in extractors.items() if k.rstrip('_') != 'targets'}
+        if input_extractors:
+            dry_inputs = [" && ".join([fmt_extractor(v) for k, val in input_extractors.items() for v in val])]
+        else:
+            dry_inputs = inputs
+        if opts_extractors:
+            dry_opts = {k.rstrip('_'): [" && ".join([fmt_extractor(v) for v in val])] for k, val in opts_extractors.items()}
+        else:
+            dry_opts = {}
+        inputs = dry_inputs
+        opts.update(dry_opts)
+        return inputs, opts, []
+
     errors = []
     computed_inputs = []
+    input_extractors = False
     computed_opts = {}
+
     for key, val in extractors.items():
         key = key.rstrip('_')
-        values, err = extract_from_results(results, val)
+        ctx['key'] = key
+        values, err = extract_from_results(results, val, ctx=ctx)
         errors.extend(err)
         if key == 'targets':
-            targets = ['<COMPUTED>'] if dry_run else deduplicate(values)
+            input_extractors = True
+            targets = deduplicate(values)
             computed_inputs.extend(targets)
         else:
-            computed_opt = ['<COMPUTED>'] if dry_run else deduplicate(values)
+            computed_opt = deduplicate(values)
             if computed_opt:
                 computed_opts[key] = computed_opt
                 opts[key] = computed_opts[key]
-    if computed_inputs:
+    if input_extractors:
         debug('computed_inputs', obj=computed_inputs, sub='extractors')
         inputs = computed_inputs
     if computed_opts:
@@ -40,55 +64,140 @@ def run_extractors(results, opts, inputs=[], dry_run=False):
     return inputs, opts, errors
 
 
-def extract_from_results(results, extractors):
+def fmt_extractor(extractor):
+    """Format extractor.
+
+    Args:
+        extractor (dict / str): extractor definition.
+
+    Returns:
+        str: formatted extractor.
+    """
+    parsed_extractor = parse_extractor(extractor)
+    if not parsed_extractor:
+        return '<DYNAMIC[INVALID_EXTRACTOR]>'
+    _type, _field, _condition = parsed_extractor
+    s = f'{_type}.{_field}'
+    if _condition:
+        s = f'{s} if {_condition}'
+    return f'<DYNAMIC({s})>'
+
+
+def extract_from_results(results, extractors, ctx=None):
     """Extract sub extractors from list of results dict.
 
     Args:
         results (list): List of dict.
         extractors (list): List of extractors to extract from.
+        ctx (dict, optional): Context.
 
     Returns:
         tuple: List of extracted results (flat), list of errors.
     """
-    extracted_results = []
+    if ctx is None:
+        ctx = {}
+    all_results = []
     errors = []
+    key = ctx.get('key', 'unknown')
+    ancestor_id = ctx.get('ancestor_id', None)
    if not isinstance(extractors, list):
        extractors = [extractors]
    for extractor in extractors:
        try:
-            extracted_results.extend(process_extractor(results, extractor))
+            extractor_results = process_extractor(results, extractor, ctx=ctx)
+            msg = f'extracted [bold]{len(extractor_results)}[/] / [bold]{len(results)}[/] for key [bold]{key}[/] with extractor [bold]{fmt_extractor(extractor)}[/]'  # noqa: E501
+            if ancestor_id:
+                msg = f'{msg} ([bold]ancestor_id[/]: {ancestor_id})'
+            debug(msg, sub='extractors')
+            all_results.extend(extractor_results)
        except Exception as e:
            error = Error.from_exception(e)
            errors.append(error)
-    return extracted_results, errors
+    if key == 'targets':
+        ctx['targets'] = all_results
+    return all_results, errors
 
 
-def process_extractor(results, extractor, ctx={}):
-    """Process extractor.
+def parse_extractor(extractor):
+    """Parse extractor.
 
     Args:
-        results (list): List of results.
         extractor (dict / str): extractor definition.
 
     Returns:
-        list: List of extracted results.
+        tuple|None: type, field, condition or None if invalid.
     """
-    debug('before extract', obj={'results': results, 'extractor': extractor}, sub='extractor')
+    # Parse extractor, it can be a dict or a string (shortcut)
     if isinstance(extractor, dict):
         _type = extractor['type']
         _field = extractor.get('field')
-        _condition = extractor.get('condition', 'True')
+        _condition = extractor.get('condition')
     else:
-        _type, _field = tuple(extractor.split('.'))
-        _condition = 'True'
-    items = [
-        item for item in results if item._type == _type and eval(_condition)
-    ]
+        parts = tuple(extractor.split('.'))
+        if len(parts) == 2:
+            _type = parts[0]
+            _field = parts[1]
+            _condition = None
+        else:
+            return None
+    return _type, _field, _condition
+
+
+def process_extractor(results, extractor, ctx=None):
+    """Process extractor.
+
+    Args:
+        results (list): List of results.
+        extractor (dict / str): extractor definition.
+
+    Returns:
+        list: List of extracted results.
+    """
+    if ctx is None:
+        ctx = {}
+    # debug('before extract', obj={'results_count': len(results), 'extractor': extractor, 'key': ctx.get('key')}, sub='extractor')  # noqa: E501
+    ancestor_id = ctx.get('ancestor_id')
+    key = ctx.get('key')
+
+    # Parse extractor, it can be a dict or a string (shortcut)
+    parsed_extractor = parse_extractor(extractor)
+    if not parsed_extractor:
+        return results
+    _type, _field, _condition = parsed_extractor
+
+    # Evaluate condition for each result
+    if _condition:
+        tmp_results = []
+        if ancestor_id:
+            _condition = _condition + f' and item._context.get("ancestor_id") == "{str(ancestor_id)}"'
+        for item in results:
+            if item._type != _type:
+                continue
+            ctx['item'] = item
+            ctx[f'{_type}'] = item
+            safe_globals = {'__builtins__': {'len': len}}
+            eval_result = eval(_condition, safe_globals, ctx)
+            if eval_result:
+                tmp_results.append(item)
+            del ctx['item']
+            del ctx[f'{_type}']
+        # debug(f'kept {len(tmp_results)} / {len(results)} items after condition [bold]{_condition}[/bold]', sub='extractor')  # noqa: E501
+        results = tmp_results
+    else:
+        results = [item for item in results if item._type == _type]
+        if ancestor_id:
+            results = [item for item in results if item._context.get('ancestor_id') == ancestor_id]
+
+    results_str = "\n".join([f'{repr(item)} [{str(item._context.get("ancestor_id", ""))}]' for item in results])
+    debug(f'extracted results ([bold]ancestor_id[/]: {ancestor_id}, [bold]key[/]: {key}):\n{results_str}', sub='extractor')
+
+    # Format field if needed
     if _field:
-        _field = '{' + _field + '}' if not _field.startswith('{') else _field
-        items = [_field.format(**item.toDict()) for item in items]
-    debug('after extract', obj={'items': items}, sub='extractor')
-    return items
+        already_formatted = '{' in _field and '}' in _field
+        _field = '{' + _field + '}' if not already_formatted else _field
+        results = [_field.format(**item.toDict()) for item in results]
+    # debug('after extract', obj={'results_count': len(results), 'key': ctx.get('key')}, sub='extractor')
+    return results
 
 
 def get_task_folder_id(path):
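
The hunk above (most likely from secator/runners/_helpers.py, the only helpers module in the file list with a matching +133/-24 change) replaces the old inline eval() filtering with three helpers: parse_extractor normalizes a dict or a '<type>.<field>' string shortcut into a (type, field, condition) tuple, process_extractor filters results by output type and optional condition before formatting the requested field, and fmt_extractor renders a '<DYNAMIC(...)>' placeholder used by the new dry-run path. The sketch below is a minimal illustration of that flow, not part of the package: FakeUrl is a hypothetical stand-in for secator's real output types (which expose _type, _context and toDict()), and the import path is an assumption based on the file list.

from dataclasses import dataclass, field

from secator.runners._helpers import process_extractor  # assumed module path


@dataclass
class FakeUrl:
    # Hypothetical stand-in: only the attributes process_extractor touches
    # (_type, _context, toDict) are modelled here.
    url: str
    status_code: int
    _type: str = 'url'
    _context: dict = field(default_factory=dict)

    def toDict(self):
        return {'url': self.url, 'status_code': self.status_code}


results = [
    FakeUrl('http://example.com/admin', 200),
    FakeUrl('http://example.com/old', 404),
]

# String shortcut '<type>.<field>': no condition, every item of that type is
# kept and rendered through the named field.
print(process_extractor(results, 'url.url'))
# ['http://example.com/admin', 'http://example.com/old']

# Dict form: the condition string is eval'd per item with restricted globals;
# the item is bound both as `item` and under its type name ('url' here).
extractor = {'type': 'url', 'field': 'url', 'condition': 'url.status_code == 200'}
print(process_extractor(results, extractor))
# ['http://example.com/admin']

Compared to 0.15.1, the condition is no longer eval'd with default globals inside a list comprehension; the new code restricts globals to len and evaluates against a context dict that carries the current item, its type alias, and, when the caller sets one, an ancestor_id filter appended to the condition.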