secator 0.15.0__py3-none-any.whl → 0.16.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release.



Files changed (106)
  1. secator/celery.py +40 -24
  2. secator/celery_signals.py +71 -68
  3. secator/celery_utils.py +43 -27
  4. secator/cli.py +520 -280
  5. secator/cli_helper.py +394 -0
  6. secator/click.py +87 -0
  7. secator/config.py +67 -39
  8. secator/configs/profiles/http_headless.yaml +6 -0
  9. secator/configs/profiles/http_record.yaml +6 -0
  10. secator/configs/profiles/tor.yaml +1 -1
  11. secator/configs/scans/domain.yaml +4 -2
  12. secator/configs/scans/host.yaml +1 -1
  13. secator/configs/scans/network.yaml +1 -4
  14. secator/configs/scans/subdomain.yaml +13 -1
  15. secator/configs/scans/url.yaml +1 -2
  16. secator/configs/workflows/cidr_recon.yaml +6 -4
  17. secator/configs/workflows/code_scan.yaml +1 -1
  18. secator/configs/workflows/host_recon.yaml +29 -3
  19. secator/configs/workflows/subdomain_recon.yaml +67 -16
  20. secator/configs/workflows/url_crawl.yaml +44 -15
  21. secator/configs/workflows/url_dirsearch.yaml +4 -4
  22. secator/configs/workflows/url_fuzz.yaml +25 -17
  23. secator/configs/workflows/url_params_fuzz.yaml +7 -0
  24. secator/configs/workflows/url_vuln.yaml +33 -8
  25. secator/configs/workflows/user_hunt.yaml +2 -1
  26. secator/configs/workflows/wordpress.yaml +5 -3
  27. secator/cve.py +718 -0
  28. secator/decorators.py +0 -454
  29. secator/definitions.py +49 -30
  30. secator/exporters/_base.py +2 -2
  31. secator/exporters/console.py +2 -2
  32. secator/exporters/table.py +4 -3
  33. secator/exporters/txt.py +1 -1
  34. secator/hooks/mongodb.py +2 -4
  35. secator/installer.py +77 -49
  36. secator/loader.py +116 -0
  37. secator/output_types/_base.py +3 -0
  38. secator/output_types/certificate.py +63 -63
  39. secator/output_types/error.py +4 -5
  40. secator/output_types/info.py +2 -2
  41. secator/output_types/ip.py +3 -1
  42. secator/output_types/progress.py +5 -9
  43. secator/output_types/state.py +17 -17
  44. secator/output_types/tag.py +3 -0
  45. secator/output_types/target.py +10 -2
  46. secator/output_types/url.py +19 -7
  47. secator/output_types/vulnerability.py +11 -7
  48. secator/output_types/warning.py +2 -2
  49. secator/report.py +27 -15
  50. secator/rich.py +18 -10
  51. secator/runners/_base.py +447 -234
  52. secator/runners/_helpers.py +133 -24
  53. secator/runners/command.py +182 -102
  54. secator/runners/scan.py +33 -5
  55. secator/runners/task.py +13 -7
  56. secator/runners/workflow.py +105 -72
  57. secator/scans/__init__.py +2 -2
  58. secator/serializers/dataclass.py +20 -20
  59. secator/tasks/__init__.py +4 -4
  60. secator/tasks/_categories.py +39 -27
  61. secator/tasks/arjun.py +9 -5
  62. secator/tasks/bbot.py +53 -21
  63. secator/tasks/bup.py +19 -5
  64. secator/tasks/cariddi.py +24 -3
  65. secator/tasks/dalfox.py +26 -7
  66. secator/tasks/dirsearch.py +10 -4
  67. secator/tasks/dnsx.py +70 -25
  68. secator/tasks/feroxbuster.py +11 -3
  69. secator/tasks/ffuf.py +42 -6
  70. secator/tasks/fping.py +20 -8
  71. secator/tasks/gau.py +3 -1
  72. secator/tasks/gf.py +5 -4
  73. secator/tasks/gitleaks.py +2 -2
  74. secator/tasks/gospider.py +7 -1
  75. secator/tasks/grype.py +5 -4
  76. secator/tasks/h8mail.py +2 -1
  77. secator/tasks/httpx.py +18 -5
  78. secator/tasks/katana.py +35 -15
  79. secator/tasks/maigret.py +4 -4
  80. secator/tasks/mapcidr.py +3 -3
  81. secator/tasks/msfconsole.py +4 -4
  82. secator/tasks/naabu.py +5 -4
  83. secator/tasks/nmap.py +12 -14
  84. secator/tasks/nuclei.py +3 -3
  85. secator/tasks/searchsploit.py +6 -5
  86. secator/tasks/subfinder.py +2 -2
  87. secator/tasks/testssl.py +264 -263
  88. secator/tasks/trivy.py +5 -5
  89. secator/tasks/wafw00f.py +21 -3
  90. secator/tasks/wpprobe.py +90 -83
  91. secator/tasks/wpscan.py +6 -5
  92. secator/template.py +218 -104
  93. secator/thread.py +15 -15
  94. secator/tree.py +196 -0
  95. secator/utils.py +131 -123
  96. secator/utils_test.py +60 -19
  97. secator/workflows/__init__.py +2 -2
  98. {secator-0.15.0.dist-info → secator-0.16.0.dist-info}/METADATA +37 -36
  99. secator-0.16.0.dist-info/RECORD +132 -0
  100. secator/configs/profiles/default.yaml +0 -8
  101. secator/configs/workflows/url_nuclei.yaml +0 -11
  102. secator/tasks/dnsxbrute.py +0 -42
  103. secator-0.15.0.dist-info/RECORD +0 -128
  104. {secator-0.15.0.dist-info → secator-0.16.0.dist-info}/WHEEL +0 -0
  105. {secator-0.15.0.dist-info → secator-0.16.0.dist-info}/entry_points.txt +0 -0
  106. {secator-0.15.0.dist-info → secator-0.16.0.dist-info}/licenses/LICENSE +0 -0
secator/tasks/bbot.py CHANGED
@@ -1,3 +1,4 @@
+ import re
  import shutil

  from secator.config import CONFIG
@@ -5,7 +6,7 @@ from secator.decorators import task
  from secator.definitions import FILENAME, HOST, IP, ORG_NAME, PORT, URL, USERNAME
  from secator.runners import Command
  from secator.serializers import RegexSerializer
- from secator.output_types import Vulnerability, Port, Url, Record, Ip, Tag, Info, Error
+ from secator.output_types import Vulnerability, Port, Url, Record, Ip, Tag, Info, Error, UserAccount, Warning
  from secator.serializers import JSONSerializer


@@ -151,12 +152,16 @@ BBOT_MAP_TYPES = {
  'PROTOCOL': Port,
  'OPEN_TCP_PORT': Port,
  'URL': Url,
- 'TECHNOLOGY': Tag,
+ 'URL_HINT': Url,
  'ASN': Record,
  'DNS_NAME': Record,
  'WEBSCREENSHOT': Url,
  'VULNERABILITY': Vulnerability,
- 'FINDING': Tag
+ 'EMAIL_ADDRESS': UserAccount,
+ 'FINDING': Tag,
+ 'AZURE_TENANT': Tag,
+ 'STORAGE_BUCKET': Tag,
+ 'TECHNOLOGY': Tag,
  }
  BBOT_DESCRIPTION_REGEX = RegexSerializer(
  regex=r'(?P<name>[\w ]+): \[(?P<value>[^\[\]]+)\]',
@@ -178,10 +183,11 @@ def output_discriminator(self, item):
  class bbot(Command):
  """Multipurpose scanner."""
  cmd = 'bbot -y --allow-deadly --force'
+ input_types = [HOST, IP, URL, PORT, ORG_NAME, USERNAME, FILENAME]
+ output_types = [Vulnerability, Port, Url, Record, Ip]
  tags = ['vuln', 'scan']
  json_flag = '--json'
  input_flag = '-t'
- input_types = [HOST, IP, URL, PORT, ORG_NAME, USERNAME, FILENAME]
  file_flag = None
  version_flag = '--help'
  opts = {
@@ -198,7 +204,6 @@ class bbot(Command):
  'presets': lambda x: ' '.join(x.split(','))
  }
  item_loaders = [JSONSerializer()]
- output_types = [Vulnerability, Port, Url, Record, Ip]
  output_discriminator = output_discriminator
  output_map = {
  Ip: {
@@ -209,7 +214,7 @@ class bbot(Command):
  },
  Tag: {
  'name': 'name',
- 'match': lambda x: x['data'].get('url') or x['data'].get('host'),
+ 'match': lambda x: x['data'].get('url') or x['data'].get('host') or '',
  'extra_data': 'extra_data',
  '_source': lambda x: 'bbot-' + x['module']
  },
@@ -233,8 +238,9 @@ class bbot(Command):
  },
  Vulnerability: {
  'name': 'name',
- 'match': lambda x: x['data'].get('url') or x['data']['host'],
+ 'matched_at': lambda x: x['data'].get('url') or x['data'].get('host') or '',
  'extra_data': 'extra_data',
+ 'confidence': 'high',
  'severity': lambda x: x['data']['severity'].lower()
  },
  Record: {
@@ -244,6 +250,12 @@ class bbot(Command):
  },
  Error: {
  'message': 'message'
+ },
+ UserAccount: {
+ 'username': lambda x: x['data'].split('@')[0],
+ 'email': 'data',
+ 'site_name': 'host',
+ 'extra_data': 'extra_data',
  }
  }
  install_pre = {
@@ -270,7 +282,8 @@ class bbot(Command):
  return

  if _type not in BBOT_MAP_TYPES:
- self._print(f'[bold orange3]Found unsupported bbot type: {_type}.[/] [bold green]Skipping.[/]', rich=True)
+ yield Warning(message=f'Found unsupported bbot type: {_type}. Skipping.')
+ self.debug(f'Found unsupported bbot type: {item}')
  return

  if isinstance(item['data'], str):
@@ -279,23 +292,37 @@ class bbot(Command):
  return

  item['extra_data'] = item['data']
+ if self.scan_config:
+ modules = self.scan_config.get('preset', {}).get('modules', [])
+ item['extra_data']['bbot_modules'] = modules

  # Parse bbot description into extra_data
  description = item['data'].get('description')
  if description:
- del item['data']['description']
- match = BBOT_DESCRIPTION_REGEX.run(description)
- for chunk in match:
- key, val = tuple([c.strip() for c in chunk])
- if ',' in val:
- val = val.split(',')
- key = '_'.join(key.split(' ')).lower()
- item['extra_data'][key] = val
+ parts = description.split(':')
+ if len(parts) == 2:
+ description = parts[0].strip()
+ match = list(BBOT_DESCRIPTION_REGEX.run(description))
+ if match:
+ del item['data']['description']
+ for chunk in match:
+ key, val = tuple([c.strip() for c in chunk])
+ if ',' in val:
+ val = val.split(',')
+ key = '_'.join(key.split(' ')).lower()
+ item['extra_data'][key] = val
+ description = re.split(r'\s*(\(|\.|Detected.)', description.strip(), 1)[0].rstrip()

- # Set technology as name for Tag
- if item['type'] == 'TECHNOLOGY':
- item['name'] = item['data']['technology']
- del item['data']['technology']
+ # Set tag name for objects mapping Tag
+ if item['type'] in ['AZURE_TENANT', 'STORAGE_BUCKET', 'TECHNOLOGY']:
+ item['name'] = ' '.join(item['type'].split('_')).lower().title()
+ keys = ['technology', 'tenant-names', 'url']
+ info = next((item['data'].get(key) for key in keys if key in item['data']))
+ if info:
+ item['extra_data']['info'] = info
+ for key in keys:
+ if key in item['data']:
+ del item['data'][key]

  # If 'name' key is present in 'data', set it as name
  elif 'name' in item['data'].keys():
@@ -307,6 +334,11 @@ class bbot(Command):
  item['name'] = item['extra_data']['name']
  del item['extra_data']['name']

+ # If 'description' key is present in 'data', set it as name
+ elif description:
+ item['name'] = description
+ del item['data']['description']
+
  # If 'discovery_context' and no name set yet, set it as name
  else:
  item['name'] = item['discovery_context']
@@ -318,7 +350,7 @@ class bbot(Command):
  name = path.as_posix().split('/')[-1]
  secator_path = f'{self.reports_folder}/.outputs/{name}'
  yield Info(f'Copying screenshot {path} to {secator_path}')
- shutil.copy(path, secator_path)
+ shutil.copyfile(path, secator_path)
  item['data']['path'] = secator_path

  yield item
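
The description parsing above relies on BBOT_DESCRIPTION_REGEX to turn "Key: [value]" chunks into extra_data fields, and the new EMAIL_ADDRESS mapping derives the username from the local part of the address. A minimal standalone sketch of both behaviours (the sample event data is hypothetical, and re.findall stands in for secator's RegexSerializer):

import re

# Same pattern as BBOT_DESCRIPTION_REGEX above
DESCRIPTION_RE = re.compile(r'(?P<name>[\w ]+): \[(?P<value>[^\[\]]+)\]')

def parse_description(description):
    """Turn 'Key Name: [value]' chunks into snake_case extra_data keys."""
    extra_data = {}
    for name, value in DESCRIPTION_RE.findall(description):
        key = '_'.join(name.strip().split(' ')).lower()
        value = value.strip()
        if ',' in value:
            value = value.split(',')
        extra_data[key] = value
    return extra_data

print(parse_description('Login Page: [https://example.com/login] Technologies: [nginx,php]'))
# {'login_page': 'https://example.com/login', 'technologies': ['nginx', 'php']}

# EMAIL_ADDRESS events map to UserAccount: username is the local part of 'data'
email = 'alice@example.com'
print({'username': email.split('@')[0], 'email': email})
# {'username': 'alice', 'email': 'alice@example.com'}
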
secator/tasks/bup.py CHANGED
@@ -16,15 +16,16 @@ from secator.tasks._categories import Http
  class bup(Http):
  """40X bypasser."""
  cmd = 'bup'
+ input_types = [URL]
+ output_types = [Url, Progress]
  tags = ['url', 'bypass']
  input_flag = '-u'
- input_types = [URL]
  json_flag = '--jsonl'
  opt_prefix = '--'
  opts = {
  'spoofport': {'type': int, 'short': 'sp', 'help': 'Port(s) to inject in port-specific headers'},
  'spoofip': {'type': str, 'short': 'si', 'help': 'IP(s) to inject in ip-specific headers'},
- 'mode': {'type': str, 'help': 'Bypass modes.'},
+ 'mode': {'type': str, 'help': 'Bypass modes (comma-delimited) amongst: all, mid_paths, end_paths, case_substitution, char_encode, http_methods, http_versions, http_headers_method, http_headers_scheme, http_headers_ip, http_headers_port, http_headers_url, user_agent'}, # noqa: E501
  }
  opt_key_map = {
  HEADER: 'header',
@@ -48,12 +49,12 @@ class bup(Http):
  PROXY: 'proxy',
  }
  item_loaders = [JSONSerializer()]
- output_types = [Url, Progress]
  output_map = {
  Url: {
  'url': 'request_url',
  'method': lambda x: bup.method_extractor(x),
- 'headers': lambda x: bup.headers_extractor(x),
+ 'request_headers': lambda x: bup.request_headers_extractor(x),
+ 'response_headers': lambda x: bup.response_headers_extractor(x),
  'status_code': 'response_status_code',
  'content_type': 'response_content_type',
  'content_length': 'response_content_length',
@@ -90,7 +91,20 @@ class bup(Http):
  return 'GET'

  @staticmethod
- def headers_extractor(item):
+ def request_headers_extractor(item):
+ headers = {}
+ match1 = list(re.finditer(r'-H\s*\'?([^\']*)\'?', str(item['request_curl_payload'])))
+ match2 = list(re.finditer(r'-H\s*\'?([^\']*)\"?', str(item['request_curl_cmd'])))
+ matches = match1
+ matches.extend(match2)
+ for match in matches:
+ header = match.group(1).split(':', 1)
+ if len(header) == 2:
+ headers[header[0].strip()] = header[1].strip()
+ return headers
+
+ @staticmethod
+ def response_headers_extractor(item):
  headers_list = item['response_headers'].split('\n')[1:]
  headers = {}
  for header in headers_list:
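
The split of the old headers_extractor into request and response variants means request headers are now recovered from the curl payload recorded by bup. A rough standalone sketch of the request side (the curl command string below is hypothetical):

import re

def extract_request_headers(curl_cmd):
    """Pull 'Name: value' pairs out of -H '...' options in a curl command string."""
    headers = {}
    for match in re.finditer(r"-H\s*'?([^']*)'?", curl_cmd):
        parts = match.group(1).split(':', 1)
        if len(parts) == 2:
            headers[parts[0].strip()] = parts[1].strip()
    return headers

cmd = "curl -X GET -H 'X-Forwarded-For: 127.0.0.1' -H 'X-Original-URL: /admin' https://example.com/admin"
print(extract_request_headers(cmd))
# {'X-Forwarded-For': '127.0.0.1', 'X-Original-URL': '/admin'}
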
secator/tasks/cariddi.py CHANGED
@@ -1,3 +1,4 @@
+ import re
  from secator.decorators import task
  from secator.definitions import (DELAY, DEPTH, FILTER_CODES, FILTER_REGEX,
  FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT,
@@ -9,15 +10,28 @@ from secator.output_types import Tag, Url
  from secator.serializers import JSONSerializer
  from secator.tasks._categories import HttpCrawler

+ CARIDDI_IGNORE_PATTERNS = re.compile(r"|".join([
+ r"<!--\s*Instance.*\s*-->",
+ r"<!--\s*(Styles|Scripts|Fonts|Images|Links|Forms|Inputs|Buttons|List|Next|Prev|Navigation dots)\s*-->",
+ r"<!--\s*end.*-->",
+ r"<!--\s*start.*-->",
+ r"<!--\s*begin.*-->",
+ r"<!--\s*here goes.*-->",
+ r"<!--\s*.*Yoast SEO.*\s*-->",
+ r"<!--\s*.*Google Analytics.*\s*-->",
+ ]), re.IGNORECASE)
+
+ CARIDDI_IGNORE_LIST = ['BTC address']
+

  @task()
  class cariddi(HttpCrawler):
  """Crawl endpoints, secrets, api keys, extensions, tokens..."""
  cmd = 'cariddi'
- tags = ['url', 'crawl']
  input_types = [URL]
- input_flag = OPT_PIPE_INPUT
  output_types = [Url, Tag]
+ tags = ['url', 'crawl']
+ input_flag = OPT_PIPE_INPUT
  file_flag = OPT_PIPE_INPUT
  json_flag = '-json'
  opts = {
@@ -27,6 +41,9 @@ class cariddi(HttpCrawler):
  'juicy_extensions': {'type': int, 'short': 'jext', 'help': 'Hunt for juicy file extensions. Integer from 1(juicy) to 7(not juicy)'}, # noqa: E501
  'juicy_endpoints': {'is_flag': True, 'short': 'jep', 'help': 'Hunt for juicy endpoints.'}
  }
+ opt_value_map = {
+ HEADER: lambda headers: headers
+ }
  opt_key_map = {
  HEADER: 'headers',
  DELAY: 'd',
@@ -65,7 +82,10 @@ class cariddi(HttpCrawler):
  @staticmethod
  def on_json_loaded(self, item):
  url_item = {k: v for k, v in item.items() if k != 'matches'}
+ url_item['request_headers'] = self.get_opt_value(HEADER, preprocess=True)
  yield Url(**url_item)
+
+ # Get matches, params, errors, secrets, infos
  url = url_item[URL]
  matches = item.get('matches', {})
  params = matches.get('parameters', [])
@@ -96,10 +116,11 @@ class cariddi(HttpCrawler):
  yield Tag(**secret)

  for info in infos:
- CARIDDI_IGNORE_LIST = ['BTC address'] # TODO: make this a config option
  if info['name'] in CARIDDI_IGNORE_LIST:
  continue
  match = info['match']
+ if CARIDDI_IGNORE_PATTERNS.match(match):
+ continue
  info['extra_data'] = {'info': match, 'source': 'body'}
  info['match'] = url
  yield Tag(**info)
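
CARIDDI_IGNORE_PATTERNS exists to drop noisy HTML-comment matches before they are emitted as Tag findings, complementing the CARIDDI_IGNORE_LIST name filter. A minimal check, using a subset of the patterns and hypothetical matches:

import re

IGNORE = re.compile(r"|".join([
    r"<!--\s*end.*-->",
    r"<!--\s*start.*-->",
    r"<!--\s*.*Google Analytics.*\s*-->",
]), re.IGNORECASE)

for match in ['<!-- end header -->', '<!-- Google Analytics snippet -->', 'AKIAIOSFODNN7EXAMPLE']:
    # match() anchors at the start of the string, which is enough for whole-comment matches
    print(match, '->', 'ignored' if IGNORE.match(match) else 'kept')
# <!-- end header --> -> ignored
# <!-- Google Analytics snippet --> -> ignored
# AKIAIOSFODNN7EXAMPLE -> kept
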
secator/tasks/dalfox.py CHANGED
@@ -4,9 +4,9 @@ from secator.decorators import task
  from secator.definitions import (CONFIDENCE, DELAY, EXTRA_DATA, FOLLOW_REDIRECT,
  HEADER, ID, MATCHED_AT, METHOD, NAME,
  OPT_NOT_SUPPORTED, PROVIDER, PROXY, RATE_LIMIT,
- SEVERITY, TAGS, THREADS, TIMEOUT, URL,
+ RETRIES, SEVERITY, TAGS, THREADS, TIMEOUT, URL,
  USER_AGENT)
- from secator.output_types import Vulnerability
+ from secator.output_types import Vulnerability, Url
  from secator.serializers import JSONSerializer
  from secator.tasks._categories import VulnHttp

@@ -21,8 +21,9 @@ DALFOX_TYPE_MAP = {
  class dalfox(VulnHttp):
  """Powerful open source XSS scanning tool."""
  cmd = 'dalfox'
- tags = ['url', 'fuzz']
  input_types = [URL]
+ output_types = [Vulnerability, Url]
+ tags = ['url', 'fuzz']
  input_flag = 'url'
  file_flag = 'file'
  # input_chunk_size = 1
@@ -36,6 +37,7 @@ class dalfox(VulnHttp):
  METHOD: 'method',
  PROXY: 'proxy',
  RATE_LIMIT: OPT_NOT_SUPPORTED,
+ RETRIES: OPT_NOT_SUPPORTED,
  THREADS: 'worker',
  TIMEOUT: 'timeout',
  USER_AGENT: 'user-agent'
@@ -49,10 +51,7 @@ class dalfox(VulnHttp):
  TAGS: lambda x: [x['cwe']] if x['cwe'] else [],
  CONFIDENCE: lambda x: 'high',
  MATCHED_AT: lambda x: urlparse(x['data'])._replace(query='').geturl(),
- EXTRA_DATA: lambda x: {
- k: v for k, v in x.items()
- if k not in ['type', 'severity', 'cwe']
- },
+ EXTRA_DATA: lambda x: dalfox.extra_data_extractor(x),
  SEVERITY: lambda x: x['severity'].lower()
  }
  }
@@ -70,3 +69,23 @@ class dalfox(VulnHttp):
  def on_line(self, line):
  line = line.rstrip(',')
  return line
+
+ @staticmethod
+ def on_json_loaded(self, item):
+ if item.get('type', '') == 'V':
+ item['request_headers'] = self.get_opt_value(HEADER, preprocess=True)
+ yield Url(
+ url=item['data'],
+ method=item['method'],
+ request_headers=item['request_headers'],
+ extra_data={k: v for k, v in item.items() if k not in ['type', 'severity', 'cwe', 'request_headers', 'method', 'data']} # noqa: E501
+ )
+ yield item
+
+ @staticmethod
+ def extra_data_extractor(item):
+ extra_data = {}
+ for key, value in item.items():
+ if key not in ['type', 'severity', 'cwe']:
+ extra_data[key] = value
+ return extra_data
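
extra_data_extractor simply keeps whatever dalfox reports beyond the fields already mapped onto Vulnerability attributes. A sketch on a hypothetical dalfox JSON result:

def extra_data_extractor(item):
    """Keep everything except the fields already mapped to Vulnerability attributes."""
    return {k: v for k, v in item.items() if k not in ['type', 'severity', 'cwe']}

item = {
    'type': 'V',
    'severity': 'High',
    'cwe': 'CWE-79',
    'method': 'GET',
    'data': 'https://example.com/?q=<script>alert(1)</script>',
    'param': 'q',
}
print(extra_data_extractor(item))
# {'method': 'GET', 'data': 'https://example.com/?q=<script>alert(1)</script>', 'param': 'q'}
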
secator/tasks/dirsearch.py CHANGED
@@ -3,7 +3,7 @@ import os
  import yaml

  from secator.decorators import task
- from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, DELAY, DEPTH,
+ from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, DATA, DELAY, DEPTH,
  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
  FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
@@ -18,8 +18,9 @@ from secator.tasks._categories import HttpFuzzer
  class dirsearch(HttpFuzzer):
  """Advanced web path brute-forcer."""
  cmd = 'dirsearch'
- tags = ['url', 'fuzz']
  input_types = [URL]
+ output_types = [Url]
+ tags = ['url', 'fuzz']
  input_flag = '-u'
  file_flag = '-l'
  json_flag = '-O json'
@@ -27,6 +28,7 @@ class dirsearch(HttpFuzzer):
  encoding = 'ansi'
  opt_key_map = {
  HEADER: 'header',
+ DATA: 'data',
  DELAY: 'delay',
  DEPTH: 'max-recursion-depth',
  FILTER_CODES: 'exclude-status',
@@ -51,10 +53,12 @@ class dirsearch(HttpFuzzer):
  Url: {
  CONTENT_LENGTH: 'content-length',
  CONTENT_TYPE: 'content-type',
- STATUS_CODE: 'status'
+ STATUS_CODE: 'status',
+ 'request_headers': 'request_headers'
  }
  }
  install_cmd = 'pipx install git+https://github.com/maurosoria/dirsearch.git --force'
+ install_version = '0.4.3'
  proxychains = True
  proxy_socks5 = True
  proxy_http = True
@@ -76,4 +80,6 @@ class dirsearch(HttpFuzzer):
  yield Info(message=f'JSON results saved to {self.output_path}')
  with open(self.output_path, 'r') as f:
  results = yaml.safe_load(f.read()).get('results', [])
- yield from results
+ for result in results:
+ result['request_headers'] = self.get_opt_value(HEADER, preprocess=True)
+ yield result
secator/tasks/dnsx.py CHANGED
@@ -1,10 +1,13 @@
+ import validators
+
  from secator.decorators import task
- from secator.definitions import (HOST, OPT_PIPE_INPUT, RATE_LIMIT, RETRIES, THREADS)
- from secator.output_types import Record, Ip, Subdomain
+ from secator.definitions import (HOST, CIDR_RANGE, DELAY, IP, OPT_PIPE_INPUT, PROXY,
+ RATE_LIMIT, RETRIES, THREADS, TIMEOUT, WORDLIST, OPT_NOT_SUPPORTED)
+ from secator.output_types import Record, Ip, Subdomain, Error, Warning
  from secator.output_types.ip import IpProtocol
  from secator.tasks._categories import ReconDns
  from secator.serializers import JSONSerializer
- from secator.utils import extract_domain_info
+ from secator.utils import extract_domain_info, process_wordlist


  @task()
@@ -12,20 +15,26 @@ class dnsx(ReconDns):
  """dnsx is a fast and multi-purpose DNS toolkit designed for running various retryabledns library."""
  cmd = 'dnsx -resp -recon'
  tags = ['dns', 'fuzz']
+ input_types = [HOST, CIDR_RANGE, IP]
+ output_types = [Record, Ip, Subdomain]
  json_flag = '-json'
  input_flag = OPT_PIPE_INPUT
- input_types = [HOST]
  file_flag = OPT_PIPE_INPUT
- output_types = [Record, Ip, Subdomain]
  opt_key_map = {
  RATE_LIMIT: 'rate-limit',
  RETRIES: 'retry',
  THREADS: 'threads',
+ PROXY: 'proxy',
+ DELAY: OPT_NOT_SUPPORTED,
+ TIMEOUT: OPT_NOT_SUPPORTED,
  }
  opts = {
  'trace': {'is_flag': True, 'default': False, 'help': 'Perform dns tracing'},
  'resolver': {'type': str, 'short': 'r', 'help': 'List of resolvers to use (file or comma separated)'},
  'wildcard_domain': {'type': str, 'short': 'wd', 'help': 'Domain name for wildcard filtering'},
+ 'rc': {'type': str, 'short': 'rc', 'help': 'DNS return code to filter (noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, xrrset, notauth, notzone)'}, # noqa: E501
+ 'subdomains_only': {'is_flag': True, 'short': 'so', 'default': False, 'internal': True, 'help': 'Only return subdomains'}, # noqa: E501
+ WORDLIST: {'type': str, 'short': 'w', 'default': None, 'process': process_wordlist, 'help': 'Wordlist to use'}, # noqa: E501
  }
  item_loaders = [JSONSerializer()]
  install_version = 'v1.2.2'
@@ -34,46 +43,82 @@ class dnsx(ReconDns):
  profile = 'io'

  @staticmethod
- def on_json_loaded(self, item):
- # Show full DNS response
- quiet = self.get_opt_value('quiet')
- if not quiet:
- all = item['all']
- for line in all:
- yield line
- yield '\n'
+ def before_init(self):
+ if self.get_opt_value('wordlist'):
+ self.file_flag = '-d'
+ self.input_flag = '-d'
+ rc = self.get_opt_value('rc')
+ if not rc:
+ self.cmd += ' -rc noerror'
+ if len(self.inputs) > 1 and self.get_opt_value('wildcard_domain'):
+ fqdn = extract_domain_info(self.inputs[0], domain_only=True)
+ for input in self.inputs[1:]:
+ fqdn_item = extract_domain_info(input, domain_only=True)
+ if fqdn_item != fqdn:
+ return Error('Wildcard domain is not supported when using multiple hosts with different FQDNs !')

- # Loop through record types and yield records
+ @staticmethod
+ def on_json_loaded(self, item):
  record_types = ['a', 'aaaa', 'cname', 'mx', 'ns', 'txt', 'srv', 'ptr', 'soa', 'axfr', 'caa']
  host = item['host']
+ status_code = item.get('status_code')
+ if host.startswith('*'):
+ yield Warning(f'Wildcard domain detected: {host}. Ignore previous results.')
+ self.stop_process(exit_ok=True)
+ return
+ is_ip = validators.ipv4(host) or validators.ipv6(host)
+ if status_code and status_code == 'NOERROR' and not is_ip:
+ yield Subdomain(
+ host=host,
+ domain=extract_domain_info(host, domain_only=True),
+ sources=['dns']
+ )
+ if self.get_opt_value('subdomains_only'):
+ return
  for _type in record_types:
  values = item.get(_type, [])
+ if isinstance(values, dict):
+ values = [values]
  for value in values:
  name = value
  extra_data = {}
  if isinstance(value, dict):
- name = value['name']
- extra_data = {k: v for k, v in value.items() if k != 'name'}
+ name = value.get('name', host)
+ extra_data = {k: v for k, v in value.items() if k != 'name' and k != 'host'}
  if _type == 'a':
- yield Ip(
+ ip = Ip(
  host=host,
  ip=name,
- protocol=IpProtocol.IPv4
+ protocol=IpProtocol.IPv4,
+ alive=False
  )
+ if ip not in self.results:
+ yield ip
  elif _type == 'aaaa':
- yield Ip(
+ ip = Ip(
  host=host,
  ip=name,
- protocol=IpProtocol.IPv6
+ protocol=IpProtocol.IPv6,
+ alive=False
  )
+ if ip not in self.results:
+ yield ip
  elif _type == 'ptr':
- yield Subdomain(
- host=name,
- domain=extract_domain_info(name, domain_only=True)
+ ip = Ip(
+ host=host,
+ ip=name,
+ protocol=IpProtocol.IPv4,
+ alive=False
  )
- yield Record(
+ if ip not in self.results:
+ yield ip
+ record = Record(
  host=host,
  name=name,
  type=_type.upper(),
- extra_data=extra_data
+ extra_data=extra_data,
+ _source=self.unique_name
  )
+
+ if record not in self.results:
+ yield record
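
The new subdomain gating in on_json_loaded can be sketched in isolation: a host is only emitted as a Subdomain when the DNS answer is NOERROR and the host is not a bare IP, and wildcard answers short-circuit the run. The dnsx JSON lines below are hypothetical:

import validators

def is_live_subdomain(item):
    """NOERROR answer for a non-IP, non-wildcard host counts as a subdomain."""
    host = item['host']
    if host.startswith('*'):  # wildcard answer; the real task yields a Warning and stops the process
        return False
    is_ip = validators.ipv4(host) or validators.ipv6(host)
    return item.get('status_code') == 'NOERROR' and not is_ip

print(is_live_subdomain({'host': 'api.example.com', 'status_code': 'NOERROR'}))  # True
print(is_live_subdomain({'host': '1.2.3.4', 'status_code': 'NOERROR'}))          # False
print(is_live_subdomain({'host': '*.example.com', 'status_code': 'NOERROR'}))    # False
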
secator/tasks/feroxbuster.py CHANGED
@@ -1,6 +1,6 @@
  from secator.config import CONFIG
  from secator.decorators import task
- from secator.definitions import (CONTENT_TYPE, DELAY, DEPTH, FILTER_CODES,
+ from secator.definitions import (CONTENT_TYPE, DATA, DELAY, DEPTH, FILTER_CODES,
  FILTER_REGEX, FILTER_SIZE, FILTER_WORDS,
  FOLLOW_REDIRECT, HEADER, LINES, MATCH_CODES,
  MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD,
@@ -16,8 +16,9 @@ from secator.tasks._categories import HttpFuzzer
  class feroxbuster(HttpFuzzer):
  """Simple, fast, recursive content discovery tool written in Rust"""
  cmd = 'feroxbuster --auto-bail --no-state'
- tags = ['url', 'fuzz']
  input_types = [URL]
+ output_types = [Url]
+ tags = ['url', 'fuzz']
  input_flag = '--url'
  input_chunk_size = 1
  file_flag = OPT_PIPE_INPUT
@@ -32,6 +33,7 @@ class feroxbuster(HttpFuzzer):
  }
  opt_key_map = {
  HEADER: 'headers',
+ DATA: 'data',
  DELAY: OPT_NOT_SUPPORTED,
  DEPTH: 'depth',
  FILTER_CODES: 'filter-status',
@@ -50,7 +52,8 @@ class feroxbuster(HttpFuzzer):
  THREADS: 'threads',
  TIMEOUT: 'timeout',
  USER_AGENT: 'user-agent',
- WORDLIST: 'wordlist'
+ WORDLIST: 'wordlist',
+ 'request_headers': 'headers'
  }
  item_loaders = [JSONSerializer()]
  output_map = {
@@ -84,3 +87,8 @@ class feroxbuster(HttpFuzzer):
  if isinstance(item, dict):
  return item['type'] == 'response'
  return True
+
+ @staticmethod
+ def on_item(self, item):
+ item.request_headers = self.get_opt_value('header', preprocess=True)
+ return item
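
Across dirsearch, feroxbuster, cariddi and dalfox the same pattern recurs in this release: the header option the task was invoked with is re-attached to every emitted URL as request_headers. A simplified sketch of that hook, where the Url dataclass and the task_headers argument are stand-ins for secator internals such as get_opt_value:

from dataclasses import dataclass, field

@dataclass
class Url:
    url: str
    status_code: int = 0
    request_headers: dict = field(default_factory=dict)

def on_item(task_headers, item):
    """Copy the headers the task was invoked with onto each result."""
    item.request_headers = dict(task_headers)
    return item

url = Url(url='https://example.com/admin', status_code=200)
print(on_item({'Authorization': 'Bearer <token>'}, url).request_headers)
# {'Authorization': 'Bearer <token>'}
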