secator 0.3.6__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of secator might be problematic.

secator/runners/workflow.py CHANGED
@@ -1,6 +1,6 @@
 from secator.definitions import DEBUG
-from secator.exporters import CsvExporter, JsonExporter
 from secator.output_types import Target
+from secator.config import CONFIG
 from secator.runners._base import Runner
 from secator.runners.task import Task
 from secator.utils import merge_opts
@@ -8,10 +8,7 @@ from secator.utils import merge_opts
 
 class Workflow(Runner):
 
-    default_exporters = [
-        JsonExporter,
-        CsvExporter
-    ]
+    default_exporters = CONFIG.workflows.exporters
 
     @classmethod
     def delay(cls, *args, **kwargs):
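
Workflow exporters are no longer hardcoded classes but are read from the global config. A minimal sketch of how names from a config list could map to exporter classes; the EXPORTER_REGISTRY mapping and resolve_exporters helper below are illustrative, not secator's actual resolution code:

    # Hypothetical helper: resolve exporter names from config to classes.
    from secator.exporters import CsvExporter, JsonExporter

    EXPORTER_REGISTRY = {'json': JsonExporter, 'csv': CsvExporter}  # assumed name -> class mapping

    def resolve_exporters(names):
        """Return exporter classes for known names, skipping unknown entries."""
        return [EXPORTER_REGISTRY[n] for n in names if n in EXPORTER_REGISTRY]

    print(resolve_exporters(['json', 'csv']))  # [JsonExporter, CsvExporter]
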
@@ -84,7 +81,7 @@ class Workflow(Runner):
         """Get tasks recursively as Celery chains / chords.
 
         Args:
-            obj (secator.config.ConfigLoader): Config.
+            obj (secator.config.TemplateLoader): Config.
             targets (list): List of targets.
             workflow_opts (dict): Workflow options.
             run_opts (dict): Run options.
secator/tasks/_categories.py CHANGED
@@ -1,21 +1,20 @@
 import json
-import logging
 import os
 
 import requests
 from bs4 import BeautifulSoup
 from cpe import CPE
 
-from secator.definitions import (CIDR_RANGE, CONFIDENCE, CVSS_SCORE, DATA_FOLDER, DEFAULT_HTTP_WORDLIST,
-                                 DEFAULT_SKIP_CVE_SEARCH, DELAY, DEPTH, DESCRIPTION, FILTER_CODES, FILTER_REGEX,
-                                 FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST, ID, MATCH_CODES, MATCH_REGEX,
-                                 MATCH_SIZE, MATCH_WORDS, METHOD, NAME, PATH, PROVIDER, PROXY, RATE_LIMIT, REFERENCES,
-                                 RETRIES, SEVERITY, TAGS, THREADS, TIMEOUT, URL, USER_AGENT, USERNAME, WORDLIST)
+from secator.definitions import (CIDR_RANGE, CVSS_SCORE, DELAY, DEPTH, DESCRIPTION, FILTER_CODES,
+                                 FILTER_REGEX, FILTER_SIZE, FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST, ID,
+                                 MATCH_CODES, MATCH_REGEX, MATCH_SIZE, MATCH_WORDS, METHOD, NAME, PATH, PROVIDER, PROXY,
+                                 RATE_LIMIT, REFERENCES, RETRIES, SEVERITY, TAGS, THREADS, TIMEOUT, URL, USER_AGENT,
+                                 USERNAME, WORDLIST)
 from secator.output_types import Ip, Port, Subdomain, Tag, Url, UserAccount, Vulnerability
-from secator.rich import console
+from secator.config import CONFIG
 from secator.runners import Command
+from secator.utils import debug
 
-logger = logging.getLogger(__name__)
 
 OPTS = {
     HEADER: {'type': str, 'help': 'Custom header to add to each request in the form "KEY1:VALUE1; KEY2:VALUE2"'},
@@ -37,7 +36,7 @@ OPTS = {
     THREADS: {'type': int, 'help': 'Number of threads to run', 'default': 50},
     TIMEOUT: {'type': int, 'help': 'Request timeout'},
     USER_AGENT: {'type': str, 'short': 'ua', 'help': 'User agent, e.g "Mozilla Firefox 1.0"'},
-    WORDLIST: {'type': str, 'short': 'w', 'default': DEFAULT_HTTP_WORDLIST, 'help': 'Wordlist to use'}
+    WORDLIST: {'type': str, 'short': 'w', 'default': CONFIG.wordlists.defaults.http, 'help': 'Wordlist to use'}
 }
 
 OPTS_HTTP = [
@@ -121,7 +120,7 @@ class Vuln(Command):
 
     @staticmethod
    def lookup_local_cve(cve_id):
-        cve_path = f'{DATA_FOLDER}/cves/{cve_id}.json'
+        cve_path = f'{CONFIG.dirs.data}/cves/{cve_id}.json'
         if os.path.exists(cve_path):
             with open(cve_path, 'r') as f:
                 return json.load(f)
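
For reference, the local lookup resolves CVE records installed ahead of time under the data directory; the CVE id below is illustrative, and the exact location of CONFIG.dirs.data depends on the user's setup:

    from secator.config import CONFIG

    cve_id = 'CVE-2021-44228'  # illustrative CVE id
    cve_path = f'{CONFIG.dirs.data}/cves/{cve_id}.json'
    # lookup_local_cve returns the parsed JSON if this file exists, else None.
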
@@ -131,13 +130,54 @@ class Vuln(Command):
     # def lookup_exploitdb(exploit_id):
     #     print('looking up exploit')
     #     try:
-    #         cve_info = requests.get(f'https://exploit-db.com/exploits/{exploit_id}', timeout=5).content
-    #         print(cve_info)
-    #     except Exception:
+    #         resp = requests.get(f'https://exploit-db.com/exploits/{exploit_id}', timeout=5)
+    #         resp.raise_for_status()
+    #         content = resp.content
+    #     except requests.RequestException as e:
+    #         debug(f'Failed remote query for {exploit_id} ({str(e)}).', sub='cve')
     #         logger.error(f'Could not fetch exploit info for exploit {exploit_id}. Skipping.')
     #         return None
     #     return cve_info
 
+    @staticmethod
+    def create_cpe_string(product_name, version):
+        """
+        Generate a CPE string for a given product and version.
+
+        Args:
+            product_name (str): The name of the product.
+            version (str): The version of the product.
+
+        Returns:
+            str: A CPE string formatted according to the CPE 2.3 specification.
+        """
+        cpe_version = "2.3"  # CPE Specification version
+        part = "a"  # 'a' for application
+        vendor = product_name.lower()  # Vendor name, using product name
+        product = product_name.lower()  # Product name
+        version = version  # Product version
+        cpe_string = f"cpe:{cpe_version}:{part}:{vendor}:{product}:{version}:*:*:*:*:*:*:*"
+        return cpe_string
+
+    @staticmethod
+    def match_cpes(fs1, fs2):
+        """Check if two CPEs match. Partial matches consisting of <vendor>:<product>:<version> are considered a match.
+
+        Args:
+            fs1 (str): Format string 1.
+            fs2 (str): Format string 2.
+
+        Returns:
+            bool: True if the two CPEs match, False otherwise.
+        """
+        if fs1 == fs2:
+            return True
+        split_fs1 = fs1.split(':')
+        split_fs2 = fs2.split(':')
+        tup1 = split_fs1[3], split_fs1[4], split_fs1[5]
+        tup2 = split_fs2[3], split_fs2[4], split_fs2[5]
+        return tup1 == tup2
+
     @staticmethod
     def lookup_cve(cve_id, cpes=[]):
         """Search for a CVE in local db or using cve.circl.lu and return vulnerability data.
@@ -150,18 +190,21 @@ class Vuln(Command):
             dict: vulnerability data.
         """
         cve_info = Vuln.lookup_local_cve(cve_id)
+
+        # Online CVE lookup
         if not cve_info:
-            if DEFAULT_SKIP_CVE_SEARCH:
-                logger.debug(f'{cve_id} not found locally, and DEFAULT_SKIP_CVE_SEARCH is set: ignoring online search.')
+            if CONFIG.runners.skip_cve_search:
+                debug(f'Skip remote query for {cve_id} since config.runners.skip_cve_search is set.', sub='cve')
+                return None
+            if CONFIG.offline_mode:
+                debug(f'Skip remote query for {cve_id} since config.offline_mode is set.', sub='cve')
                 return None
-            # logger.debug(f'{cve_id} not found locally. Use `secator install cves` to install CVEs locally.')
             try:
-                cve_info = requests.get(f'https://cve.circl.lu/api/cve/{cve_id}', timeout=5).json()
-                if not cve_info:
-                    console.print(f'Could not fetch CVE info for cve {cve_id}. Skipping.', highlight=False)
-                    return None
-            except Exception:
-                console.print(f'Could not fetch CVE info for cve {cve_id}. Skipping.', highlight=False)
+                resp = requests.get(f'https://cve.circl.lu/api/cve/{cve_id}', timeout=5)
+                resp.raise_for_status()
+                cve_info = resp.json()
+            except requests.RequestException as e:
+                debug(f'Failed remote query for {cve_id} ({str(e)}).', sub='cve')
                 return None
 
         # Match the CPE string against the affected products CPE FS strings from the CVE data if a CPE was passed.
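
The rewritten remote lookup funnels every failure mode through one handler: raise_for_status() turns HTTP 4xx/5xx responses into exceptions, so a single except requests.RequestException covers connection errors, timeouts, and bad statuses alike. A standalone sketch of the same pattern:

    import requests

    def fetch_cve(cve_id):
        try:
            resp = requests.get(f'https://cve.circl.lu/api/cve/{cve_id}', timeout=5)
            resp.raise_for_status()  # HTTP 4xx/5xx -> requests.HTTPError
            return resp.json()
        except requests.RequestException as e:  # also covers timeouts and connection errors
            print(f'Failed remote query for {cve_id} ({e}).')
            return None
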
@@ -177,14 +220,15 @@ class Vuln(Command):
             cpe_fs = cpe_obj.as_fs()
             # cpe_version = cpe_obj.get_version()[0]
             vulnerable_fs = cve_info['vulnerable_product']
-            # logger.debug(f'Matching CPE {cpe} against {len(vulnerable_fs)} vulnerable products for {cve_id}')
             for fs in vulnerable_fs:
-                if fs == cpe_fs:
-                    # logger.debug(f'Found matching CPE FS {cpe_fs} ! The CPE is vulnerable to CVE {cve_id}')
+                # debug(f'{cve_id}: Testing {cpe_fs} against {fs}', sub='cve')  # for hardcore debugging
+                if Vuln.match_cpes(cpe_fs, fs):
+                    debug(f'{cve_id}: CPE match found for {cpe}.', sub='cve')
                     cpe_match = True
                     tags.append('cpe-match')
-            if not cpe_match:
-                return None
+                    break
+            if not cpe_match:
+                debug(f'{cve_id}: no CPE match found for {cpe}.', sub='cve')
 
         # Parse CVE id and CVSS
         name = id = cve_info['id']
@@ -223,17 +267,9 @@ class Vuln(Command):
         # Set vulnerability severity based on CVSS score
         severity = None
         if cvss:
-            if cvss < 4:
-                severity = 'low'
-            elif cvss < 7:
-                severity = 'medium'
-            elif cvss < 9:
-                severity = 'high'
-            else:
-                severity = 'critical'
+            severity = Vuln.cvss_to_severity(cvss)
 
         # Set confidence
-        confidence = 'low' if not cpe_match else 'high'
         vuln = {
             ID: id,
             NAME: name,
@@ -243,7 +279,6 @@ class Vuln(Command):
             TAGS: tags,
             REFERENCES: [f'https://cve.circl.lu/cve/{id}'] + references,
             DESCRIPTION: description,
-            CONFIDENCE: confidence
         }
         return vuln
 
@@ -257,17 +292,33 @@ class Vuln(Command):
         Returns:
             dict: vulnerability data.
         """
-        reference = f'https://github.com/advisories/{ghsa_id}'
-        response = requests.get(reference)
-        soup = BeautifulSoup(response.text, 'lxml')
+        try:
+            resp = requests.get(f'https://github.com/advisories/{ghsa_id}', timeout=5)
+            resp.raise_for_status()
+        except requests.RequestException as e:
+            debug(f'Failed remote query for {ghsa_id} ({str(e)}).', sub='cve')
+            return None
+        soup = BeautifulSoup(resp.text, 'lxml')
         sidebar_items = soup.find_all('div', {'class': 'discussion-sidebar-item'})
         cve_id = sidebar_items[2].find('div').text.strip()
-        data = Vuln.lookup_cve(cve_id)
-        if data:
-            data[TAGS].append('ghsa')
-            return data
+        vuln = Vuln.lookup_cve(cve_id)
+        if vuln:
+            vuln[TAGS].append('ghsa')
+            return vuln
         return None
 
+    @staticmethod
+    def cvss_to_severity(cvss):
+        if cvss < 4:
+            severity = 'low'
+        elif cvss < 7:
+            severity = 'medium'
+        elif cvss < 9:
+            severity = 'high'
+        else:
+            severity = 'critical'
+        return severity
+
 
 class VulnHttp(Vuln):
     input_type = HOST
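
cvss_to_severity, factored out above so both lookup_cve and the nmap vulners parser can share it, maps CVSS bands to severities: [0, 4) low, [4, 7) medium, [7, 9) high, 9 and above critical. A standalone copy checked against a few sample scores:

    def cvss_to_severity(cvss):
        # Same thresholds as the method added above.
        if cvss < 4:
            return 'low'
        elif cvss < 7:
            return 'medium'
        elif cvss < 9:
            return 'high'
        return 'critical'

    for score in (3.9, 4.0, 6.9, 7.0, 8.9, 9.8):
        print(score, cvss_to_severity(score))
    # 3.9 low, 4.0 medium, 6.9 medium, 7.0 high, 8.9 high, 9.8 critical
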
secator/tasks/dnsxbrute.py CHANGED
@@ -1,5 +1,6 @@
 from secator.decorators import task
-from secator.definitions import (DEFAULT_DNS_WORDLIST, DOMAIN, HOST, RATE_LIMIT, RETRIES, THREADS, WORDLIST, EXTRA_DATA)
+from secator.definitions import (DOMAIN, HOST, RATE_LIMIT, RETRIES, THREADS, WORDLIST, EXTRA_DATA)
+from secator.config import CONFIG
 from secator.output_types import Subdomain
 from secator.tasks._categories import ReconDns
 
@@ -17,7 +18,7 @@ class dnsxbrute(ReconDns):
         THREADS: 'threads',
     }
     opts = {
-        WORDLIST: {'type': str, 'short': 'w', 'default': DEFAULT_DNS_WORDLIST, 'help': 'Wordlist'},
+        WORDLIST: {'type': str, 'short': 'w', 'default': CONFIG.wordlists.defaults.dns, 'help': 'Wordlist'},
         'trace': {'is_flag': True, 'default': False, 'help': 'Perform dns tracing'},
     }
     output_map = {
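
A recurring theme in this release: module-level constants in secator.definitions give way to attributes on the CONFIG object. Collecting the keys that appear in this diff (the dot-access below mirrors the code; the old constant each key replaces is noted in the comment):

    from secator.config import CONFIG

    CONFIG.wordlists.defaults.http          # was DEFAULT_HTTP_WORDLIST
    CONFIG.wordlists.defaults.dns           # was DEFAULT_DNS_WORDLIST
    CONFIG.http.store_responses             # was DEFAULT_STORE_HTTP_RESPONSES
    CONFIG.runners.skip_cve_search          # was DEFAULT_SKIP_CVE_SEARCH
    CONFIG.runners.skip_cve_low_confidence  # new: drop low-confidence CVE matches
    CONFIG.offline_mode                     # new: suppress all remote queries
    CONFIG.dirs.data                        # was DATA_FOLDER
    CONFIG.workflows.exporters              # default workflow exporters
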
secator/tasks/ffuf.py CHANGED
@@ -7,7 +7,7 @@ from secator.definitions import (AUTO_CALIBRATION, CONTENT_LENGTH,
                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED,
                                  PERCENT, PROXY, RATE_LIMIT, RETRIES,
                                  STATUS_CODE, THREADS, TIME, TIMEOUT,
-                                 USER_AGENT, WORDLIST, WORDLISTS_FOLDER)
+                                 USER_AGENT, WORDLIST)
 from secator.output_types import Progress, Url
 from secator.serializers import JSONSerializer, RegexSerializer
 from secator.tasks._categories import HttpFuzzer
@@ -70,7 +70,7 @@ class ffuf(HttpFuzzer):
         },
     }
     encoding = 'ansi'
-    install_cmd = f'go install -v github.com/ffuf/ffuf@latest && sudo git clone https://github.com/danielmiessler/SecLists {WORDLISTS_FOLDER}/seclists || true'  # noqa: E501
+    install_cmd = 'go install -v github.com/ffuf/ffuf@latest'
     install_github_handle = 'ffuf/ffuf'
     proxychains = False
     proxy_socks5 = True
secator/tasks/httpx.py CHANGED
@@ -1,14 +1,14 @@
 import os
 
 from secator.decorators import task
-from secator.definitions import (DEFAULT_HTTPX_FLAGS,
-                                 DEFAULT_STORE_HTTP_RESPONSES, DELAY, DEPTH,
+from secator.definitions import (DEFAULT_HTTPX_FLAGS, DELAY, DEPTH,
                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER,
                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
                                  MATCH_WORDS, METHOD, OPT_NOT_SUPPORTED, PROXY,
                                  RATE_LIMIT, RETRIES, THREADS,
                                  TIMEOUT, URL, USER_AGENT)
+from secator.config import CONFIG
 from secator.tasks._categories import Http
 from secator.utils import sanitize_url
 
@@ -71,7 +71,7 @@ class httpx(Http):
         debug_resp = self.get_opt_value('debug_resp')
         if debug_resp:
             self.cmd = self.cmd.replace('-silent', '')
-        if DEFAULT_STORE_HTTP_RESPONSES:
+        if CONFIG.http.store_responses:
             self.output_response_path = f'{self.reports_folder}/response'
             self.output_screenshot_path = f'{self.reports_folder}/screenshot'
             os.makedirs(self.output_response_path, exist_ok=True)
@@ -98,7 +98,7 @@ class httpx(Http):
 
     @staticmethod
     def on_end(self):
-        if DEFAULT_STORE_HTTP_RESPONSES:
+        if CONFIG.http.store_responses:
             if os.path.exists(self.output_response_path + '/index.txt'):
                 os.remove(self.output_response_path + '/index.txt')
             if os.path.exists(self.output_screenshot_path + '/index.txt'):
secator/tasks/katana.py CHANGED
@@ -4,7 +4,7 @@ from urllib.parse import urlparse
 
 from secator.decorators import task
 from secator.definitions import (CONTENT_TYPE, DEFAULT_KATANA_FLAGS,
-                                 DEFAULT_STORE_HTTP_RESPONSES, DELAY, DEPTH,
+                                 DELAY, DEPTH,
                                  FILTER_CODES, FILTER_REGEX, FILTER_SIZE,
                                  FILTER_WORDS, FOLLOW_REDIRECT, HEADER, HOST,
                                  MATCH_CODES, MATCH_REGEX, MATCH_SIZE,
@@ -12,6 +12,7 @@ from secator.definitions import (CONTENT_TYPE, DEFAULT_KATANA_FLAGS,
                                  RATE_LIMIT, RETRIES, STATUS_CODE,
                                  STORED_RESPONSE_PATH, TECH,
                                  THREADS, TIME, TIMEOUT, URL, USER_AGENT, WEBSERVER, CONTENT_LENGTH)
+from secator.config import CONFIG
 from secator.output_types import Url, Tag
 from secator.tasks._categories import HttpCrawler
 
@@ -106,14 +107,14 @@ class katana(HttpCrawler):
         debug_resp = self.get_opt_value('debug_resp')
         if debug_resp:
             self.cmd = self.cmd.replace('-silent', '')
-        if DEFAULT_STORE_HTTP_RESPONSES:
+        if CONFIG.http.store_responses:
             self.cmd += f' -sr -srd {self.reports_folder}'
 
     @staticmethod
     def on_item(self, item):
         if not isinstance(item, Url):
             return item
-        if DEFAULT_STORE_HTTP_RESPONSES and os.path.exists(item.stored_response_path):
+        if CONFIG.http.store_responses and os.path.exists(item.stored_response_path):
             with open(item.stored_response_path, 'r', encoding='latin-1') as fin:
                 data = fin.read().splitlines(True)
                 first_line = data[0]
@@ -125,5 +126,5 @@ class katana(HttpCrawler):
 
     @staticmethod
     def on_end(self):
-        if DEFAULT_STORE_HTTP_RESPONSES and os.path.exists(self.reports_folder + '/index.txt'):
+        if CONFIG.http.store_responses and os.path.exists(self.reports_folder + '/index.txt'):
             os.remove(self.reports_folder + '/index.txt')
secator/tasks/msfconsole.py CHANGED
@@ -5,9 +5,8 @@ import logging
 from rich.panel import Panel
 
 from secator.decorators import task
-from secator.definitions import (DELAY, FOLLOW_REDIRECT, HEADER, HOST,
-                                 OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT, RETRIES,
-                                 DATA_FOLDER, THREADS, TIMEOUT, USER_AGENT)
+from secator.definitions import (DELAY, FOLLOW_REDIRECT, HEADER, HOST, OPT_NOT_SUPPORTED, PROXY, RATE_LIMIT, RETRIES,
+                                 THREADS, TIMEOUT, USER_AGENT)
 from secator.tasks._categories import VulnMulti
 from secator.utils import get_file_timestamp
 
@@ -84,7 +83,7 @@ class msfconsole(VulnMulti):
 
         # Make a copy and replace vars inside by env vars passed on the CLI
         timestr = get_file_timestamp()
-        out_path = f'{DATA_FOLDER}/msfconsole_{timestr}.rc'
+        out_path = f'{self.reports_folder}/.inputs/msfconsole_{timestr}.rc'
         logger.debug(
             f'Writing formatted resource script to new temp file {out_path}'
         )
secator/tasks/nmap.py CHANGED
@@ -4,16 +4,19 @@ import re
 
 import xmltodict
 
+from secator.config import CONFIG
 from secator.decorators import task
 from secator.definitions import (CONFIDENCE, CVSS_SCORE, DELAY,
                                  DESCRIPTION, EXTRA_DATA, FOLLOW_REDIRECT,
                                  HEADER, HOST, ID, IP, MATCHED_AT, NAME,
                                  OPT_NOT_SUPPORTED, OUTPUT_PATH, PORT, PORTS, PROVIDER,
                                  PROXY, RATE_LIMIT, REFERENCE, REFERENCES,
-                                 RETRIES, SCRIPT, SERVICE_NAME, STATE, TAGS,
-                                 THREADS, TIMEOUT, USER_AGENT)
+                                 RETRIES, SCRIPT, SERVICE_NAME, SEVERITY, STATE, TAGS,
+                                 THREADS, TIMEOUT, TOP_PORTS, USER_AGENT)
 from secator.output_types import Exploit, Port, Vulnerability
+from secator.rich import console
 from secator.tasks._categories import VulnMulti
+from secator.utils import debug
 
 logger = logging.getLogger(__name__)
 
@@ -28,11 +31,12 @@ class nmap(VulnMulti):
     opt_prefix = '--'
     output_types = [Port, Vulnerability, Exploit]
     opts = {
-        PORTS: {'type': str, 'help': 'Ports to scan', 'short': 'p'},
+        PORTS: {'type': str, 'short': 'p', 'help': 'Ports to scan'},
+        TOP_PORTS: {'type': int, 'short': 'tp', 'help': 'Top ports to scan [full, 100, 1000]'},
         SCRIPT: {'type': str, 'default': 'vulners', 'help': 'NSE scripts'},
         # 'tcp_connect': {'type': bool, 'short': 'sT', 'default': False, 'help': 'TCP Connect scan'},
         'tcp_syn_stealth': {'is_flag': True, 'short': 'sS', 'default': False, 'help': 'TCP SYN Stealth'},
-        'output_path': {'type': str, 'short': 'oX', 'default': None, 'help': 'Output XML file path'}
+        'output_path': {'type': str, 'short': 'oX', 'default': None, 'help': 'Output XML file path'},
     }
     opt_key_map = {
         HEADER: OPT_NOT_SUPPORTED,
@@ -114,20 +118,18 @@ class nmapData(dict):
 
             # Get extra data
             extra_data = self._get_extra_data(port)
+            service_name = extra_data['service_name']
+            version_exact = extra_data.get('version_exact', False)
+            conf = extra_data.get('confidence')
+            if not version_exact:
+                console.print(
+                    f'[bold orange1]nmap could not identify an exact version for {service_name} '
+                    f'(detection confidence is {conf}): do not blindy trust the results ![/]'
+                )
 
             # Grab CPEs
             cpes = extra_data.get('cpe', [])
 
-            # Grab service name
-            service_name = ''
-            if 'product' in extra_data:
-                service_name = extra_data['product']
-            elif 'name' in extra_data:
-                service_name = extra_data['name']
-            if 'version' in extra_data:
-                version = extra_data['version']
-                service_name += f'/{version}'
-
             # Get script output
             scripts = self._get_scripts(port)
 
@@ -160,10 +162,17 @@ class nmapData(dict):
                 EXTRA_DATA: extra_data,
             }
             if not func:
-                # logger.debug(f'Script output parser for "{script_id}" is not supported YET.')
+                debug(f'Script output parser for "{script_id}" is not supported YET.', sub='cve')
                 continue
             for vuln in func(output, cpes=cpes):
                 vuln.update(metadata)
+                confidence = 'low'
+                if 'cpe-match' in vuln[TAGS]:
+                    confidence = 'high' if version_exact else 'medium'
+                vuln[CONFIDENCE] = confidence
+                if (CONFIG.runners.skip_cve_low_confidence and vuln[CONFIDENCE] == 'low'):
+                    debug(f'{vuln[ID]}: ignored (low confidence).', sub='cve')
+                    continue
                 yield vuln
 
     #---------------------#
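
The confidence rule added above in plain terms: a finding whose CPE matched is 'high' confidence only when nmap pinned an exact service version, 'medium' when the version was fuzzy, and everything else stays 'low' (and can be dropped wholesale via CONFIG.runners.skip_cve_low_confidence). A standalone restatement:

    def vuln_confidence(tags, version_exact):
        # Mirrors the logic in the hunk above.
        if 'cpe-match' in tags:
            return 'high' if version_exact else 'medium'
        return 'low'

    print(vuln_confidence(['cpe-match'], True))   # high
    print(vuln_confidence(['cpe-match'], False))  # medium
    print(vuln_confidence([], True))              # low
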
@@ -198,40 +207,74 @@ class nmapData(dict):
         return host_cfg.get('address', {}).get('@addr', None)
 
     def _get_extra_data(self, port_cfg):
-        extra_datas = {
+        extra_data = {
             k.lstrip('@'): v
             for k, v in port_cfg.get('service', {}).items()
         }
 
         # Strip product / version strings
-        if 'product' in extra_datas:
-            extra_datas['product'] = extra_datas['product'].lower()
-
-        if 'version' in extra_datas:
-            version_split = extra_datas['version'].split(' ')
-            version = None
+        if 'product' in extra_data:
+            extra_data['product'] = extra_data['product'].lower()
+
+        # Get version and post-process it
+        version = None
+        if 'version' in extra_data:
+            vsplit = extra_data['version'].split(' ')
+            version_exact = True
             os = None
-            if len(version_split) == 3:
-                version, os, extra_version = tuple(version_split)
+            if len(vsplit) == 3:
+                version, os, extra_version = tuple(vsplit)
+                if os == 'or' and extra_version == 'later':
+                    version_exact = False
+                    os = None
                 version = f'{version}-{extra_version}'
-            elif len(version_split) == 2:
-                version, os = tuple(version_split)
-            elif len(version_split) == 1:
-                version = version_split[0]
+            elif len(vsplit) == 2:
+                version, os = tuple(vsplit)
+            elif len(vsplit) == 1:
+                version = vsplit[0]
             else:
-                version = extra_datas['version']
+                version = extra_data['version']
             if os:
-                extra_datas['os'] = os
-            if version:
-                extra_datas['version'] = version
+                extra_data['os'] = os
+            if version:
+                extra_data['version'] = version
+            extra_data['version_exact'] = version_exact
+
+        # Grap service name
+        product = extra_data.get('name', None) or extra_data.get('product', None)
+        if product:
+            service_name = product
+            if version:
+                service_name += f'/{version}'
+            extra_data['service_name'] = service_name
 
         # Grab CPEs
-        cpes = extra_datas.get('cpe', [])
+        cpes = extra_data.get('cpe', [])
         if not isinstance(cpes, list):
             cpes = [cpes]
-        extra_datas['cpe'] = cpes
+        extra_data['cpe'] = cpes
+        debug(f'Found CPEs: {",".join(cpes)}', sub='cve')
+
+        # Grab confidence
+        conf = int(extra_data.get('conf', 0))
+        if conf > 7:
+            confidence = 'high'
+        elif conf > 4:
+            confidence = 'medium'
+        else:
+            confidence = 'low'
+        extra_data['confidence'] = confidence
+
+        # Build custom CPE
+        if product and version:
+            vsplit = version.split('-')
+            version_cpe = vsplit[0] if not version_exact else version
+            cpe = VulnMulti.create_cpe_string(product, version_cpe)
+            if cpe not in cpes:
+                cpes.append(cpe)
+                debug(f'Added new CPE from identified product and version: {cpe}', sub='cve')
 
-        return extra_datas
+        return extra_data
 
     def _get_scripts(self, port_cfg):
         scripts = port_cfg.get('script', [])
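
A worked example of the version post-processing above, using an illustrative nmap banner version of '2.4.7 or later': the 'or later' suffix flags the version as inexact, and the custom CPE later falls back to the base version:

    version = '2.4.7 or later'                 # illustrative nmap version string
    vsplit = version.split(' ')                # ['2.4.7', 'or', 'later']
    version, os_, extra_version = tuple(vsplit)
    version_exact = not (os_ == 'or' and extra_version == 'later')   # False
    version = f'{version}-{extra_version}'     # '2.4.7-later'
    version_cpe = version.split('-')[0] if not version_exact else version
    print(version, version_exact, version_cpe)  # 2.4.7-later False 2.4.7
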
@@ -276,23 +319,23 @@ class nmapData(dict):
                     TAGS: [vuln_id, provider_name]
                 }
                 if provider_name == 'MITRE CVE':
-                    vuln_data = VulnMulti.lookup_cve(vuln['id'], cpes=cpes)
-                    if vuln_data:
-                        vuln.update(vuln_data)
+                    data = VulnMulti.lookup_cve(vuln['id'], cpes=cpes)
+                    if data:
+                        vuln.update(data)
                     yield vuln
                 else:
-                    # logger.debug(f'Vulscan provider {provider_name} is not supported YET.')
+                    debug(f'Vulscan provider {provider_name} is not supported YET.', sub='cve')
                     continue
 
     def _parse_vulners_output(self, out, **kwargs):
-        cpes = []
+        cpes = kwargs.get('cpes', [])
         provider_name = 'vulners'
         for line in out.splitlines():
             if not line:
                 continue
             line = line.strip()
             if line.startswith('cpe:'):
-                cpes.append(line)
+                cpes.append(line.rstrip(':'))
                 continue
             elems = tuple(line.split('\t'))
             vuln = {}
@@ -307,7 +350,8 @@ class nmapData(dict):
                     NAME: name,
                     PROVIDER: provider_name,
                     REFERENCE: reference_url,
-                    '_type': 'exploit'
+                    '_type': 'exploit',
+                    TAGS: [exploit_id, provider_name]
                     # CVSS_SCORE: cvss_score,
                     # CONFIDENCE: 'low'
                 }
@@ -319,23 +363,26 @@ class nmapData(dict):
 
             elif len(elems) == 3:  # vuln
                 vuln_id, vuln_cvss, reference_url = tuple(line.split('\t'))
+                vuln_cvss = float(vuln_cvss)
+                vuln_id = vuln_id.split(':')[-1]
                 vuln_type = vuln_id.split('-')[0]
                 vuln = {
                     ID: vuln_id,
                     NAME: vuln_id,
                     PROVIDER: provider_name,
                     CVSS_SCORE: vuln_cvss,
+                    SEVERITY: VulnMulti.cvss_to_severity(vuln_cvss),
                     REFERENCES: [reference_url],
-                    TAGS: [],
+                    TAGS: [vuln_id, provider_name],
                     CONFIDENCE: 'low'
                 }
-                if vuln_type == 'CVE':
+                if vuln_type == 'CVE' or vuln_type == 'PRION:CVE':
                     vuln[TAGS].append('cve')
-                    vuln_data = VulnMulti.lookup_cve(vuln_id, cpes=cpes)
-                    if vuln_data:
-                        vuln.update(vuln_data)
+                    data = VulnMulti.lookup_cve(vuln_id, cpes=cpes)
+                    if data:
+                        vuln.update(data)
                     yield vuln
                 else:
-                    logger.debug(f'Vulners parser for "{vuln_type}" is not implemented YET.')
+                    debug(f'Vulners parser for "{vuln_type}" is not implemented YET.', sub='cve')
             else:
-                logger.error(f'Unrecognized vulners output: {elems}')
+                debug(f'Unrecognized vulners output: {elems}', sub='cve')
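
The two pre-processing lines added to the vuln branch normalize vulners ids: scores arrive as strings, and some ids carry a namespace prefix. A made-up tab-separated vulners line walked through the same steps:

    line = 'PRION:CVE-2021-44228\t10.0\thttps://vulners.com/...'   # illustrative vulners output line
    vuln_id, vuln_cvss, reference_url = tuple(line.split('\t'))
    vuln_cvss = float(vuln_cvss)          # 10.0 -> severity 'critical' via cvss_to_severity
    vuln_id = vuln_id.split(':')[-1]      # 'CVE-2021-44228' (namespace prefix stripped)
    vuln_type = vuln_id.split('-')[0]     # 'CVE' -> takes the CVE lookup path
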
secator/tasks/nuclei.py CHANGED
@@ -85,8 +85,12 @@ class nuclei(VulnMulti):
     def extra_data_extractor(item):
         data = {}
         data['data'] = item.get('extracted-results', [])
+        data['type'] = item.get('type', '')
         data['template_id'] = item['template-id']
         data['template_url'] = item.get('template-url', '')
+        for k, v in item.get('meta', {}).items():
+            data['data'].append(f'{k}: {v}')
+        data['metadata'] = item.get('metadata', {})
         return data
 
     @staticmethod
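
Applying the updated extractor to an illustrative nuclei JSONL record (field names follow nuclei's JSON output; the values, and calling the extractor directly as a staticmethod, are assumptions for illustration) shows where each new field lands:

    item = {
        'template-id': 'git-config',
        'type': 'http',
        'extracted-results': ['repositoryformatversion = 0'],
        'meta': {'username': 'admin'},
        'metadata': {'max-request': 1},
    }
    data = nuclei.extra_data_extractor(item)
    # data['type']     -> 'http'
    # data['metadata'] -> {'max-request': 1}
    # data['data']     -> ['repositoryformatversion = 0', 'username: admin']
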