secator-0.6.0-py3-none-any.whl → secator-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (84)
  1. secator/celery.py +160 -185
  2. secator/celery_utils.py +268 -0
  3. secator/cli.py +327 -106
  4. secator/config.py +27 -11
  5. secator/configs/workflows/host_recon.yaml +5 -3
  6. secator/configs/workflows/port_scan.yaml +7 -3
  7. secator/configs/workflows/url_bypass.yaml +10 -0
  8. secator/configs/workflows/url_vuln.yaml +1 -1
  9. secator/decorators.py +169 -92
  10. secator/definitions.py +10 -3
  11. secator/exporters/__init__.py +7 -5
  12. secator/exporters/console.py +10 -0
  13. secator/exporters/csv.py +27 -19
  14. secator/exporters/gdrive.py +16 -11
  15. secator/exporters/json.py +3 -1
  16. secator/exporters/table.py +30 -2
  17. secator/exporters/txt.py +20 -16
  18. secator/hooks/gcs.py +53 -0
  19. secator/hooks/mongodb.py +53 -27
  20. secator/output_types/__init__.py +29 -11
  21. secator/output_types/_base.py +11 -1
  22. secator/output_types/error.py +36 -0
  23. secator/output_types/exploit.py +1 -1
  24. secator/output_types/info.py +24 -0
  25. secator/output_types/ip.py +7 -0
  26. secator/output_types/port.py +8 -1
  27. secator/output_types/progress.py +5 -0
  28. secator/output_types/record.py +3 -1
  29. secator/output_types/stat.py +33 -0
  30. secator/output_types/tag.py +6 -4
  31. secator/output_types/url.py +6 -3
  32. secator/output_types/vulnerability.py +3 -2
  33. secator/output_types/warning.py +24 -0
  34. secator/report.py +55 -23
  35. secator/rich.py +44 -39
  36. secator/runners/_base.py +622 -635
  37. secator/runners/_helpers.py +5 -91
  38. secator/runners/celery.py +18 -0
  39. secator/runners/command.py +364 -211
  40. secator/runners/scan.py +8 -24
  41. secator/runners/task.py +21 -55
  42. secator/runners/workflow.py +41 -40
  43. secator/scans/__init__.py +28 -0
  44. secator/serializers/dataclass.py +6 -0
  45. secator/serializers/json.py +10 -5
  46. secator/serializers/regex.py +12 -4
  47. secator/tasks/_categories.py +5 -2
  48. secator/tasks/bbot.py +293 -0
  49. secator/tasks/bup.py +98 -0
  50. secator/tasks/cariddi.py +38 -49
  51. secator/tasks/dalfox.py +3 -0
  52. secator/tasks/dirsearch.py +12 -23
  53. secator/tasks/dnsx.py +49 -30
  54. secator/tasks/dnsxbrute.py +2 -0
  55. secator/tasks/feroxbuster.py +8 -17
  56. secator/tasks/ffuf.py +3 -2
  57. secator/tasks/fping.py +3 -3
  58. secator/tasks/gau.py +5 -0
  59. secator/tasks/gf.py +2 -2
  60. secator/tasks/gospider.py +4 -0
  61. secator/tasks/grype.py +9 -9
  62. secator/tasks/h8mail.py +31 -41
  63. secator/tasks/httpx.py +58 -21
  64. secator/tasks/katana.py +18 -22
  65. secator/tasks/maigret.py +26 -24
  66. secator/tasks/mapcidr.py +2 -3
  67. secator/tasks/msfconsole.py +4 -16
  68. secator/tasks/naabu.py +3 -1
  69. secator/tasks/nmap.py +50 -35
  70. secator/tasks/nuclei.py +9 -2
  71. secator/tasks/searchsploit.py +17 -9
  72. secator/tasks/subfinder.py +5 -1
  73. secator/tasks/wpscan.py +79 -93
  74. secator/template.py +61 -45
  75. secator/thread.py +24 -0
  76. secator/utils.py +330 -80
  77. secator/utils_test.py +48 -23
  78. secator/workflows/__init__.py +28 -0
  79. {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/METADATA +11 -5
  80. secator-0.7.0.dist-info/RECORD +115 -0
  81. {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/WHEEL +1 -1
  82. secator-0.6.0.dist-info/RECORD +0 -101
  83. {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/entry_points.txt +0 -0
  84. {secator-0.6.0.dist-info → secator-0.7.0.dist-info}/licenses/LICENSE +0 -0
secator/exporters/csv.py CHANGED
@@ -1,29 +1,37 @@
 import csv as _csv
 
+from dataclasses import fields
+
 from secator.exporters._base import Exporter
 from secator.rich import console
+from secator.output_types import FINDING_TYPES
+from secator.output_types import Info
 
 
 class CsvExporter(Exporter):
-	def send(self):
-		results = self.report.data['results']
-		csv_paths = []
+	def send(self):
+		results = self.report.data['results']
+		if not results:
+			return
+		csv_paths = []
 
-		for output_type, items in results.items():
-			items = [i.toDict() for i in items]
-			if not items:
-				continue
-			keys = list(items[0].keys())
-			csv_path = f'{self.report.output_folder}/report_{output_type}.csv'
-			csv_paths.append(csv_path)
-			with open(csv_path, 'w', newline='') as output_file:
-				dict_writer = _csv.DictWriter(output_file, keys)
-				dict_writer.writeheader()
-				dict_writer.writerows(items)
+		for output_type, items in results.items():
+			output_cls = [o for o in FINDING_TYPES if o._type == output_type][0]
+			keys = [o.name for o in fields(output_cls)]
+			items = [i.toDict() for i in items]
+			if not items:
+				continue
+			csv_path = f'{self.report.output_folder}/report_{output_type}.csv'
+			csv_paths.append(csv_path)
+			with open(csv_path, 'w', newline='') as output_file:
+				dict_writer = _csv.DictWriter(output_file, keys)
+				dict_writer.writeheader()
+				dict_writer.writerows(items)
 
-		if len(csv_paths) == 1:
-			csv_paths_str = csv_paths[0]
-		else:
-			csv_paths_str = '\n • ' + '\n • '.join(csv_paths)
+		if len(csv_paths) == 1:
+			csv_paths_str = csv_paths[0]
+		else:
+			csv_paths_str = '\n • ' + '\n • '.join(csv_paths)
 
-		console.print(f':file_cabinet: Saved CSV reports to {csv_paths_str}')
+		info = Info(message=f'Saved CSV reports to {csv_paths_str}')
+		console.print(info)
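
Note: the rewritten exporter derives CSV headers from the output type's dataclass schema (via dataclasses.fields) instead of from the first item's keys, so the column set is complete and stable even when the first row has empty fields. A minimal sketch of the difference, using a hypothetical simplified finding type (not the real secator.output_types.Port):

	from dataclasses import dataclass, fields

	@dataclass
	class FakePort:
		port: int = 0
		host: str = ''
		service_name: str = ''

	items = [{'port': 80, 'host': 'example.com'}]   # first row lacks 'service_name'
	old_keys = list(items[0].keys())                # 0.6.0: ['port', 'host'] -- column silently dropped
	new_keys = [f.name for f in fields(FakePort)]   # 0.7.0: ['port', 'host', 'service_name']
	print(old_keys, new_keys)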
secator/exporters/gdrive.py CHANGED
@@ -4,6 +4,7 @@ import yaml
 
 from secator.config import CONFIG
 from secator.exporters._base import Exporter
+from secator.output_types import Info, Error
 from secator.rich import console
 from secator.utils import pluralize
 
@@ -16,20 +17,22 @@ class GdriveExporter(Exporter):
 		title = self.report.data['info']['title']
 		sheet_title = f'{self.report.data["info"]["title"]}_{self.report.timestamp}'
 		results = self.report.data['results']
-		if not CONFIG.addons.google.credentials_path:
-			console.print(':file_cabinet: Missing CONFIG.addons.google.credentials_path to save to Google Sheets', style='red')
+		if not CONFIG.addons.gdrive.credentials_path:
+			error = Error('Missing CONFIG.addons.gdrive.credentials_path to save to Google Sheets')
+			console.print(error)
 			return
-		if not CONFIG.addons.google.drive_parent_folder_id:
-			console.print(':file_cabinet: Missing CONFIG.addons.google.drive_parent_folder_id to save to Google Sheets.', style='red')  # noqa: E501
+		if not CONFIG.addons.gdrive.drive_parent_folder_id:
+			error = Error('Missing CONFIG.addons.gdrive.drive_parent_folder_id to save to Google Sheets.')
+			console.print(error)
 			return
-		client = gspread.service_account(CONFIG.addons.google.credentials_path)
+		client = gspread.service_account(CONFIG.addons.gdrive.credentials_path)
 
 		# Create workspace folder if it doesn't exist
-		folder_id = self.get_folder_by_name(ws, parent_id=CONFIG.addons.google.drive_parent_folder_id)
+		folder_id = self.get_folder_by_name(ws, parent_id=CONFIG.addons.gdrive.drive_parent_folder_id)
 		if ws and not folder_id:
 			folder_id = self.create_folder(
 				folder_name=ws,
-				parent_id=CONFIG.addons.google.drive_parent_folder_id)
+				parent_id=CONFIG.addons.gdrive.drive_parent_folder_id)
 
 		# Create worksheet
 		sheet = client.create(title, folder_id=folder_id)
@@ -57,8 +60,9 @@ class GdriveExporter(Exporter):
 			]
 			csv_path = f'{self.report.output_folder}/report_{output_type}.csv'
 			if not os.path.exists(csv_path):
-				console.print(
+				error = Error(
 					f'Unable to find CSV at {csv_path}. For Google sheets reports, please enable CSV reports as well.')
+				console.print(error)
 				return
 			sheet_title = pluralize(output_type).upper()
 			ws = sheet.add_worksheet(sheet_title, rows=len(items), cols=len(keys))
@@ -79,12 +83,13 @@ class GdriveExporter(Exporter):
 			ws = sheet.get_worksheet(0)
 			sheet.del_worksheet(ws)
 
-		console.print(f':file_cabinet: Saved Google Sheets reports to [u magenta]{sheet.url}[/]')
+		info = Info(message=f'Saved Google Sheets reports to [u magenta]{sheet.url}')
+		console.print(info)
 
 	def create_folder(self, folder_name, parent_id=None):
 		from googleapiclient.discovery import build
 		from google.oauth2 import service_account
-		creds = service_account.Credentials.from_service_account_file(CONFIG.addons.google.credentials_path)
+		creds = service_account.Credentials.from_service_account_file(CONFIG.addons.gdrive.credentials_path)
 		service = build('drive', 'v3', credentials=creds)
 		body = {
 			'name': folder_name,
@@ -98,7 +103,7 @@ class GdriveExporter(Exporter):
 	def list_folders(self, parent_id):
 		from googleapiclient.discovery import build
 		from google.oauth2 import service_account
-		creds = service_account.Credentials.from_service_account_file(CONFIG.addons.google.credentials_path)
+		creds = service_account.Credentials.from_service_account_file(CONFIG.addons.gdrive.credentials_path)
 		service = build('drive', 'v3', credentials=creds)
 		driveid = service.files().get(fileId='root').execute()['id']
 		response = service.files().list(
secator/exporters/json.py CHANGED
@@ -1,4 +1,5 @@
 from secator.exporters._base import Exporter
+from secator.output_types import Info
 from secator.rich import console
 from secator.serializers.dataclass import dumps_dataclass
 
@@ -11,4 +12,5 @@ class JsonExporter(Exporter):
 		with open(json_path, 'w') as f:
 			f.write(dumps_dataclass(self.report.data, indent=2))
 
-		console.print(f':file_cabinet: Saved JSON report to {json_path}')
+		info = Info(f'Saved JSON report to {json_path}')
+		console.print(info)
secator/exporters/table.py CHANGED
@@ -1,7 +1,35 @@
 from secator.exporters._base import Exporter
-from secator.utils import print_results_table
+from secator.utils import pluralize
+from secator.rich import build_table, console
+from rich.markdown import Markdown
+from secator.output_types import OutputType
 
 
 class TableExporter(Exporter):
 	def send(self):
-		print_results_table(self.report.runner.results, self.report.title)
+		results = self.report.data['results']
+		if not results:
+			return
+		title = self.report.title
+		_print = console.print
+		_print()
+		if title:
+			title = ' '.join(title.capitalize().split('_')) + ' results'
+			h1 = Markdown(f'# {title}')
+			_print(h1, style='bold magenta', width=50)
+			_print()
+		for output_type, items in results.items():
+			if output_type == 'progress':
+				continue
+			if items:
+				is_output_type = isinstance(items[0], OutputType)
+				output_fields = items[0]._table_fields if is_output_type else None
+				sort_by = items[0]._sort_by if is_output_type else []
+				_table = build_table(
+					items,
+					output_fields=output_fields,
+					sort_by=sort_by)
+				title = pluralize(items[0]._type).upper() if is_output_type else 'Results'
+				_print(f':wrench: {title}', style='bold gold3', justify='left')
+				_print(_table)
+				_print()
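
Note: the exporter now reads per-type display hints (_table_fields, _sort_by) straight from the output types; the new Error and Info classes further down define exactly these attributes. A rough usage sketch, assuming Url takes a url keyword as its first field (not confirmed by this diff):

	from secator.output_types import Url
	from secator.rich import build_table, console

	urls = [Url(url='https://example.com')]
	table = build_table(
		urls,
		output_fields=urls[0]._table_fields,  # per-type column selection
		sort_by=urls[0]._sort_by)             # per-type sort keys
	console.print(table)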
secator/exporters/txt.py CHANGED
@@ -1,24 +1,28 @@
 from secator.exporters._base import Exporter
+from secator.output_types import Info
 from secator.rich import console
 
 
 class TxtExporter(Exporter):
-	def send(self):
-		results = self.report.data['results']
-		txt_paths = []
+	def send(self):
+		results = self.report.data['results']
+		if not results:
+			return
+		txt_paths = []
 
-		for output_type, items in results.items():
-			items = [str(i) for i in items]
-			if not items:
-				continue
-			txt_path = f'{self.report.output_folder}/report_{output_type}.txt'
-			with open(txt_path, 'w') as f:
-				f.write('\n'.join(items))
-			txt_paths.append(txt_path)
+		for output_type, items in results.items():
+			items = [str(i) for i in items]
+			if not items:
+				continue
+			txt_path = f'{self.report.output_folder}/report_{output_type}.txt'
+			with open(txt_path, 'w') as f:
+				f.write('\n'.join(items))
+			txt_paths.append(txt_path)
 
-		if len(txt_paths) == 1:
-			txt_paths_str = txt_paths[0]
-		else:
-			txt_paths_str = '\n • ' + '\n • '.join(txt_paths)
+		if len(txt_paths) == 1:
+			txt_paths_str = txt_paths[0]
+		else:
+			txt_paths_str = '\n • ' + '\n • '.join(txt_paths)
 
-		console.print(f':file_cabinet: Saved TXT reports to {txt_paths_str}')
+		info = Info(f'Saved TXT reports to {txt_paths_str}')
+		console.print(info)
secator/hooks/gcs.py ADDED
@@ -0,0 +1,53 @@
+from pathlib import Path
+from time import time
+
+from google.cloud import storage
+
+from secator.config import CONFIG
+from secator.runners import Task
+from secator.thread import Thread
+from secator.utils import debug
+
+
+GCS_BUCKET_NAME = CONFIG.addons.gcs.bucket_name
+ITEMS_TO_SEND = {
+	'url': ['screenshot_path']
+}
+
+
+def process_item(self, item):
+	if item._type not in ITEMS_TO_SEND.keys():
+		return item
+	if not GCS_BUCKET_NAME:
+		debug('skipped since addons.gcs.bucket_name is empty.', sub='hooks.gcs')
+		return item
+	to_send = ITEMS_TO_SEND[item._type]
+	for k, v in item.toDict().items():
+		if k in to_send and v:
+			path = Path(v)
+			if not path.exists():
+				continue
+			ext = path.suffix
+			blob_name = f'{item._uuid}_{k}{ext}'
+			t = Thread(target=upload_blob, args=(GCS_BUCKET_NAME, v, blob_name))
+			t.start()
+			self.threads.append(t)
+			setattr(item, k, f'gs://{GCS_BUCKET_NAME}/{blob_name}')
+	return item
+
+
+def upload_blob(bucket_name, source_file_name, destination_blob_name):
+	"""Uploads a file to the bucket."""
+	start_time = time()
+	storage_client = storage.Client()
+	bucket = storage_client.bucket(bucket_name)
+	blob = bucket.blob(destination_blob_name)
+	blob.upload_from_filename(source_file_name)
+	end_time = time()
+	elapsed = end_time - start_time
+	debug(f'in {elapsed:.4f}s', obj={'blob': 'CREATED', 'blob_name': destination_blob_name, 'bucket': bucket_name}, obj_after=False, sub='hooks.gcs', verbose=True)  # noqa: E501
+
+
+HOOKS = {
+	Task: {'on_item': [process_item]}
+}
secator/hooks/mongodb.py CHANGED
@@ -6,7 +6,7 @@ from bson.objectid import ObjectId
 from celery import shared_task
 
 from secator.config import CONFIG
-from secator.output_types import OUTPUT_TYPES
+from secator.output_types import FINDING_TYPES
 from secator.runners import Scan, Task, Workflow
 from secator.utils import debug, escape_mongodb_url
 
@@ -15,11 +15,27 @@ from secator.utils import debug, escape_mongodb_url
 
 MONGODB_URL = CONFIG.addons.mongodb.url
 MONGODB_UPDATE_FREQUENCY = CONFIG.addons.mongodb.update_frequency
-MAX_POOL_SIZE = 100
+MONGODB_CONNECT_TIMEOUT = CONFIG.addons.mongodb.server_selection_timeout_ms
+MONGODB_MAX_POOL_SIZE = CONFIG.addons.mongodb.max_pool_size
 
 logger = logging.getLogger(__name__)
 
-client = pymongo.MongoClient(escape_mongodb_url(MONGODB_URL), maxPoolSize=MAX_POOL_SIZE)
+client = pymongo.MongoClient(
+	escape_mongodb_url(MONGODB_URL),
+	maxPoolSize=MONGODB_MAX_POOL_SIZE,
+	serverSelectionTimeoutMS=MONGODB_CONNECT_TIMEOUT
+)
+
+
+def get_runner_dbg(runner):
+	"""Runner debug object"""
+	return {
+		runner.unique_name: runner.status,
+		'type': runner.config.type,
+		'class': runner.__class__.__name__,
+		'caller': runner.config.name,
+		**runner.context
+	}
 
 
 def update_runner(self):
@@ -27,25 +43,19 @@
 	type = self.config.type
 	collection = f'{type}s'
 	update = self.toDict()
-	debug_obj = {'type': 'runner', 'name': self.name, 'status': self.status}
 	chunk = update.get('chunk')
 	_id = self.context.get(f'{type}_chunk_id') if chunk else self.context.get(f'{type}_id')
-	debug('update', sub='hooks.mongodb', id=_id, obj=update, obj_after=True, level=4)
+	debug('to_update', sub='hooks.mongodb', id=_id, obj=get_runner_dbg(self), obj_after=True, obj_breaklines=False, verbose=True)  # noqa: E501
 	start_time = time.time()
 	if _id:
-		delta = start_time - self.last_updated if self.last_updated else MONGODB_UPDATE_FREQUENCY
-		if self.last_updated and delta < MONGODB_UPDATE_FREQUENCY and self.status == 'RUNNING':
-			debug(f'skipped ({delta:>.2f}s < {MONGODB_UPDATE_FREQUENCY}s)',
-				sub='hooks.mongodb', id=_id, obj=debug_obj, obj_after=False, level=3)
-			return
 		db = client.main
 		start_time = time.time()
 		db[collection].update_one({'_id': ObjectId(_id)}, {'$set': update})
 		end_time = time.time()
 		elapsed = end_time - start_time
 		debug(
-			f'[dim gold4]updated in {elapsed:.4f}s[/]', sub='hooks.mongodb', id=_id, obj=debug_obj, obj_after=False, level=2)
-		self.last_updated = start_time
+			f'[dim gold4]updated in {elapsed:.4f}s[/]', sub='hooks.mongodb', id=_id, obj=get_runner_dbg(self), obj_after=False)  # noqa: E501
+		self.last_updated_db = start_time
 	else:  # sync update and save result to runner object
 		runner = db[collection].insert_one(update)
 		_id = str(runner.inserted_id)
@@ -55,13 +65,16 @@
 		self.context[f'{type}_id'] = _id
 	end_time = time.time()
 	elapsed = end_time - start_time
-	debug(f'created in {elapsed:.4f}s', sub='hooks.mongodb', id=_id, obj=debug_obj, obj_after=False, level=2)
+	debug(f'in {elapsed:.4f}s', sub='hooks.mongodb', id=_id, obj=get_runner_dbg(self), obj_after=False)


 def update_finding(self, item):
+	if type(item) not in FINDING_TYPES:
+		return item
 	start_time = time.time()
 	db = client.main
 	update = item.toDict()
+	_type = item._type
 	_id = ObjectId(item._uuid) if ObjectId.is_valid(item._uuid) else None
 	if _id:
 		finding = db['findings'].update_one({'_id': _id}, {'$set': update})
@@ -72,7 +85,14 @@
 		status = 'CREATED'
 	end_time = time.time()
 	elapsed = end_time - start_time
-	debug(f'in {elapsed:.4f}s', sub='hooks.mongodb', id=str(item._uuid), obj={'finding': status}, obj_after=False)
+	debug_obj = {
+		_type: status,
+		'type': 'finding',
+		'class': self.__class__.__name__,
+		'caller': self.config.name,
+		**self.context
+	}
+	debug(f'in {elapsed:.4f}s', sub='hooks.mongodb', id=str(item._uuid), obj=debug_obj, obj_after=False)  # noqa: E501
 	return item
 
 
@@ -80,20 +100,23 @@ def find_duplicates(self):
 	ws_id = self.toDict().get('context', {}).get('workspace_id')
 	if not ws_id:
 		return
-	celery_id = tag_duplicates.delay(ws_id)
-	debug(f'running duplicate check on workspace {ws_id}', id=celery_id, sub='hooks.mongodb')
+	if self.sync:
+		debug(f'running duplicate check on workspace {ws_id}', sub='hooks.mongodb')
+		tag_duplicates(ws_id)
+	else:
+		celery_id = tag_duplicates.delay(ws_id)
+		debug(f'running duplicate check on workspace {ws_id}', id=celery_id, sub='hooks.mongodb')
 
 
 def load_finding(obj):
 	finding_type = obj['_type']
 	klass = None
-	for otype in OUTPUT_TYPES:
+	for otype in FINDING_TYPES:
 		if finding_type == otype.get_name():
			klass = otype
			item = klass.load(obj)
			item._uuid = str(obj['_id'])
			return item
-	debug('could not load Secator output type from MongoDB object', obj=obj, sub='hooks.mongodb')
 	return None
 
 
@@ -149,18 +172,19 @@ def tag_duplicates(ws_id: str = None):
 				'seen dupes': len(seen_dupes)
 			},
 			id=ws_id,
-			sub='hooks.mongodb')
+			sub='hooks.mongodb.duplicates',
+			verbose=True)
 		tmp_duplicates_ids = list(dict.fromkeys([i._uuid for i in tmp_duplicates]))
-		debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb')
+		debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
 
 		# Update latest object as non-duplicate
 		if tmp_duplicates:
 			duplicates.extend([f for f in tmp_duplicates])
 			db.findings.update_one({'_id': ObjectId(item._uuid)}, {'$set': {'_related': tmp_duplicates_ids}})
-			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb')
+			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
 			non_duplicates.append(item)
 		else:
-			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb')
+			debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb.duplicates', verbose=True)
 			non_duplicates.append(item)
 
 	# debug(f'found {len(duplicates)} total duplicates')
@@ -184,19 +208,21 @@
 			'duplicates': len(duplicates_ids),
 			'non-duplicates': len(non_duplicates_ids)
 		},
-		sub='hooks.mongodb')
+		sub='hooks.mongodb.duplicates')
 
 
-MONGODB_HOOKS = {
+HOOKS = {
 	Scan: {
+		'on_init': [update_runner],
 		'on_start': [update_runner],
-		'on_iter': [update_runner],
+		'on_interval': [update_runner],
 		'on_duplicate': [update_finding],
 		'on_end': [update_runner],
 	},
 	Workflow: {
+		'on_init': [update_runner],
 		'on_start': [update_runner],
-		'on_iter': [update_runner],
+		'on_interval': [update_runner],
 		'on_duplicate': [update_finding],
 		'on_end': [update_runner],
 	},
@@ -205,7 +231,7 @@ MONGODB_HOOKS = {
 		'on_start': [update_runner],
 		'on_item': [update_finding],
 		'on_duplicate': [update_finding],
-		'on_iter': [update_runner],
+		'on_interval': [update_runner],
 		'on_end': [update_runner, find_duplicates]
 	}
 }
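
Two behavioral notes on this file: update_runner loses its client-side write throttle (last_updated / MONGODB_UPDATE_FREQUENCY), presumably because the renamed on_interval hook now controls update cadence, and the MongoClient gains an explicit server-selection timeout. The timeout matters because pymongo's default is 30s, so an unreachable MongoDB would previously stall every hook call. A minimal sketch of the failure mode, with hypothetical values in place of the CONFIG.addons.mongodb settings:

	import pymongo

	client = pymongo.MongoClient(
		'mongodb://localhost:27017',
		maxPoolSize=10,
		serverSelectionTimeoutMS=5000)
	try:
		client.admin.command('ping')  # forces server selection
	except pymongo.errors.ServerSelectionTimeoutError:
		print('MongoDB unreachable, failing fast instead of hanging for 30s')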
secator/output_types/__init__.py CHANGED
@@ -1,15 +1,20 @@
 __all__ = [
-	'OutputType',
-	'Ip',
-	'Port',
-	'Record',
-	'Subdomain',
-	'Url',
-	'UserAccount',
-	'Vulnerability'
+	'Error',
+	'OutputType',
+	'Info',
+	'Ip',
+	'Port',
+	'Progress',
+	'Record',
+	'Stat',
+	'Subdomain',
+	'Url',
+	'UserAccount',
+	'Vulnerability',
+	'Warning',
 ]
-from secator.output_types._base import OutputType  # noqa: F401
-from secator.output_types.progress import Progress  # noqa: F401
+from secator.output_types._base import OutputType
+from secator.output_types.progress import Progress
 from secator.output_types.ip import Ip
 from secator.output_types.exploit import Exploit
 from secator.output_types.port import Port
@@ -20,5 +25,18 @@ from secator.output_types.url import Url
 from secator.output_types.user_account import UserAccount
 from secator.output_types.vulnerability import Vulnerability
 from secator.output_types.record import Record
+from secator.output_types.info import Info
+from secator.output_types.warning import Warning
+from secator.output_types.error import Error
+from secator.output_types.stat import Stat
 
-OUTPUT_TYPES = [Target, Progress, Subdomain, Ip, Port, Url, Tag, Exploit, UserAccount, Vulnerability, Record]
+EXECUTION_TYPES = [
+	Target, Progress, Info, Warning, Error
+]
+STAT_TYPES = [
+	Stat
+]
+FINDING_TYPES = [
+	Subdomain, Ip, Port, Url, Tag, Exploit, UserAccount, Vulnerability
+]
+OUTPUT_TYPES = FINDING_TYPES + EXECUTION_TYPES + STAT_TYPES
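
The flat OUTPUT_TYPES list is now partitioned into findings, execution events, and stats, which is what lets consumers like the MongoDB hook above skip non-finding items with a single membership test. Note that Record, though still imported, appears in none of the three lists, so it drops out of OUTPUT_TYPES entirely. A minimal filtering sketch (the helper name is hypothetical):

	from secator.output_types import EXECUTION_TYPES, FINDING_TYPES

	def split_results(results):
		"""Split a mixed result stream into findings and execution events."""
		findings = [r for r in results if type(r) in FINDING_TYPES]
		events = [r for r in results if type(r) in EXECUTION_TYPES]
		return findings, events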
secator/output_types/_base.py CHANGED
@@ -1,6 +1,8 @@
 import logging
 import re
 from dataclasses import _MISSING_TYPE, dataclass, fields
+from secator.definitions import DEBUG
+from secator.rich import console
 
 logger = logging.getLogger(__name__)
 
@@ -66,7 +68,15 @@ class OutputType:
 			if key in output_map:
 				mapped_key = output_map[key]
 				if callable(mapped_key):
-					mapped_val = mapped_key(item)
+					try:
+						mapped_val = mapped_key(item)
+					except Exception as e:
+						mapped_val = None
+						if DEBUG > 1:
+							console.print_exception(show_locals=True)
+						raise TypeError(
+							f'Fail to transform value for "{key}" using output_map function. Exception: '
+							f'{type(e).__name__}: {str(e)}')
 				else:
 					mapped_val = item.get(mapped_key)
 				new_item[key] = mapped_val
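
The guarded branch is the callable case of output_map, where a task maps raw tool output into typed fields; a transform that throws now surfaces as a TypeError naming the offending field instead of an anonymous crash mid-parse. A sketch of the failure mode, with hypothetical field names:

	# An output_map mixes plain key remaps with callable transforms.
	output_map = {
		'host': 'hostname',                      # plain remap: item['hostname'] -> host
		'port': lambda item: int(item['port']),  # callable: raises KeyError if absent
	}

	item = {'hostname': 'example.com'}  # no 'port' key
	# 0.7.0 catches the KeyError and re-raises:
	#   TypeError: Fail to transform value for "port" using output_map function.
	#   Exception: KeyError: 'port'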
secator/output_types/error.py ADDED
@@ -0,0 +1,36 @@
+from dataclasses import dataclass, field
+import time
+from secator.output_types import OutputType
+from secator.utils import rich_to_ansi, traceback_as_string
+
+
+@dataclass
+class Error(OutputType):
+	message: str
+	traceback: str = field(default='', compare=False)
+	_source: str = field(default='', repr=True)
+	_type: str = field(default='error', repr=True)
+	_timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+	_uuid: str = field(default='', repr=True, compare=False)
+	_context: dict = field(default_factory=dict, repr=True, compare=False)
+	_duplicate: bool = field(default=False, repr=True, compare=False)
+	_related: list = field(default_factory=list, compare=False)
+
+	_table_fields = ['message', 'traceback']
+	_sort_by = ('_timestamp',)
+
+	def from_exception(e, **kwargs):
+		message = type(e).__name__
+		if str(e):
+			message += f': {str(e)}'
+		return Error(message=message, traceback=traceback_as_string(e), **kwargs)
+
+	def __str__(self):
+		return self.message
+
+	def __repr__(self):
+		s = f'[bold red]❌ {self.message}[/]'
+		if self.traceback:
+			traceback_pretty = '   ' + self.traceback.replace('\n', '\n   ')
+			s += f'\n[dim]{traceback_pretty}[/]'
+		return rich_to_ansi(s)
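
Error is what replaces the ad-hoc red console strings in the exporters above; from_exception packs the exception name, message, and traceback into one serializable result object. Illustrative usage (the surrounding try/except is ours, not from the diff):

	from secator.output_types import Error

	try:
		1 / 0
	except Exception as e:
		err = Error.from_exception(e, _source='demo')
		print(str(err))   # ZeroDivisionError: division by zero
		print(repr(err))  # red ❌ line plus a dimmed, indented traceback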
secator/output_types/exploit.py CHANGED
@@ -17,7 +17,7 @@ class Exploit(OutputType):
 	tags: list = field(default_factory=list, compare=False)
 	extra_data: dict = field(default_factory=dict, compare=False)
 	_source: str = field(default='', repr=True)
-	_type: str = field(default='vulnerability', repr=True)
+	_type: str = field(default='exploit', repr=True)
 	_timestamp: int = field(default_factory=lambda: time.time(), compare=False)
 	_uuid: str = field(default='', repr=True, compare=False)
 	_context: dict = field(default_factory=dict, repr=True, compare=False)
secator/output_types/info.py ADDED
@@ -0,0 +1,24 @@
+from dataclasses import dataclass, field
+import time
+from secator.output_types import OutputType
+from secator.utils import rich_to_ansi
+
+
+@dataclass
+class Info(OutputType):
+	message: str
+	task_id: str = field(default='', compare=False)
+	_source: str = field(default='', repr=True)
+	_type: str = field(default='info', repr=True)
+	_timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+	_uuid: str = field(default='', repr=True, compare=False)
+	_context: dict = field(default_factory=dict, repr=True, compare=False)
+	_duplicate: bool = field(default=False, repr=True, compare=False)
+	_related: list = field(default_factory=list, compare=False)
+
+	_table_fields = ['message', 'task_id']
+	_sort_by = ('_timestamp',)
+
+	def __repr__(self):
+		s = f" ℹ️ {self.message}"
+		return rich_to_ansi(s)
secator/output_types/ip.py CHANGED
@@ -1,16 +1,23 @@
 import time
 from dataclasses import dataclass, field
+from enum import Enum
 
 from secator.definitions import ALIVE, IP
 from secator.output_types import OutputType
 from secator.utils import rich_to_ansi
 
 
+class IpProtocol(str, Enum):
+	IPv6 = 'IPv6'
+	IPv4 = 'IPv4'
+
+
 @dataclass
 class Ip(OutputType):
 	ip: str
 	host: str = ''
 	alive: bool = False
+	protocol: str = field(default=IpProtocol.IPv4)
 	_source: str = field(default='', repr=True)
 	_type: str = field(default='ip', repr=True)
 	_timestamp: int = field(default_factory=lambda: time.time(), compare=False)
secator/output_types/port.py CHANGED
@@ -14,7 +14,9 @@ class Port(OutputType):
 	service_name: str = field(default='', compare=False)
 	cpes: list = field(default_factory=list, compare=False)
 	host: str = field(default='', repr=True, compare=False)
+	protocol: str = field(default='tcp', repr=True, compare=False)
 	extra_data: dict = field(default_factory=dict, compare=False)
+	confidence: str = field(default='low', repr=False, compare=False)
 	_timestamp: int = field(default_factory=lambda: time.time(), compare=False)
 	_source: str = field(default='', repr=True, compare=False)
 	_type: str = field(default='port', repr=True)
@@ -38,8 +40,13 @@
 
 	def __repr__(self) -> str:
 		s = f'🔓 {self.ip}:[bold red]{self.port:<4}[/] [bold yellow]{self.state.upper()}[/]'
+		if self.protocol != 'TCP':
+			s += f' \[[yellow3]{self.protocol}[/]]'
 		if self.service_name:
-			s += f' \[[bold purple]{self.service_name}[/]]'
+			conf = ''
+			if self.confidence == 'low':
+				conf = '?'
+			s += f' \[[bold purple]{self.service_name}{conf}[/]]'
 		if self.host:
 			s += f' \[[cyan]{self.host}[/]]'
 		return rich_to_ansi(s)
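
With the two new fields, the repr tags non-TCP ports with their protocol and marks low-confidence service guesses with a trailing '?'. A sketch, assuming port, ip, and state are constructor fields as the repr suggests (they fall outside this hunk):

	from secator.output_types import Port

	p = Port(port=53, ip='1.1.1.1', state='open', protocol='UDP',
			 service_name='domain', confidence='low')
	print(repr(p))  # 🔓 1.1.1.1:53   OPEN [UDP] [domain?]

One caveat worth noting: the repr compares against uppercase 'TCP' while the field default is lowercase 'tcp', so default TCP ports will also get a protocol tag.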