secator-0.1.0-py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of secator might be problematic.

Files changed (99)
  1. secator/.gitignore +162 -0
  2. secator/__init__.py +0 -0
  3. secator/celery.py +421 -0
  4. secator/cli.py +927 -0
  5. secator/config.py +137 -0
  6. secator/configs/__init__.py +0 -0
  7. secator/configs/profiles/__init__.py +0 -0
  8. secator/configs/profiles/aggressive.yaml +7 -0
  9. secator/configs/profiles/default.yaml +9 -0
  10. secator/configs/profiles/stealth.yaml +7 -0
  11. secator/configs/scans/__init__.py +0 -0
  12. secator/configs/scans/domain.yaml +18 -0
  13. secator/configs/scans/host.yaml +14 -0
  14. secator/configs/scans/network.yaml +17 -0
  15. secator/configs/scans/subdomain.yaml +8 -0
  16. secator/configs/scans/url.yaml +12 -0
  17. secator/configs/workflows/__init__.py +0 -0
  18. secator/configs/workflows/cidr_recon.yaml +28 -0
  19. secator/configs/workflows/code_scan.yaml +11 -0
  20. secator/configs/workflows/host_recon.yaml +41 -0
  21. secator/configs/workflows/port_scan.yaml +34 -0
  22. secator/configs/workflows/subdomain_recon.yaml +33 -0
  23. secator/configs/workflows/url_crawl.yaml +29 -0
  24. secator/configs/workflows/url_dirsearch.yaml +29 -0
  25. secator/configs/workflows/url_fuzz.yaml +35 -0
  26. secator/configs/workflows/url_nuclei.yaml +11 -0
  27. secator/configs/workflows/url_vuln.yaml +55 -0
  28. secator/configs/workflows/user_hunt.yaml +10 -0
  29. secator/configs/workflows/wordpress.yaml +14 -0
  30. secator/decorators.py +346 -0
  31. secator/definitions.py +183 -0
  32. secator/exporters/__init__.py +12 -0
  33. secator/exporters/_base.py +3 -0
  34. secator/exporters/csv.py +29 -0
  35. secator/exporters/gdrive.py +118 -0
  36. secator/exporters/json.py +14 -0
  37. secator/exporters/table.py +7 -0
  38. secator/exporters/txt.py +24 -0
  39. secator/hooks/__init__.py +0 -0
  40. secator/hooks/mongodb.py +212 -0
  41. secator/output_types/__init__.py +24 -0
  42. secator/output_types/_base.py +95 -0
  43. secator/output_types/exploit.py +50 -0
  44. secator/output_types/ip.py +33 -0
  45. secator/output_types/port.py +45 -0
  46. secator/output_types/progress.py +35 -0
  47. secator/output_types/record.py +34 -0
  48. secator/output_types/subdomain.py +42 -0
  49. secator/output_types/tag.py +46 -0
  50. secator/output_types/target.py +30 -0
  51. secator/output_types/url.py +76 -0
  52. secator/output_types/user_account.py +41 -0
  53. secator/output_types/vulnerability.py +97 -0
  54. secator/report.py +95 -0
  55. secator/rich.py +123 -0
  56. secator/runners/__init__.py +12 -0
  57. secator/runners/_base.py +873 -0
  58. secator/runners/_helpers.py +154 -0
  59. secator/runners/command.py +674 -0
  60. secator/runners/scan.py +67 -0
  61. secator/runners/task.py +107 -0
  62. secator/runners/workflow.py +137 -0
  63. secator/serializers/__init__.py +8 -0
  64. secator/serializers/dataclass.py +33 -0
  65. secator/serializers/json.py +15 -0
  66. secator/serializers/regex.py +17 -0
  67. secator/tasks/__init__.py +10 -0
  68. secator/tasks/_categories.py +304 -0
  69. secator/tasks/cariddi.py +102 -0
  70. secator/tasks/dalfox.py +66 -0
  71. secator/tasks/dirsearch.py +88 -0
  72. secator/tasks/dnsx.py +56 -0
  73. secator/tasks/dnsxbrute.py +34 -0
  74. secator/tasks/feroxbuster.py +89 -0
  75. secator/tasks/ffuf.py +85 -0
  76. secator/tasks/fping.py +44 -0
  77. secator/tasks/gau.py +43 -0
  78. secator/tasks/gf.py +34 -0
  79. secator/tasks/gospider.py +71 -0
  80. secator/tasks/grype.py +78 -0
  81. secator/tasks/h8mail.py +80 -0
  82. secator/tasks/httpx.py +104 -0
  83. secator/tasks/katana.py +128 -0
  84. secator/tasks/maigret.py +78 -0
  85. secator/tasks/mapcidr.py +32 -0
  86. secator/tasks/msfconsole.py +176 -0
  87. secator/tasks/naabu.py +52 -0
  88. secator/tasks/nmap.py +341 -0
  89. secator/tasks/nuclei.py +97 -0
  90. secator/tasks/searchsploit.py +53 -0
  91. secator/tasks/subfinder.py +40 -0
  92. secator/tasks/wpscan.py +177 -0
  93. secator/utils.py +404 -0
  94. secator/utils_test.py +183 -0
  95. secator-0.1.0.dist-info/METADATA +379 -0
  96. secator-0.1.0.dist-info/RECORD +99 -0
  97. secator-0.1.0.dist-info/WHEEL +5 -0
  98. secator-0.1.0.dist-info/entry_points.txt +2 -0
  99. secator-0.1.0.dist-info/licenses/LICENSE +60 -0
secator/exporters/json.py @@ -0,0 +1,14 @@
+ from secator.exporters._base import Exporter
+ from secator.rich import console
+ from secator.serializers.dataclass import dumps_dataclass
+
+
+ class JsonExporter(Exporter):
+     def send(self):
+         json_path = f'{self.report.output_folder}/report.json'
+
+         # Save JSON report to file
+         with open(json_path, 'w') as f:
+             f.write(dumps_dataclass(self.report.data, indent=2))
+
+         console.print(f':file_cabinet: Saved JSON report to {json_path}')
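
For context, exporters are driven by calling send() on an instance built around a report. A minimal usage sketch, assuming Exporter.__init__ (in _base.py, not shown in this hunk) simply stores the report it is given; FakeReport is a hypothetical stand-in for secator's Report object:

from secator.exporters.json import JsonExporter

class FakeReport:
    """Hypothetical stand-in for secator's Report, for illustration only."""
    output_folder = '/tmp/secator-demo'  # directory must exist before send()
    data = {'results': {}}               # assumes dumps_dataclass also accepts plain dicts

JsonExporter(FakeReport()).send()  # writes /tmp/secator-demo/report.json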
secator/exporters/table.py @@ -0,0 +1,7 @@
+ from secator.exporters._base import Exporter
+ from secator.utils import print_results_table
+
+
+ class TableExporter(Exporter):
+     def send(self):
+         print_results_table(self.report.runner.results, self.report.title)
secator/exporters/txt.py @@ -0,0 +1,24 @@
+ from secator.exporters._base import Exporter
+ from secator.rich import console
+
+
+ class TxtExporter(Exporter):
+     def send(self):
+         results = self.report.data['results']
+         txt_paths = []
+
+         for output_type, items in results.items():
+             items = [str(i) for i in items]
+             if not items:
+                 continue
+             txt_path = f'{self.report.output_folder}/report.txt'
+             with open(txt_path, 'w') as f:
+                 f.write('\n'.join(items))
+             txt_paths.append(txt_path)
+
+         if len(txt_paths) == 1:
+             txt_paths_str = txt_paths[0]
+         else:
+             txt_paths_str = '\n • ' + '\n • '.join(txt_paths)
+
+         console.print(f':file_cabinet: Saved TXT reports to {txt_paths_str}')
secator/hooks/__init__.py (file without changes)
secator/hooks/mongodb.py @@ -0,0 +1,212 @@
+ import logging
+ import os
+ import time
+
+ import pymongo
+ from bson.objectid import ObjectId
+ from celery import shared_task
+
+ from secator.definitions import DEFAULT_PROGRESS_UPDATE_FREQUENCY
+ from secator.output_types import OUTPUT_TYPES
+ from secator.runners import Scan, Task, Workflow
+ from secator.utils import debug, escape_mongodb_url
+
+ # import gevent.monkey
+ # gevent.monkey.patch_all()
+
+ MONGODB_URL = os.environ.get('MONGODB_URL', 'mongodb://localhost')
+ MONGODB_UPDATE_FREQUENCY = int(os.environ.get('MONGODB_UPDATE_FREQUENCY', DEFAULT_PROGRESS_UPDATE_FREQUENCY))
+ MAX_POOL_SIZE = 100
+
+ logger = logging.getLogger(__name__)
+
+ client = pymongo.MongoClient(escape_mongodb_url(MONGODB_URL), maxPoolSize=MAX_POOL_SIZE)
+
+
+ def update_runner(self):
+     db = client.main
+     type = self.config.type
+     collection = f'{type}s'
+     update = self.toDict()
+     debug_obj = {'type': 'runner', 'name': self.name, 'status': self.status}
+     chunk = update.get('chunk')
+     _id = self.context.get(f'{type}_chunk_id') if chunk else self.context.get(f'{type}_id')
+     debug('update', sub='hooks.mongodb', id=_id, obj=update, obj_after=True, level=4)
+     start_time = time.time()
+     if _id:
+         delta = start_time - self.last_updated if self.last_updated else MONGODB_UPDATE_FREQUENCY
+         if self.last_updated and delta < MONGODB_UPDATE_FREQUENCY and self.status == 'RUNNING':
+             debug(f'skipped ({delta:>.2f}s < {MONGODB_UPDATE_FREQUENCY}s)',
+                   sub='hooks.mongodb', id=_id, obj=debug_obj, obj_after=False, level=3)
+             return
+         db = client.main
+         start_time = time.time()
+         db[collection].update_one({'_id': ObjectId(_id)}, {'$set': update})
+         end_time = time.time()
+         elapsed = end_time - start_time
+         debug(
+             f'[dim gold4]updated in {elapsed:.4f}s[/]', sub='hooks.mongodb', id=_id, obj=debug_obj, obj_after=False, level=2)
+         self.last_updated = start_time
+     else:  # sync update and save result to runner object
+         runner = db[collection].insert_one(update)
+         _id = str(runner.inserted_id)
+         if chunk:
+             self.context[f'{type}_chunk_id'] = _id
+         else:
+             self.context[f'{type}_id'] = _id
+         end_time = time.time()
+         elapsed = end_time - start_time
+         debug(f'created in {elapsed:.4f}s', sub='hooks.mongodb', id=_id, obj=debug_obj, obj_after=False, level=2)
+
+
+ def update_finding(self, item):
+     start_time = time.time()
+     db = client.main
+     update = item.toDict()
+     _id = ObjectId(item._uuid) if ObjectId.is_valid(item._uuid) else None
+     if _id:
+         finding = db['findings'].update_one({'_id': _id}, {'$set': update})
+         status = 'UPDATED'
+     else:
+         finding = db['findings'].insert_one(update)
+         item._uuid = str(finding.inserted_id)
+         status = 'CREATED'
+     end_time = time.time()
+     elapsed = end_time - start_time
+     debug(f'in {elapsed:.4f}s', sub='hooks.mongodb', id=str(item._uuid), obj={'finding': status}, obj_after=False)
+     return item
+
+
+ def find_duplicates(self):
+     ws_id = self.toDict().get('context', {}).get('workspace_id')
+     if not ws_id:
+         return
+     celery_id = tag_duplicates.delay(ws_id)
+     debug(f'running duplicate check on workspace {ws_id}', id=celery_id, sub='hooks.mongodb')
+
+
+ def load_finding(obj):
+     finding_type = obj['_type']
+     klass = None
+     for otype in OUTPUT_TYPES:
+         if finding_type == otype.get_name():
+             klass = otype
+             item = klass.load(obj)
+             item._uuid = str(obj['_id'])
+             return item
+     debug('could not load Secator output type from MongoDB object', obj=obj, sub='hooks.mongodb')
+     return None
+
+
+ def load_findings(objs):
+     findings = [load_finding(obj) for obj in objs]
+     return [f for f in findings if f is not None]
+
+
+ @shared_task
+ def tag_duplicates(ws_id: str = None):
+     """Tag duplicates in workspace.
+
+     Args:
+         ws_id (str): Workspace id.
+     """
+     db = client.main
+     workspace_query = list(
+         db.findings.find({'_context.workspace_id': str(ws_id), '_tagged': True}).sort('_timestamp', -1))
+     untagged_query = list(
+         db.findings.find({'_context.workspace_id': str(ws_id)}).sort('_timestamp', -1))
+     # TODO: use this instead when duplicate removal logic is final
+     # untagged_query = list(
+     #     db.findings.find({'_context.workspace_id': str(ws_id), '_tagged': False}).sort('_timestamp', -1))
+     if not untagged_query:
+         debug('no untagged findings. Skipping.', id=ws_id, sub='hooks.mongodb')
+         return
+
+     untagged_findings = load_findings(untagged_query)
+     workspace_findings = load_findings(workspace_query)
+     non_duplicates = []
+     duplicates = []
+     for item in untagged_findings:
+         # If already seen in duplicates
+         seen = [f for f in duplicates if f._uuid == item._uuid]
+         if seen:
+             continue
+
+         # Check for duplicates
+         tmp_duplicates = []
+
+         # Check if already present in workspace findings, in the list of duplicates, or in untagged findings
+         workspace_dupes = [f for f in workspace_findings if f == item and f._uuid != item._uuid]
+         untagged_dupes = [f for f in untagged_findings if f == item and f._uuid != item._uuid]
+         seen_dupes = [f for f in duplicates if f == item and f._uuid != item._uuid]
+         tmp_duplicates.extend(workspace_dupes)
+         tmp_duplicates.extend(untagged_dupes)
+         tmp_duplicates.extend(seen_dupes)
+         debug(
+             f'for item {item._uuid}',
+             obj={
+                 'workspace dupes': len(workspace_dupes),
+                 'untagged dupes': len(untagged_dupes),
+                 'seen dupes': len(seen_dupes)
+             },
+             id=ws_id,
+             sub='hooks.mongodb')
+         tmp_duplicates_ids = list(dict.fromkeys([i._uuid for i in tmp_duplicates]))
+         debug(f'duplicate ids: {tmp_duplicates_ids}', id=ws_id, sub='hooks.mongodb')
+
+         # Update latest object as non-duplicate
+         if tmp_duplicates:
+             duplicates.extend([f for f in tmp_duplicates])
+             db.findings.update_one({'_id': ObjectId(item._uuid)}, {'$set': {'_related': tmp_duplicates_ids}})
+             debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb')
+             non_duplicates.append(item)
+         else:
+             debug(f'adding {item._uuid} as non-duplicate', id=ws_id, sub='hooks.mongodb')
+             non_duplicates.append(item)
+
+     # debug(f'found {len(duplicates)} total duplicates')
+
+     # Update objects with _tagged and _duplicate fields
+     duplicates_ids = list(dict.fromkeys([n._uuid for n in duplicates]))
+     non_duplicates_ids = list(dict.fromkeys([n._uuid for n in non_duplicates]))
+
+     search = {'_id': {'$in': [ObjectId(d) for d in duplicates_ids]}}
+     update = {'$set': {'_context.workspace_duplicate': True, '_tagged': True}}
+     db.findings.update_many(search, update)
+
+     search = {'_id': {'$in': [ObjectId(d) for d in non_duplicates_ids]}}
+     update = {'$set': {'_context.workspace_duplicate': False, '_tagged': True}}
+     db.findings.update_many(search, update)
+     debug(
+         'completed duplicates check for workspace.',
+         id=ws_id,
+         obj={
+             'processed': len(untagged_findings),
+             'duplicates': len(duplicates_ids),
+             'non-duplicates': len(non_duplicates_ids)
+         },
+         sub='hooks.mongodb')
+
+
+ MONGODB_HOOKS = {
+     Scan: {
+         'on_start': [update_runner],
+         'on_iter': [update_runner],
+         'on_duplicate': [update_finding],
+         'on_end': [update_runner],
+     },
+     Workflow: {
+         'on_start': [update_runner],
+         'on_iter': [update_runner],
+         'on_duplicate': [update_finding],
+         'on_end': [update_runner],
+     },
+     Task: {
+         'on_init': [update_runner],
+         'on_start': [update_runner],
+         'on_item': [update_finding],
+         'on_duplicate': [update_finding],
+         'on_iter': [update_runner],
+         'on_end': [update_runner]
+     }
+ }
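
The MONGODB_HOOKS mapping binds runner lifecycle events to the two updater functions above, while tag_duplicates is a plain Celery shared task, so it can be queued or invoked directly. A quick sketch (the workspace id is a placeholder; a running Celery worker and a reachable MongoDB at MONGODB_URL are assumed):

from secator.hooks.mongodb import tag_duplicates

ws_id = '65f0c1d2e3a4b5c6d7e8f901'  # placeholder workspace id
tag_duplicates.delay(ws_id)  # asynchronously, via a Celery worker
tag_duplicates(ws_id)        # or synchronously, in the current process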
secator/output_types/__init__.py @@ -0,0 +1,24 @@
+ __all__ = [
+     'OutputType',
+     'Ip',
+     'Port',
+     'Record',
+     'Subdomain',
+     'Url',
+     'UserAccount',
+     'Vulnerability'
+ ]
+ from secator.output_types._base import OutputType  # noqa: F401
+ from secator.output_types.progress import Progress  # noqa: F401
+ from secator.output_types.ip import Ip
+ from secator.output_types.exploit import Exploit
+ from secator.output_types.port import Port
+ from secator.output_types.subdomain import Subdomain
+ from secator.output_types.tag import Tag
+ from secator.output_types.target import Target
+ from secator.output_types.url import Url
+ from secator.output_types.user_account import UserAccount
+ from secator.output_types.vulnerability import Vulnerability
+ from secator.output_types.record import Record
+
+ OUTPUT_TYPES = [Target, Progress, Subdomain, Ip, Port, Url, Tag, Exploit, UserAccount, Vulnerability, Record]
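
OUTPUT_TYPES doubles as the registry that load_finding() in hooks/mongodb.py iterates: each class advertises a snake_case name derived from its class name by OutputType.get_name() (defined in _base.py below). A small sketch of that naming:

from secator.output_types import OUTPUT_TYPES

for otype in OUTPUT_TYPES:
    print(otype.get_name())
# target, progress, subdomain, ip, port, url, tag, exploit,
# user_account, vulnerability, record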
secator/output_types/_base.py @@ -0,0 +1,95 @@
+ import logging
+ import re
+ from dataclasses import _MISSING_TYPE, dataclass, fields
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class OutputType:
+     _table_fields = []
+     _sort_by = ()
+
+     def __gt__(self, other):
+         if not self.__eq__(other):
+             return False
+
+         # Point-based system based on the number of non-empty extra-data fields present.
+         # In this configuration, a > b if a == b AND a has more non-empty fields than b
+         # extra_fields = [f for f in fields(self) if not f.compare]
+         # points1 = 0
+         # points2 = 0
+         # for field in extra_fields:
+         #     v1 = getattr(self, field.name)
+         #     v2 = getattr(other, field.name)
+         #     if v1 and not v2:
+         #         points1 += 1
+         #     elif v2 and not v1:
+         #         points2 += 1
+         # if points1 > points2:
+         #     return True
+
+         # Timestamp-based system: return newest object
+         return self._timestamp > other._timestamp
+
+     def __ge__(self, other):
+         return self == other
+
+     def __lt__(self, other):
+         return other > self
+
+     def __le__(self, other):
+         return self == other
+
+     def __post_init__(self):
+         """Initialize default fields to their proper types."""
+         for field in fields(self):
+             default_factory = field.default_factory
+             default = field.default
+             if getattr(self, field.name) is None:
+                 if not isinstance(default, _MISSING_TYPE):
+                     setattr(self, field.name, field.default)
+                 elif not isinstance(default_factory, _MISSING_TYPE):
+                     setattr(self, field.name, default_factory())
+
+     @classmethod
+     def load(cls, item, output_map={}):
+         new_item = {}
+
+         # Check for explicit _type keys
+         _type = item.get('_type')
+         if _type and _type != cls.get_name():
+             raise TypeError(f'Item has different _type set: {_type}')
+
+         for field in fields(cls):
+             key = field.name
+             if key in output_map:
+                 mapped_key = output_map[key]
+                 if callable(mapped_key):
+                     mapped_val = mapped_key(item)
+                 else:
+                     mapped_val = item.get(mapped_key)
+                 new_item[key] = mapped_val
+             elif key in item:
+                 new_item[key] = item[key]
+
+         # All values None, raise an error
+         if all(val is None for val in new_item.values()):
+             raise TypeError(f'Item does not match {cls} schema')
+
+         new_item['_type'] = cls.get_name()
+         return cls(**new_item)
+
+     @classmethod
+     def get_name(cls):
+         return re.sub(r'(?<!^)(?=[A-Z])', '_', cls.__name__).lower()
+
+     @classmethod
+     def keys(cls):
+         return [f.name for f in fields(cls)]
+
+     def toDict(self, exclude=[]):
+         data = self.__dict__.copy()
+         if exclude:
+             return {k: v for k, v in data.items() if k not in exclude}
+         return data
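
load() is the bridge between raw tool output and typed findings: each key of output_map is a dataclass field name, and its value is either a key to look up in the raw dict or a callable applied to the whole raw dict. A hedged sketch using made-up raw keys (address, status are illustrative, not from any real tool):

from secator.output_types import Ip

raw = {'address': '203.0.113.7', 'status': 'up'}  # hypothetical tool output
ip = Ip.load(raw, output_map={
    'ip': 'address',                                # plain key remap
    'alive': lambda item: item['status'] == 'up',   # computed field
})
print(ip.ip, ip.alive, ip._type)  # 203.0.113.7 True ip

This is also why load_finding() above can call klass.load(obj) with no output_map: MongoDB documents already use the dataclass field names.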
secator/output_types/exploit.py @@ -0,0 +1,50 @@
+ import time
+ from dataclasses import dataclass, field
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+ from secator.definitions import MATCHED_AT, NAME, ID, EXTRA_DATA, REFERENCE
+
+
+ @dataclass
+ class Exploit(OutputType):
+     name: str
+     id: str
+     provider: str
+     matched_at: str = ''
+     ip: str = ''
+     reference: str = ''
+     cves: list = field(default_factory=list, compare=False)
+     tags: list = field(default_factory=list, compare=False)
+     extra_data: dict = field(default_factory=dict, compare=False)
+     _source: str = field(default='', repr=True)
+     _type: str = field(default='vulnerability', repr=True)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = [
+         MATCHED_AT,
+         NAME,
+         ID,
+         EXTRA_DATA,
+         REFERENCE
+     ]
+     _sort_by = ('matched_at', 'name')
+
+     def __repr__(self):
+         s = f'[bold red]⍼[/] \[[bold red]{self.name}'
+         if self.reference:
+             s += f' [link={self.reference}]🡕[/link]'
+         s += '[/]]'
+         if self.matched_at:
+             s += f' {self.matched_at}'
+         if self.tags:
+             tags_str = ', '.join(self.tags)
+             s += f' \[[cyan]{tags_str}[/]]'
+         if self.extra_data:
+             data = ', '.join([f'{k}:{v}' for k, v in self.extra_data.items()])
+             s += f' \[[yellow]{str(data)}[/]]'
+         return rich_to_ansi(s)
secator/output_types/ip.py @@ -0,0 +1,33 @@
+ import time
+ from dataclasses import dataclass, field
+
+ from secator.definitions import ALIVE, IP
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+
+
+ @dataclass
+ class Ip(OutputType):
+     ip: str
+     host: str = ''
+     alive: bool = False
+     _source: str = field(default='', repr=True)
+     _type: str = field(default='ip', repr=True)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = [IP, ALIVE]
+     _sort_by = (IP,)
+
+     def __str__(self) -> str:
+         return self.ip
+
+     def __repr__(self) -> str:
+         s = f'💻 [bold white]{self.ip}[/]'
+         if self.host:
+             s += f' \[[bold magenta]{self.host}[/]]'
+         return rich_to_ansi(s)
secator/output_types/port.py @@ -0,0 +1,45 @@
+ import time
+ from dataclasses import dataclass, field
+
+ from secator.definitions import CPES, EXTRA_DATA, HOST, IP, PORT
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+
+
+ @dataclass
+ class Port(OutputType):
+     port: int
+     ip: str
+     state: str = 'UNKNOWN'
+     service_name: str = field(default='', compare=False)
+     cpes: list = field(default_factory=list, compare=False)
+     host: str = field(default='', repr=True, compare=False)
+     extra_data: dict = field(default_factory=dict, compare=False)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _source: str = field(default='', repr=True, compare=False)
+     _type: str = field(default='port', repr=True)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = [IP, PORT, HOST, CPES, EXTRA_DATA]
+     _sort_by = (PORT, IP)
+
+     def __gt__(self, other):
+         # favor nmap over other port detection tools
+         if self._source == 'nmap' and other._source != 'nmap':
+             return True
+         return super().__gt__(other)
+
+     def __str__(self) -> str:
+         return f'{self.host}:{self.port}'
+
+     def __repr__(self) -> str:
+         s = f'🔓 {self.ip}:[bold red]{self.port:<4}[/] [bold yellow]{self.state.upper()}[/]'
+         if self.service_name:
+             s += f' \[[bold purple]{self.service_name}[/]]'
+         if self.host:
+             s += f' \[[cyan]{self.host}[/]]'
+         return rich_to_ansi(s)
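
The __gt__ override above means that when two Port findings compare equal (equality only checks port, ip, state and _type, since the other fields are declared compare=False), an nmap-sourced result always outranks one from another tool instead of falling back to the newest-timestamp rule in OutputType.__gt__. For instance:

from secator.output_types import Port

naabu_port = Port(port=443, ip='203.0.113.7', _source='naabu')
nmap_port = Port(port=443, ip='203.0.113.7', _source='nmap', service_name='https')

assert naabu_port == nmap_port  # equal: compared on (port, ip, state, _type) only
assert nmap_port > naabu_port   # nmap wins the duplicate resolution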
secator/output_types/progress.py @@ -0,0 +1,35 @@
+ import time
+ from dataclasses import dataclass, field
+
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+
+
+ @dataclass
+ class Progress(OutputType):
+     duration: str
+     percent: int
+     errors: list = field(default_factory=list)
+     extra_data: dict = field(default_factory=dict)
+     _source: str = field(default='', repr=True)
+     _type: str = field(default='progress', repr=True)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = ['percent', 'duration']
+     _sort_by = ('percent',)
+
+     def __str__(self) -> str:
+         return f'{self.percent}%'
+
+     def __repr__(self) -> str:
+         s = f'[dim]⏳ {self.percent}% ' + '█' * (self.percent // 10) + '[/]'
+         if self.errors:
+             s += f' [dim red]errors={self.errors}[/]'
+         ed = ' '.join([f'{k}={v}' for k, v in self.extra_data.items() if k != 'startedAt' and v])
+         s += f' [dim yellow]{ed}[/]'
+         return rich_to_ansi(s)
secator/output_types/record.py @@ -0,0 +1,34 @@
+ import time
+ from dataclasses import dataclass, field
+
+ from secator.definitions import HOST, NAME, TYPE
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+
+
+ @dataclass
+ class Record(OutputType):
+     name: str
+     type: str
+     host: str = ''
+     extra_data: dict = field(default_factory=dict, compare=False)
+     _source: str = field(default='', repr=True)
+     _type: str = field(default='record', repr=True)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = [NAME, HOST, TYPE]
+     _sort_by = (TYPE, NAME)
+
+     def __str__(self) -> str:
+         return self.name
+
+     def __repr__(self) -> str:
+         s = f'🎤 [bold white]{self.name}[/] \[[green]{self.type}[/]] \[[magenta]{self.host}[/]]'
+         if self.extra_data:
+             s += ' \[[bold yellow]' + ','.join(f'{k}={v}' for k, v in self.extra_data.items()) + '[/]]'
+         return rich_to_ansi(s)
secator/output_types/subdomain.py @@ -0,0 +1,42 @@
+ import time
+ from dataclasses import dataclass, field
+ from typing import List
+
+ from secator.definitions import DOMAIN, HOST, SOURCES
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+
+
+ @dataclass
+ class Subdomain(OutputType):
+     host: str
+     domain: str
+     sources: List[str] = field(default_factory=list, compare=False)
+     extra_data: dict = field(default_factory=dict, compare=False)
+     _source: str = field(default='', repr=True)
+     _type: str = field(default='subdomain', repr=True)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = [
+         HOST,
+         DOMAIN,
+         SOURCES
+     ]
+     _sort_by = (HOST,)
+
+     def __str__(self):
+         return self.host
+
+     def __repr__(self):
+         sources_str = ', '.join([f'[magenta]{source}[/]' for source in self.sources])
+         s = f'🏰 [white]{self.host}[/]'
+         if sources_str:
+             s += f' [{sources_str}]'
+         if self.extra_data:
+             s += ' \[[bold yellow]' + ', '.join(f'{k}:{v}' for k, v in self.extra_data.items()) + '[/]]'
+         return rich_to_ansi(s)
secator/output_types/tag.py @@ -0,0 +1,46 @@
+ import time
+ from dataclasses import dataclass, field
+
+ from secator.output_types import OutputType
+ from secator.utils import rich_to_ansi
+
+
+ @dataclass
+ class Tag(OutputType):
+     name: str
+     match: str
+     extra_data: dict = field(default_factory=dict, repr=True, compare=False)
+     _source: str = field(default='', repr=True)
+     _type: str = field(default='tag', repr=True)
+     _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
+     _uuid: str = field(default='', repr=True, compare=False)
+     _context: dict = field(default_factory=dict, repr=True, compare=False)
+     _tagged: bool = field(default=False, repr=True, compare=False)
+     _duplicate: bool = field(default=False, repr=True, compare=False)
+     _related: list = field(default_factory=list, compare=False)
+
+     _table_fields = ['match', 'name', 'extra_data']
+     _sort_by = ('match', 'name')
+
+     def __post_init__(self):
+         super().__post_init__()
+
+     def __str__(self) -> str:
+         return self.match
+
+     def __repr__(self) -> str:
+         s = f'🏷️ [bold magenta]{self.name}[/]'
+         s += f' found @ [bold]{self.match}[/]'
+         ed = ''
+         if self.extra_data:
+             for k, v in self.extra_data.items():
+                 sep = ' '
+                 if not v:
+                     continue
+                 if len(v) >= 80:
+                     v = v.replace('\n', '\n' + ' ').replace('...TRUNCATED', '\n[italic bold red]...truncated to 1000 chars[/]')
+                     sep = '\n '
+                 ed += f'\n [dim red]{k}[/]:{sep}[dim yellow]{v}[/]'
+         if ed:
+             s += ed
+         return rich_to_ansi(s)