secator-0.22.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- secator/.gitignore +162 -0
- secator/__init__.py +0 -0
- secator/celery.py +453 -0
- secator/celery_signals.py +138 -0
- secator/celery_utils.py +320 -0
- secator/cli.py +2035 -0
- secator/cli_helper.py +395 -0
- secator/click.py +87 -0
- secator/config.py +670 -0
- secator/configs/__init__.py +0 -0
- secator/configs/profiles/__init__.py +0 -0
- secator/configs/profiles/aggressive.yaml +8 -0
- secator/configs/profiles/all_ports.yaml +7 -0
- secator/configs/profiles/full.yaml +31 -0
- secator/configs/profiles/http_headless.yaml +7 -0
- secator/configs/profiles/http_record.yaml +8 -0
- secator/configs/profiles/insane.yaml +8 -0
- secator/configs/profiles/paranoid.yaml +8 -0
- secator/configs/profiles/passive.yaml +11 -0
- secator/configs/profiles/polite.yaml +8 -0
- secator/configs/profiles/sneaky.yaml +8 -0
- secator/configs/profiles/tor.yaml +5 -0
- secator/configs/scans/__init__.py +0 -0
- secator/configs/scans/domain.yaml +31 -0
- secator/configs/scans/host.yaml +23 -0
- secator/configs/scans/network.yaml +30 -0
- secator/configs/scans/subdomain.yaml +27 -0
- secator/configs/scans/url.yaml +19 -0
- secator/configs/workflows/__init__.py +0 -0
- secator/configs/workflows/cidr_recon.yaml +48 -0
- secator/configs/workflows/code_scan.yaml +29 -0
- secator/configs/workflows/domain_recon.yaml +46 -0
- secator/configs/workflows/host_recon.yaml +95 -0
- secator/configs/workflows/subdomain_recon.yaml +120 -0
- secator/configs/workflows/url_bypass.yaml +15 -0
- secator/configs/workflows/url_crawl.yaml +98 -0
- secator/configs/workflows/url_dirsearch.yaml +62 -0
- secator/configs/workflows/url_fuzz.yaml +68 -0
- secator/configs/workflows/url_params_fuzz.yaml +66 -0
- secator/configs/workflows/url_secrets_hunt.yaml +23 -0
- secator/configs/workflows/url_vuln.yaml +91 -0
- secator/configs/workflows/user_hunt.yaml +29 -0
- secator/configs/workflows/wordpress.yaml +38 -0
- secator/cve.py +718 -0
- secator/decorators.py +7 -0
- secator/definitions.py +168 -0
- secator/exporters/__init__.py +14 -0
- secator/exporters/_base.py +3 -0
- secator/exporters/console.py +10 -0
- secator/exporters/csv.py +37 -0
- secator/exporters/gdrive.py +123 -0
- secator/exporters/json.py +16 -0
- secator/exporters/table.py +36 -0
- secator/exporters/txt.py +28 -0
- secator/hooks/__init__.py +0 -0
- secator/hooks/gcs.py +80 -0
- secator/hooks/mongodb.py +281 -0
- secator/installer.py +694 -0
- secator/loader.py +128 -0
- secator/output_types/__init__.py +49 -0
- secator/output_types/_base.py +108 -0
- secator/output_types/certificate.py +78 -0
- secator/output_types/domain.py +50 -0
- secator/output_types/error.py +42 -0
- secator/output_types/exploit.py +58 -0
- secator/output_types/info.py +24 -0
- secator/output_types/ip.py +47 -0
- secator/output_types/port.py +55 -0
- secator/output_types/progress.py +36 -0
- secator/output_types/record.py +36 -0
- secator/output_types/stat.py +41 -0
- secator/output_types/state.py +29 -0
- secator/output_types/subdomain.py +45 -0
- secator/output_types/tag.py +69 -0
- secator/output_types/target.py +38 -0
- secator/output_types/url.py +112 -0
- secator/output_types/user_account.py +41 -0
- secator/output_types/vulnerability.py +101 -0
- secator/output_types/warning.py +30 -0
- secator/report.py +140 -0
- secator/rich.py +130 -0
- secator/runners/__init__.py +14 -0
- secator/runners/_base.py +1240 -0
- secator/runners/_helpers.py +218 -0
- secator/runners/celery.py +18 -0
- secator/runners/command.py +1178 -0
- secator/runners/python.py +126 -0
- secator/runners/scan.py +87 -0
- secator/runners/task.py +81 -0
- secator/runners/workflow.py +168 -0
- secator/scans/__init__.py +29 -0
- secator/serializers/__init__.py +8 -0
- secator/serializers/dataclass.py +39 -0
- secator/serializers/json.py +45 -0
- secator/serializers/regex.py +25 -0
- secator/tasks/__init__.py +8 -0
- secator/tasks/_categories.py +487 -0
- secator/tasks/arjun.py +113 -0
- secator/tasks/arp.py +53 -0
- secator/tasks/arpscan.py +70 -0
- secator/tasks/bbot.py +372 -0
- secator/tasks/bup.py +118 -0
- secator/tasks/cariddi.py +193 -0
- secator/tasks/dalfox.py +87 -0
- secator/tasks/dirsearch.py +84 -0
- secator/tasks/dnsx.py +186 -0
- secator/tasks/feroxbuster.py +93 -0
- secator/tasks/ffuf.py +135 -0
- secator/tasks/fping.py +85 -0
- secator/tasks/gau.py +102 -0
- secator/tasks/getasn.py +60 -0
- secator/tasks/gf.py +36 -0
- secator/tasks/gitleaks.py +96 -0
- secator/tasks/gospider.py +84 -0
- secator/tasks/grype.py +109 -0
- secator/tasks/h8mail.py +75 -0
- secator/tasks/httpx.py +167 -0
- secator/tasks/jswhois.py +36 -0
- secator/tasks/katana.py +203 -0
- secator/tasks/maigret.py +87 -0
- secator/tasks/mapcidr.py +42 -0
- secator/tasks/msfconsole.py +179 -0
- secator/tasks/naabu.py +85 -0
- secator/tasks/nmap.py +487 -0
- secator/tasks/nuclei.py +151 -0
- secator/tasks/search_vulns.py +225 -0
- secator/tasks/searchsploit.py +109 -0
- secator/tasks/sshaudit.py +299 -0
- secator/tasks/subfinder.py +48 -0
- secator/tasks/testssl.py +283 -0
- secator/tasks/trivy.py +130 -0
- secator/tasks/trufflehog.py +240 -0
- secator/tasks/urlfinder.py +100 -0
- secator/tasks/wafw00f.py +106 -0
- secator/tasks/whois.py +34 -0
- secator/tasks/wpprobe.py +116 -0
- secator/tasks/wpscan.py +202 -0
- secator/tasks/x8.py +94 -0
- secator/tasks/xurlfind3r.py +83 -0
- secator/template.py +294 -0
- secator/thread.py +24 -0
- secator/tree.py +196 -0
- secator/utils.py +922 -0
- secator/utils_test.py +297 -0
- secator/workflows/__init__.py +29 -0
- secator-0.22.0.dist-info/METADATA +447 -0
- secator-0.22.0.dist-info/RECORD +150 -0
- secator-0.22.0.dist-info/WHEEL +4 -0
- secator-0.22.0.dist-info/entry_points.txt +2 -0
- secator-0.22.0.dist-info/licenses/LICENSE +60 -0
secator/output_types/record.py
ADDED
@@ -0,0 +1,36 @@
import time
from dataclasses import dataclass, field

from secator.definitions import HOST, NAME, TYPE
from secator.output_types import OutputType
from secator.utils import rich_to_ansi, format_object


@dataclass
class Record(OutputType):
    name: str
    type: str
    host: str = ''
    extra_data: dict = field(default_factory=dict, compare=False)
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='record', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = [NAME, HOST, TYPE]
    _sort_by = (TYPE, NAME)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        s = rf'🎤 [bold white]{self.name}[/] \[[green]{self.type}[/]]'
        if self.host:
            s += rf' \[[magenta]{self.host}[/]]'
        if self.extra_data:
            s += format_object(self.extra_data, 'yellow')
        return rich_to_ansi(s)

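The output types in this release are plain dataclasses, so they can be constructed directly. A minimal usage sketch with hypothetical values, assuming the inherited OutputType.__post_init__ from _base.py (not shown in this excerpt) imposes no extra requirements:

from secator.output_types.record import Record

# A DNS-record-style finding; str() returns the name, repr() an ANSI-colored one-liner.
r = Record(name='www.example.com', type='CNAME', host='example.com')
print(str(r))   # www.example.com
print(repr(r))  # e.g. 🎤 www.example.com [CNAME] [example.com], rendered with ANSI colors
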
secator/output_types/stat.py
ADDED
@@ -0,0 +1,41 @@
import time
from dataclasses import dataclass, field

from secator.output_types import OutputType
from secator.utils import rich_to_ansi


@dataclass
class Stat(OutputType):
    name: str
    pid: int
    cpu: int
    memory: int
    memory_limit: int
    net_conns: int = field(default=None, repr=True)
    extra_data: dict = field(default_factory=dict)
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='stat', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = ['name', 'pid', 'cpu', 'memory']
    _sort_by = ('name', 'pid')

    def __str__(self) -> str:
        return f'{self.name} ([bold]pid[/]:{self.pid}) ([bold]cpu[/]:{self.cpu:.2f}%) ([bold]memory[/]:{self.memory:.2f}MB / {self.memory_limit}MB)'  # noqa: E501

    def __repr__(self) -> str:
        s = rf'[dim yellow3]📊 {self.name} ([bold]pid[/]:{self.pid}) ([bold]cpu[/]:{self.cpu:.2f}%)'
        s += rf' ([bold]memory[/]:{self.memory:.2f}MB'
        if self.memory_limit != -1:
            s += rf' / {self.memory_limit}MB'
        s += ')'
        if self.net_conns:
            s += rf' ([bold]connections[/]:{self.net_conns})'
        s += ' [/]'
        return rich_to_ansi(s)

secator/output_types/state.py
ADDED
@@ -0,0 +1,29 @@
import time
from dataclasses import dataclass, field

from secator.output_types._base import OutputType
from secator.utils import rich_to_ansi


@dataclass
class State(OutputType):
    """Represents the state of a Celery task."""

    task_id: str
    state: str
    _type: str = field(default='state', repr=True)
    _source: str = field(default='', repr=True, compare=False)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)
    _icon = '📊'
    _color = 'bright_blue'

    def __str__(self) -> str:
        return f"Task {self.task_id} is {self.state}"

    def __repr__(self) -> str:
        return rich_to_ansi(f"{self._icon} [bold {self._color}]{self.state}[/] {self.task_id}")

secator/output_types/subdomain.py
ADDED
@@ -0,0 +1,45 @@
import time
from dataclasses import dataclass, field
from typing import List

from secator.definitions import DOMAIN, HOST, SOURCES
from secator.output_types import OutputType
from secator.utils import rich_to_ansi, format_object


@dataclass
class Subdomain(OutputType):
    host: str
    domain: str
    verified: bool = field(default=False, compare=False)
    sources: List[str] = field(default_factory=list, compare=False)
    extra_data: dict = field(default_factory=dict, compare=False)
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='subdomain', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = [
        HOST,
        DOMAIN,
        SOURCES
    ]
    _sort_by = (HOST,)

    def __str__(self):
        return self.host

    def __repr__(self):
        sources_str = ', '.join([f'[magenta]{source}[/]' for source in self.sources])
        s = f'🏰 [white]{self.host}[/]'
        if sources_str:
            s += f' [{sources_str}]'
        if self.extra_data:
            s += format_object(self.extra_data, 'yellow')
        if not self.verified:
            s = f'[dim]{s}[/]'
        return rich_to_ansi(s)

secator/output_types/tag.py
ADDED
@@ -0,0 +1,69 @@
import time
from dataclasses import dataclass, field

from secator.output_types import OutputType
from secator.utils import rich_to_ansi, trim_string, rich_escape as _s


@dataclass
class Tag(OutputType):
    name: str
    value: str
    match: str
    category: str = field(default='general')
    extra_data: dict = field(default_factory=dict, repr=True, compare=False)
    stored_response_path: str = field(default='', compare=False)
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='tag', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = ['match', 'category', 'name', 'extra_data']
    _sort_by = ('match', 'name')

    def __post_init__(self):
        super().__post_init__()

    def __str__(self) -> str:
        return self.match

    def __repr__(self) -> str:
        content = self.value
        s = rf'🏷️ \[[bold yellow]{self.category}[/]] [bold magenta]{self.name}[/]'
        small_content = False
        if len(content) < 100:
            small_content = True
            # content_xs = trim_string(content, max_length=50).replace('\n', '/')
        if small_content:
            s += f' [bold orange4]{content}[/]'
        s += f' found @ [bold]{_s(self.match)}[/]'
        ed = ''
        if self.stored_response_path:
            s += rf' [link=file://{self.stored_response_path}]:incoming_envelope:[/]'
        if not small_content:
            sep = ' '
            content = trim_string(content, max_length=1000)
            content = content.replace('\n', '\n ')
            sep = '\n '
            ed += f'\n [bold red]value[/]:{sep}[yellow]{_s(content)}[/]'
        if self.extra_data:
            for k, v in self.extra_data.items():
                sep = ' '
                if not v:
                    continue
                if isinstance(v, str):
                    v = trim_string(v, max_length=1000)
                    if len(v) > 1000:
                        v = v.replace('\n', '\n' + sep)
                        sep = '\n '
                if k == 'content' and not small_content:
                    ed += f'\n [bold red]{_s(k)}[/]:{sep}[yellow]{_s(v)}[/]'
                else:
                    ed += f'\n [dim red]{_s(k)}[/]:{sep}[dim yellow]{_s(v)}[/]'
        if ed:
            s += ed
        return rich_to_ansi(s)

secator/output_types/target.py
ADDED
@@ -0,0 +1,38 @@
import time
from dataclasses import dataclass, field

from secator.output_types import OutputType
from secator.utils import autodetect_type, rich_to_ansi, rich_escape as _s


@dataclass
class Target(OutputType):
    name: str
    type: str = ''
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='target', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = [
        'name',
        'type',
    ]
    _sort_by = ('type', 'name')

    def __post_init__(self):
        if not self.type:
            self.type = autodetect_type(self.name)

    def __str__(self):
        return self.name

    def __repr__(self):
        s = f'🎯 {_s(self.name)}'
        if self.type:
            s += f' ({self.type})'
        return rich_to_ansi(s)

secator/output_types/url.py
ADDED
@@ -0,0 +1,112 @@
import time

from dataclasses import dataclass, field

from urllib.parse import urlparse

from secator.definitions import (CONTENT_LENGTH, CONTENT_TYPE, STATUS_CODE,
                                 TECH, TITLE, URL, WEBSERVER, METHOD)
from secator.output_types import OutputType
from secator.utils import rich_to_ansi, trim_string, format_object, rich_escape as _s
from secator.config import CONFIG


@dataclass
class Url(OutputType):
    url: str
    host: str = field(default='', compare=False)
    verified: bool = field(default=False, compare=False)
    status_code: int = field(default=0, compare=False)
    title: str = field(default='', compare=False)
    webserver: str = field(default='', compare=False)
    tech: list = field(default_factory=list, compare=False)
    content_type: str = field(default='', compare=False)
    content_length: int = field(default=0, compare=False)
    time: str = field(default='', compare=False)
    method: str = field(default='', compare=False)
    words: int = field(default=0, compare=False)
    lines: int = field(default=0, compare=False)
    screenshot_path: str = field(default='', compare=False)
    stored_response_path: str = field(default='', compare=False)
    response_headers: dict = field(default_factory=dict, repr=True, compare=False)
    request_headers: dict = field(default_factory=dict, repr=True, compare=False)
    is_directory: dict = field(default='', compare=False)
    extra_data: dict = field(default_factory=dict, compare=False)
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='url', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = [
        URL,
        METHOD,
        STATUS_CODE,
        TITLE,
        WEBSERVER,
        TECH,
        CONTENT_TYPE,
        CONTENT_LENGTH,
        'stored_response_path',
        'screenshot_path',
    ]
    _sort_by = (URL,)

    def __post_init__(self):
        super().__post_init__()
        if not self.host:
            self.host = urlparse(self.url).hostname
        if self.status_code != 0:
            self.verified = True
        if self.title and 'Index of' in self.title:
            self.is_directory = True

    def __gt__(self, other):
        # favor httpx over other url info tools
        if self._source == 'httpx' and other._source != 'httpx':
            return True
        return super().__gt__(other)

    def __str__(self):
        return self.url

    def __repr__(self):
        s = f'🔗 [white]{_s(self.url)}'
        if self.method and self.method != 'GET':
            s += rf' \[[turquoise4]{self.method}[/]]'
        if self.request_headers:
            s += rf'{format_object(self.request_headers, "gold3", skip_keys=["user_agent"])}'
        if self.status_code and self.status_code != 0:
            if self.status_code < 400:
                s += rf' \[[green]{self.status_code}[/]]'
            else:
                s += rf' \[[red]{self.status_code}[/]]'
        if self.title:
            s += rf' \[[spring_green3]{trim_string(self.title)}[/]]'
        if self.is_directory:
            s += r' \[[bold gold3]directory[/]]'
        if self.webserver:
            s += rf' \[[bold magenta]{_s(self.webserver)}[/]]'
        if self.tech:
            techs_str = ', '.join([f'[magenta]{_s(tech)}[/]' for tech in self.tech])
            s += f' [{techs_str}]'
        if self.content_type:
            s += rf' \[[magenta]{_s(self.content_type)}[/]]'
        if self.content_length:
            cl = str(self.content_length)
            cl += '[bold red]+[/]' if self.content_length == CONFIG.http.response_max_size_bytes else ''
            s += rf' \[[magenta]{cl}[/]]'
        if self.response_headers and CONFIG.cli.show_http_response_headers:
            s += rf'{format_object(self.response_headers, "magenta", skip_keys=CONFIG.cli.exclude_http_response_headers)}'  # noqa: E501
        if self.extra_data:
            s += format_object(self.extra_data, 'yellow')
        if self.screenshot_path:
            s += rf' [link=file://{self.screenshot_path}]:camera:[/]'
        if self.stored_response_path:
            s += rf' [link=file://{self.stored_response_path}]:pencil:[/]'
        if not self.verified:
            s = f'[dim]{s}[/]'
        return rich_to_ansi(s)

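As the __post_init__ above shows, Url derives the host from the URL, marks the finding verified when a status code is set, and flags directory listings from the title. A small sketch with hypothetical values (same caveat about the base __post_init__ in _base.py, which is not part of this excerpt):

from secator.output_types.url import Url

u = Url(url='http://example.com/admin/', status_code=200, title='Index of /admin')
print(u.host)          # example.com (via urlparse(...).hostname)
print(u.verified)      # True, since status_code != 0
print(u.is_directory)  # True, since 'Index of' appears in the title
print(str(u))          # http://example.com/admin/
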
secator/output_types/user_account.py
ADDED
@@ -0,0 +1,41 @@
import time
from dataclasses import dataclass, field

from secator.definitions import SITE_NAME, URL, USERNAME
from secator.output_types import OutputType
from secator.utils import rich_to_ansi, rich_escape as _s, format_object


@dataclass
class UserAccount(OutputType):
    username: str
    url: str = ''
    email: str = ''
    site_name: str = ''
    extra_data: dict = field(default_factory=dict, compare=False)
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='user_account', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = [SITE_NAME, USERNAME, URL]
    _sort_by = (URL, USERNAME)

    def __str__(self) -> str:
        return self.url

    def __repr__(self) -> str:
        s = f'👤 [green]{_s(self.username)}[/]'
        if self.email:
            s += rf' \[[bold yellow]{_s(self.email)}[/]]'
        if self.site_name:
            s += rf' \[[bold blue]{self.site_name}[/]]'
        if self.url:
            s += rf' \[[white]{_s(self.url)}[/]]'
        if self.extra_data:
            s += format_object(self.extra_data, 'yellow')
        return rich_to_ansi(s)

secator/output_types/vulnerability.py
ADDED
@@ -0,0 +1,101 @@
import time
from dataclasses import dataclass, field
from typing import List

from secator.definitions import (CONFIDENCE, CVSS_SCORE, EXTRA_DATA, ID,
                                 MATCHED_AT, NAME, REFERENCE, SEVERITY, TAGS)
from secator.output_types import OutputType
from secator.utils import rich_to_ansi, rich_escape as _s, format_object


@dataclass
class Vulnerability(OutputType):
    name: str
    provider: str = ''
    id: str = ''
    matched_at: str = ''
    ip: str = field(default='', compare=False)
    confidence: str = 'low'
    severity: str = 'unknown'
    cvss_score: float = 0
    cvss_vec: str = ''
    epss_score: float = 0
    tags: List[str] = field(default_factory=list, compare=False)
    extra_data: dict = field(default_factory=dict, compare=False)
    description: str = field(default='', compare=False)
    references: List[str] = field(default_factory=list, compare=False)
    reference: str = field(default='', compare=False)
    confidence_nb: int = 0
    severity_nb: int = 0
    _source: str = field(default='', repr=True, compare=False)
    _type: str = field(default='vulnerability', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _tagged: bool = field(default=False, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = [
        MATCHED_AT,
        SEVERITY,
        CONFIDENCE,
        NAME,
        ID,
        CVSS_SCORE,
        TAGS,
        EXTRA_DATA,
        REFERENCE
    ]
    _sort_by = ('confidence_nb', 'severity_nb', 'matched_at', 'cvss_score')

    def __post_init__(self):
        super().__post_init__()
        severity_map = {
            'critical': 0,
            'high': 1,
            'medium': 2,
            'low': 3,
            'info': 4,
            'unknown': 5,
            None: 6
        }
        self.severity = self.severity.lower()  # normalize severity
        self.severity_nb = severity_map.get(self.severity, 6)
        self.confidence_nb = severity_map[self.confidence]
        if len(self.references) > 0:
            self.reference = self.references[0]

    def __repr__(self):
        data = self.extra_data

        # TODO: review this
        if 'data' in data and isinstance(data['data'], list):
            data = data['data']

        tags = self.tags
        colors = {
            'critical': 'bold red',
            'high': 'red',
            'medium': 'yellow',
            'low': 'green',
            'info': 'magenta',
            'unknown': 'dim magenta'
        }
        c = colors.get(self.severity, 'dim magenta')
        name = self.name
        if self.reference:
            name += rf' [link={_s(self.reference)}]🡕[/link]'
        s = rf'🚨 \[[green]{name}[/]]'
        s += rf' \[[{c}]{self.severity}[/]] {_s(self.matched_at)}'  # noqa: E501
        if tags:
            tags_str = ','.join(tags)
            s += rf' \[[cyan]{_s(tags_str)}[/]]'
        if data:
            s += format_object(data, 'yellow')
        if self.confidence == 'low':
            s = f'[dim]{s}[/]'
        return rich_to_ansi(s)

    def __str__(self):
        return self.matched_at + ' -> ' + self.name

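Vulnerability.__post_init__ lower-cases the severity, maps severity and confidence onto the numeric ranks used by _sort_by, and promotes the first reference. A short sketch with hypothetical values:

from secator.output_types.vulnerability import Vulnerability

v = Vulnerability(
    name='Example finding',
    matched_at='http://example.com',
    severity='HIGH',          # normalized to 'high'
    confidence='high',
    references=['https://example.com/advisory'],  # hypothetical reference
)
print(v.severity, v.severity_nb)  # high 1  (critical=0 ... unknown=5)
print(v.confidence_nb)            # 1, same map applied to confidence
print(v.reference)                # https://example.com/advisory
print(str(v))                     # http://example.com -> Example finding
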
secator/output_types/warning.py
ADDED
@@ -0,0 +1,30 @@
from dataclasses import dataclass, field
import time
from secator.output_types import OutputType
from secator.utils import strip_rich_markup, rich_to_ansi


@dataclass
class Warning(OutputType):
    message: str
    message_color: str = field(default='', compare=False)
    task_id: str = field(default='', compare=False)
    _source: str = field(default='', repr=True)
    _type: str = field(default='warning', repr=True)
    _timestamp: int = field(default_factory=lambda: time.time(), compare=False)
    _uuid: str = field(default='', repr=True, compare=False)
    _context: dict = field(default_factory=dict, repr=True, compare=False)
    _duplicate: bool = field(default=False, repr=True, compare=False)
    _related: list = field(default_factory=list, compare=False)

    _table_fields = ['task_name', 'message']
    _sort_by = ('_timestamp',)

    def __post_init__(self):
        super().__post_init__()
        self.message_color = self.message
        self.message = strip_rich_markup(self.message)

    def __repr__(self):
        s = rf"\[[yellow]WRN[/]] {self.message_color}"
        return rich_to_ansi(s)

secator/report.py
ADDED
@@ -0,0 +1,140 @@
import operator

from secator.config import CONFIG
from secator.output_types import FINDING_TYPES, OutputType
from secator.utils import get_file_timestamp, traceback_as_string
from secator.rich import console
from secator.runners._helpers import extract_from_results

import concurrent.futures
from threading import Lock


def remove_duplicates(objects):
    unique_objects = []
    lock = Lock()

    def add_if_unique(obj):
        nonlocal unique_objects  # noqa: F824
        with lock:
            # Perform linear search to check for duplicates
            if all(obj != existing_obj for existing_obj in unique_objects):
                unique_objects.append(obj)

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        # Execute the function concurrently for each object
        executor.map(add_if_unique, objects)

    return unique_objects


# TODO: initialize from data, not from runner
class Report:
    """Report class.

    Args:
        runner (secator.runners.Runner): Runner instance.
        title (str): Report title.
        exporters (list): List of exporter classes.
    """
    def __init__(self, runner, title=None, exporters=[]):
        self.title = title or f'{runner.config.type}_{runner.config.name}'
        self.runner = runner
        self.timestamp = get_file_timestamp()
        self.exporters = exporters
        self.workspace_name = runner.workspace_name
        self.output_folder = runner.reports_folder

    def send(self):
        for report_cls in self.exporters:
            try:
                report_cls(self).send()
            except Exception as e:
                console.print(
                    f'[bold red]Could not create exporter {report_cls.__name__} for {self.__class__.__name__}: '
                    f'{str(e)}[/]\n[dim]{traceback_as_string(e)}[/]',
                )

    def build(self, extractors=[], dedupe=CONFIG.runners.remove_duplicates):
        # Prepare report structure
        runner_fields = {
            'name',
            'status',
            'targets',
            'start_time',
            'end_time',
            'elapsed',
            'elapsed_human',
            'run_opts',
            'results_count'
        }
        data = {
            'info': {k: v for k, v in self.runner.toDict().items() if k in runner_fields},
            'results': {}
        }
        if 'results' in data['info']:
            del data['info']['results']
        data['info']['title'] = self.title
        data['info']['errors'] = self.runner.errors

        # Fill report
        for output_type in FINDING_TYPES:
            output_name = output_type.get_name()
            sort_by, _ = get_table_fields(output_type)
            items = [
                item for item in self.runner.results
                if isinstance(item, OutputType) and item._type == output_name
            ]
            if items:
                if sort_by and all(sort_by):
                    try:
                        items = sorted(items, key=operator.attrgetter(*sort_by))
                    except TypeError as e:
                        console.print(f'[bold red]Could not sort {output_name} by {sort_by}: {str(e)}[/]')
                        console.print(f'[dim]{traceback_as_string(e)}[/]')
                if dedupe:
                    items = remove_duplicates(items)
                if extractors:
                    all_res = []
                    extractors_type = [extractor for extractor in extractors if extractor.get('type') == output_name]
                    for extractor in extractors_type:
                        op = extractor.get('op', 'or')
                        res, errors = extract_from_results(items, extractors=[extractor])
                        # console.print(f'{extractor} --> {len(res)} results')
                        if not res:
                            continue
                        if errors:
                            data['info']['errors'] = errors
                        if res:
                            if op == 'or':
                                all_res = all_res + res
                            else:
                                if not all_res:
                                    all_res = res
                                else:
                                    all_res = [item for item in res if item in all_res]
                    items = remove_duplicates(all_res) if dedupe else all_res
                data['results'][output_name] = items

        # Save data
        self.data = data

    def is_empty(self):
        return all(not items for items in self.data['results'].values())


def get_table_fields(output_type):
    """Get output fields and sort fields based on output type.

    Args:
        output_type (str): Output type.

    Returns:
        tuple: Tuple of sort_by (tuple), output_fields (list).
    """
    sort_by = ()
    output_fields = []
    if output_type in FINDING_TYPES:
        sort_by = output_type._sort_by
        output_fields = output_type._table_fields
    return sort_by, output_fields