aiptx-2.0.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (187)
  1. aipt_v2/__init__.py +110 -0
  2. aipt_v2/__main__.py +24 -0
  3. aipt_v2/agents/AIPTxAgent/__init__.py +10 -0
  4. aipt_v2/agents/AIPTxAgent/aiptx_agent.py +211 -0
  5. aipt_v2/agents/__init__.py +46 -0
  6. aipt_v2/agents/base.py +520 -0
  7. aipt_v2/agents/exploit_agent.py +688 -0
  8. aipt_v2/agents/ptt.py +406 -0
  9. aipt_v2/agents/state.py +168 -0
  10. aipt_v2/app.py +957 -0
  11. aipt_v2/browser/__init__.py +31 -0
  12. aipt_v2/browser/automation.py +458 -0
  13. aipt_v2/browser/crawler.py +453 -0
  14. aipt_v2/cli.py +2933 -0
  15. aipt_v2/compliance/__init__.py +71 -0
  16. aipt_v2/compliance/compliance_report.py +449 -0
  17. aipt_v2/compliance/framework_mapper.py +424 -0
  18. aipt_v2/compliance/nist_mapping.py +345 -0
  19. aipt_v2/compliance/owasp_mapping.py +330 -0
  20. aipt_v2/compliance/pci_mapping.py +297 -0
  21. aipt_v2/config.py +341 -0
  22. aipt_v2/core/__init__.py +43 -0
  23. aipt_v2/core/agent.py +630 -0
  24. aipt_v2/core/llm.py +395 -0
  25. aipt_v2/core/memory.py +305 -0
  26. aipt_v2/core/ptt.py +329 -0
  27. aipt_v2/database/__init__.py +14 -0
  28. aipt_v2/database/models.py +232 -0
  29. aipt_v2/database/repository.py +384 -0
  30. aipt_v2/docker/__init__.py +23 -0
  31. aipt_v2/docker/builder.py +260 -0
  32. aipt_v2/docker/manager.py +222 -0
  33. aipt_v2/docker/sandbox.py +371 -0
  34. aipt_v2/evasion/__init__.py +58 -0
  35. aipt_v2/evasion/request_obfuscator.py +272 -0
  36. aipt_v2/evasion/tls_fingerprint.py +285 -0
  37. aipt_v2/evasion/ua_rotator.py +301 -0
  38. aipt_v2/evasion/waf_bypass.py +439 -0
  39. aipt_v2/execution/__init__.py +23 -0
  40. aipt_v2/execution/executor.py +302 -0
  41. aipt_v2/execution/parser.py +544 -0
  42. aipt_v2/execution/terminal.py +337 -0
  43. aipt_v2/health.py +437 -0
  44. aipt_v2/intelligence/__init__.py +194 -0
  45. aipt_v2/intelligence/adaptation.py +474 -0
  46. aipt_v2/intelligence/auth.py +520 -0
  47. aipt_v2/intelligence/chaining.py +775 -0
  48. aipt_v2/intelligence/correlation.py +536 -0
  49. aipt_v2/intelligence/cve_aipt.py +334 -0
  50. aipt_v2/intelligence/cve_info.py +1111 -0
  51. aipt_v2/intelligence/knowledge_graph.py +590 -0
  52. aipt_v2/intelligence/learning.py +626 -0
  53. aipt_v2/intelligence/llm_analyzer.py +502 -0
  54. aipt_v2/intelligence/llm_tool_selector.py +518 -0
  55. aipt_v2/intelligence/payload_generator.py +562 -0
  56. aipt_v2/intelligence/rag.py +239 -0
  57. aipt_v2/intelligence/scope.py +442 -0
  58. aipt_v2/intelligence/searchers/__init__.py +5 -0
  59. aipt_v2/intelligence/searchers/exploitdb_searcher.py +523 -0
  60. aipt_v2/intelligence/searchers/github_searcher.py +467 -0
  61. aipt_v2/intelligence/searchers/google_searcher.py +281 -0
  62. aipt_v2/intelligence/tools.json +443 -0
  63. aipt_v2/intelligence/triage.py +670 -0
  64. aipt_v2/interactive_shell.py +559 -0
  65. aipt_v2/interface/__init__.py +5 -0
  66. aipt_v2/interface/cli.py +230 -0
  67. aipt_v2/interface/main.py +501 -0
  68. aipt_v2/interface/tui.py +1276 -0
  69. aipt_v2/interface/utils.py +583 -0
  70. aipt_v2/llm/__init__.py +39 -0
  71. aipt_v2/llm/config.py +26 -0
  72. aipt_v2/llm/llm.py +514 -0
  73. aipt_v2/llm/memory.py +214 -0
  74. aipt_v2/llm/request_queue.py +89 -0
  75. aipt_v2/llm/utils.py +89 -0
  76. aipt_v2/local_tool_installer.py +1467 -0
  77. aipt_v2/models/__init__.py +15 -0
  78. aipt_v2/models/findings.py +295 -0
  79. aipt_v2/models/phase_result.py +224 -0
  80. aipt_v2/models/scan_config.py +207 -0
  81. aipt_v2/monitoring/grafana/dashboards/aipt-dashboard.json +355 -0
  82. aipt_v2/monitoring/grafana/dashboards/default.yml +17 -0
  83. aipt_v2/monitoring/grafana/datasources/prometheus.yml +17 -0
  84. aipt_v2/monitoring/prometheus.yml +60 -0
  85. aipt_v2/orchestration/__init__.py +52 -0
  86. aipt_v2/orchestration/pipeline.py +398 -0
  87. aipt_v2/orchestration/progress.py +300 -0
  88. aipt_v2/orchestration/scheduler.py +296 -0
  89. aipt_v2/orchestrator.py +2427 -0
  90. aipt_v2/payloads/__init__.py +27 -0
  91. aipt_v2/payloads/cmdi.py +150 -0
  92. aipt_v2/payloads/sqli.py +263 -0
  93. aipt_v2/payloads/ssrf.py +204 -0
  94. aipt_v2/payloads/templates.py +222 -0
  95. aipt_v2/payloads/traversal.py +166 -0
  96. aipt_v2/payloads/xss.py +204 -0
  97. aipt_v2/prompts/__init__.py +60 -0
  98. aipt_v2/proxy/__init__.py +29 -0
  99. aipt_v2/proxy/history.py +352 -0
  100. aipt_v2/proxy/interceptor.py +452 -0
  101. aipt_v2/recon/__init__.py +44 -0
  102. aipt_v2/recon/dns.py +241 -0
  103. aipt_v2/recon/osint.py +367 -0
  104. aipt_v2/recon/subdomain.py +372 -0
  105. aipt_v2/recon/tech_detect.py +311 -0
  106. aipt_v2/reports/__init__.py +17 -0
  107. aipt_v2/reports/generator.py +313 -0
  108. aipt_v2/reports/html_report.py +378 -0
  109. aipt_v2/runtime/__init__.py +53 -0
  110. aipt_v2/runtime/base.py +30 -0
  111. aipt_v2/runtime/docker.py +401 -0
  112. aipt_v2/runtime/local.py +346 -0
  113. aipt_v2/runtime/tool_server.py +205 -0
  114. aipt_v2/runtime/vps.py +830 -0
  115. aipt_v2/scanners/__init__.py +28 -0
  116. aipt_v2/scanners/base.py +273 -0
  117. aipt_v2/scanners/nikto.py +244 -0
  118. aipt_v2/scanners/nmap.py +402 -0
  119. aipt_v2/scanners/nuclei.py +273 -0
  120. aipt_v2/scanners/web.py +454 -0
  121. aipt_v2/scripts/security_audit.py +366 -0
  122. aipt_v2/setup_wizard.py +941 -0
  123. aipt_v2/skills/__init__.py +80 -0
  124. aipt_v2/skills/agents/__init__.py +14 -0
  125. aipt_v2/skills/agents/api_tester.py +706 -0
  126. aipt_v2/skills/agents/base.py +477 -0
  127. aipt_v2/skills/agents/code_review.py +459 -0
  128. aipt_v2/skills/agents/security_agent.py +336 -0
  129. aipt_v2/skills/agents/web_pentest.py +818 -0
  130. aipt_v2/skills/prompts/__init__.py +647 -0
  131. aipt_v2/system_detector.py +539 -0
  132. aipt_v2/telemetry/__init__.py +7 -0
  133. aipt_v2/telemetry/tracer.py +347 -0
  134. aipt_v2/terminal/__init__.py +28 -0
  135. aipt_v2/terminal/executor.py +400 -0
  136. aipt_v2/terminal/sandbox.py +350 -0
  137. aipt_v2/tools/__init__.py +44 -0
  138. aipt_v2/tools/active_directory/__init__.py +78 -0
  139. aipt_v2/tools/active_directory/ad_config.py +238 -0
  140. aipt_v2/tools/active_directory/bloodhound_wrapper.py +447 -0
  141. aipt_v2/tools/active_directory/kerberos_attacks.py +430 -0
  142. aipt_v2/tools/active_directory/ldap_enum.py +533 -0
  143. aipt_v2/tools/active_directory/smb_attacks.py +505 -0
  144. aipt_v2/tools/agents_graph/__init__.py +19 -0
  145. aipt_v2/tools/agents_graph/agents_graph_actions.py +69 -0
  146. aipt_v2/tools/api_security/__init__.py +76 -0
  147. aipt_v2/tools/api_security/api_discovery.py +608 -0
  148. aipt_v2/tools/api_security/graphql_scanner.py +622 -0
  149. aipt_v2/tools/api_security/jwt_analyzer.py +577 -0
  150. aipt_v2/tools/api_security/openapi_fuzzer.py +761 -0
  151. aipt_v2/tools/browser/__init__.py +5 -0
  152. aipt_v2/tools/browser/browser_actions.py +238 -0
  153. aipt_v2/tools/browser/browser_instance.py +535 -0
  154. aipt_v2/tools/browser/tab_manager.py +344 -0
  155. aipt_v2/tools/cloud/__init__.py +70 -0
  156. aipt_v2/tools/cloud/cloud_config.py +273 -0
  157. aipt_v2/tools/cloud/cloud_scanner.py +639 -0
  158. aipt_v2/tools/cloud/prowler_tool.py +571 -0
  159. aipt_v2/tools/cloud/scoutsuite_tool.py +359 -0
  160. aipt_v2/tools/executor.py +307 -0
  161. aipt_v2/tools/parser.py +408 -0
  162. aipt_v2/tools/proxy/__init__.py +5 -0
  163. aipt_v2/tools/proxy/proxy_actions.py +103 -0
  164. aipt_v2/tools/proxy/proxy_manager.py +789 -0
  165. aipt_v2/tools/registry.py +196 -0
  166. aipt_v2/tools/scanners/__init__.py +343 -0
  167. aipt_v2/tools/scanners/acunetix_tool.py +712 -0
  168. aipt_v2/tools/scanners/burp_tool.py +631 -0
  169. aipt_v2/tools/scanners/config.py +156 -0
  170. aipt_v2/tools/scanners/nessus_tool.py +588 -0
  171. aipt_v2/tools/scanners/zap_tool.py +612 -0
  172. aipt_v2/tools/terminal/__init__.py +5 -0
  173. aipt_v2/tools/terminal/terminal_actions.py +37 -0
  174. aipt_v2/tools/terminal/terminal_manager.py +153 -0
  175. aipt_v2/tools/terminal/terminal_session.py +449 -0
  176. aipt_v2/tools/tool_processing.py +108 -0
  177. aipt_v2/utils/__init__.py +17 -0
  178. aipt_v2/utils/logging.py +202 -0
  179. aipt_v2/utils/model_manager.py +187 -0
  180. aipt_v2/utils/searchers/__init__.py +269 -0
  181. aipt_v2/verify_install.py +793 -0
  182. aiptx-2.0.7.dist-info/METADATA +345 -0
  183. aiptx-2.0.7.dist-info/RECORD +187 -0
  184. aiptx-2.0.7.dist-info/WHEEL +5 -0
  185. aiptx-2.0.7.dist-info/entry_points.txt +7 -0
  186. aiptx-2.0.7.dist-info/licenses/LICENSE +21 -0
  187. aiptx-2.0.7.dist-info/top_level.txt +1 -0
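The 187 entries above can be cross-checked against the wheel itself; a minimal sketch, assuming the downloaded file aiptx-2.0.7-py3-none-any.whl sits in the current directory:

    import zipfile

    with zipfile.ZipFile("aiptx-2.0.7-py3-none-any.whl") as whl:
        names = whl.namelist()
        print(len(names))                                            # expected: 187, matching the listing
        print([n for n in names if n.endswith("orchestrator.py")])   # ['aipt_v2/orchestrator.py']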
aipt_v2/orchestrator.py
@@ -0,0 +1,2427 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ AIPT Orchestrator - Full Penetration Testing Pipeline
4
+ =====================================================
5
+
6
+ Orchestrates the complete pentest workflow:
7
+ RECON → SCAN → EXPLOIT → REPORT
8
+
9
+ Each phase uses specialized tools and integrates with enterprise scanners
10
+ (Acunetix, Burp Suite) for comprehensive coverage.
11
+
12
+ Usage:
13
+ from aipt_v2.orchestrator import Orchestrator
14
+
15
+ orch = Orchestrator("example.com")
16
+ results = await orch.run()
17
+
18
+ Or via CLI:
19
+ python -m aipt_v2.orchestrator example.com --output ./results
20
+ """
21
+
22
+ import asyncio
23
+ import json
24
+ import logging
25
+ import os
26
+ import re
27
+ import shlex
28
+ import subprocess
29
+ import time
30
+ from dataclasses import dataclass, field
31
+ from datetime import datetime, timezone
32
+ from enum import Enum
33
+ from pathlib import Path
34
+ from typing import Any, Callable, Dict, List, Optional
35
+
36
+ # Scanner integrations
37
+ from aipt_v2.tools.scanners import (
38
+ AcunetixTool,
39
+ AcunetixConfig,
40
+ ScanProfile,
41
+ BurpTool,
42
+ BurpConfig,
43
+ get_acunetix,
44
+ get_burp,
45
+ acunetix_scan,
46
+ acunetix_vulns,
47
+ test_all_connections,
48
+ )
49
+
50
+ # Intelligence module - Advanced analysis capabilities
51
+ from aipt_v2.intelligence import (
52
+ # Vulnerability Chaining - Connect related findings into attack paths
53
+ VulnerabilityChainer,
54
+ AttackChain,
55
+ # AI-Powered Triage - Prioritize by real-world impact
56
+ AITriage,
57
+ TriageResult,
58
+ # Scope Enforcement - Stay within authorization
59
+ ScopeEnforcer,
60
+ ScopeConfig,
61
+ ScopeDecision,
62
+ create_scope_from_target,
63
+ # Authentication - Test protected resources
64
+ AuthenticationManager,
65
+ AuthCredentials,
66
+ AuthMethod,
67
+ )
68
+
69
+ logger = logging.getLogger(__name__)
70
+
71
+
72
+ # ==================== SECURITY: Input Validation ====================
73
+
74
+ # Domain validation pattern (RFC 1123 compliant)
75
+ # Allows: alphanumeric, hyphens (not at start/end), dots for subdomains
76
+ DOMAIN_PATTERN = re.compile(
77
+ r'^(?!-)' # Cannot start with hyphen
78
+ r'(?:[a-zA-Z0-9]' # Start with alphanumeric
79
+ r'(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?' # Middle can have hyphens
80
+ r'\.)*' # Subdomains separated by dots
81
+ r'[a-zA-Z0-9]' # Domain start
82
+ r'(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?' # Domain middle
83
+ r'\.[a-zA-Z]{2,}$' # TLD (at least 2 chars)
84
+ )
85
+
86
+ # IP address pattern (IPv4)
87
+ IPV4_PATTERN = re.compile(
88
+ r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
89
+ r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
90
+ )
91
+
92
+ # Characters that are dangerous in shell commands
93
+ SHELL_DANGEROUS_CHARS = set(';|&$`\n\r\\\'\"(){}[]<>!')
94
+
95
+
96
+ def validate_domain(domain: str) -> str:
97
+ """
98
+ Validate domain format to prevent command injection (CWE-78).
99
+
100
+ Args:
101
+ domain: Domain string to validate
102
+
103
+ Returns:
104
+ Validated domain string
105
+
106
+ Raises:
107
+ ValueError: If domain format is invalid or contains dangerous characters
108
+ """
109
+ if not domain:
110
+ raise ValueError("Domain cannot be empty")
111
+
112
+ domain = domain.strip().lower()
113
+
114
+ # Check length
115
+ if len(domain) > 253:
116
+ raise ValueError(f"Domain too long: {len(domain)} chars (max 253)")
117
+
118
+ # Check for dangerous shell characters
119
+ dangerous_found = set(domain) & SHELL_DANGEROUS_CHARS
120
+ if dangerous_found:
121
+ raise ValueError(
122
+ f"Domain contains dangerous characters: {dangerous_found}. "
123
+ "Possible command injection attempt."
124
+ )
125
+
126
+ # Validate as IP or domain
127
+ if IPV4_PATTERN.match(domain):
128
+ return domain
129
+
130
+ if DOMAIN_PATTERN.match(domain):
131
+ return domain
132
+
133
+ raise ValueError(
134
+ f"Invalid domain format: {domain}. "
135
+ "Expected format: example.com or sub.example.com"
136
+ )
137
+
138
+
139
+ def sanitize_for_shell(value: str) -> str:
140
+ """
141
+ Sanitize a value for safe use in shell commands using shlex.quote.
142
+
143
+ Args:
144
+ value: String to sanitize
145
+
146
+ Returns:
147
+ Shell-escaped string safe for command interpolation
148
+ """
149
+ return shlex.quote(value)
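For context, shlex.quote leaves tokens that are already shell-safe untouched and single-quotes everything else, so a validated domain normally passes through unchanged:

    from shlex import quote

    quote("example.com")      # -> 'example.com'        (returned as-is)
    quote("example.com;id")   # -> "'example.com;id'"   (wrapped; ';' loses its shell meaning)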
150
+
151
+
152
+ class Phase(Enum):
153
+ """Pentest phases."""
154
+ RECON = "recon"
155
+ SCAN = "scan"
156
+ ANALYZE = "analyze" # Intelligence analysis (chaining, triage)
157
+ EXPLOIT = "exploit"
158
+ POST_EXPLOIT = "post_exploit" # Privilege escalation & lateral movement
159
+ REPORT = "report"
160
+
161
+
162
+ class Severity(Enum):
163
+ """Finding severity levels."""
164
+ CRITICAL = "critical"
165
+ HIGH = "high"
166
+ MEDIUM = "medium"
167
+ LOW = "low"
168
+ INFO = "info"
169
+
170
+
171
+ @dataclass
172
+ class Finding:
173
+ """Security finding from any tool."""
174
+ type: str
175
+ value: str
176
+ description: str
177
+ severity: str
178
+ phase: str
179
+ tool: str
180
+ target: str = ""
181
+ evidence: str = ""
182
+ remediation: str = ""
183
+ metadata: Dict[str, Any] = field(default_factory=dict)
184
+ timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
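The dataclass is used with plain-string severity and phase values that mirror the enums above; a representative instance, modelled on the nmap findings emitted later in this file:

    from aipt_v2.orchestrator import Finding, Phase, Severity

    finding = Finding(
        type="open_port",
        value="443/tcp",
        description="Port 443/tcp open running https",
        severity=Severity.INFO.value,   # stored as the string "info"
        phase=Phase.RECON.value,        # stored as the string "recon"
        tool="nmap",
        target="example.com",
    )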
185
+
186
+
187
+ @dataclass
188
+ class PhaseResult:
189
+ """Result of a phase execution."""
190
+ phase: Phase
191
+ status: str
192
+ started_at: str
193
+ finished_at: str
194
+ duration: float
195
+ findings: List[Finding]
196
+ tools_run: List[str]
197
+ errors: List[str] = field(default_factory=list)
198
+ metadata: Dict[str, Any] = field(default_factory=dict)
199
+
200
+
201
+ @dataclass
202
+ class OrchestratorConfig:
203
+ """Configuration for the orchestrator."""
204
+ # Target
205
+ target: str
206
+ output_dir: str = "./scan_results"
207
+
208
+ # Scan mode
209
+ full_mode: bool = False # Enable all tools including exploitation
210
+
211
+ # Output control
212
+ verbose: bool = True # Show verbose output and command results in real-time
213
+ show_command_output: bool = True # Display command stdout/stderr as it runs
214
+
215
+ # Phase control
216
+ skip_recon: bool = False
217
+ skip_scan: bool = False
218
+ skip_exploit: bool = False
219
+ skip_post_exploit: bool = True # Disabled by default, auto-enables on shell access
220
+ skip_report: bool = False
221
+
222
+ # Recon settings - ENHANCED with 10 tools
223
+ recon_tools: List[str] = field(default_factory=lambda: [
224
+ "subfinder", "assetfinder", "amass", "httpx", "nmap",
225
+ "waybackurls", "theHarvester", "dnsrecon", "wafw00f", "whatweb"
226
+ ])
227
+
228
+ # Scan settings - ENHANCED with 8 tools
229
+ scan_tools: List[str] = field(default_factory=lambda: [
230
+ "nuclei", "ffuf", "sslscan", "nikto", "wpscan",
231
+ "testssl", "gobuster", "dirsearch"
232
+ ])
233
+
234
+ # Exploit settings - NEW exploitation tools (enabled in full_mode)
235
+ exploit_tools: List[str] = field(default_factory=lambda: [
236
+ "sqlmap", "commix", "xsstrike", "hydra", "searchsploit"
237
+ ])
238
+
239
+ # Post-exploit settings - NEW privilege escalation tools
240
+ post_exploit_tools: List[str] = field(default_factory=lambda: [
241
+ "linpeas", "winpeas", "pspy", "lazagne"
242
+ ])
243
+
244
+ # Enterprise scanners
245
+ use_acunetix: bool = True
246
+ use_burp: bool = False
247
+ use_nessus: bool = False # NEW
248
+ use_zap: bool = False # NEW
249
+ acunetix_profile: str = "full"
250
+ wait_for_scanners: bool = False
251
+ scanner_timeout: int = 3600
252
+
253
+ # Exploit settings
254
+ validate_findings: bool = True
255
+ check_sensitive_paths: bool = True
256
+ enable_exploitation: bool = False # Requires explicit opt-in or full_mode
257
+
258
+ # SQLMap settings
259
+ sqlmap_level: int = 2
260
+ sqlmap_risk: int = 2
261
+ sqlmap_timeout: int = 600
262
+
263
+ # Hydra settings
264
+ hydra_threads: int = 4
265
+ hydra_timeout: int = 300
266
+ wordlist_users: str = "/usr/share/wordlists/metasploit/unix_users.txt"
267
+ wordlist_passwords: str = "/usr/share/wordlists/rockyou.txt"
268
+
269
+ # Container/DevSecOps settings
270
+ enable_container_scan: bool = False
271
+ enable_secret_detection: bool = False
272
+ trivy_severity: str = "HIGH,CRITICAL"
273
+
274
+ # Report settings
275
+ report_format: str = "html"
276
+ report_template: str = "professional"
277
+
278
+ # Shell access tracking (set during exploitation)
279
+ shell_obtained: bool = False
280
+ target_os: str = "" # "linux", "windows", or ""
281
+
282
+ # Intelligence module settings
283
+ enable_intelligence: bool = True # Enable chaining and triage
284
+ scope_config: Optional[ScopeConfig] = None # Authorization boundary
285
+ auth_credentials: Optional[AuthCredentials] = None # Authentication for protected resources
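As a sketch of how the knobs above combine for a more aggressive run (field names and defaults taken from the dataclass; the values are illustrative):

    from aipt_v2.orchestrator import OrchestratorConfig

    config = OrchestratorConfig(
        target="https://shop.example.com",
        output_dir="./results",
        full_mode=True,              # also turns on exploitation and DevSecOps scans
        use_acunetix=False,          # skip the enterprise DAST integration
        wait_for_scanners=False,
        report_format="html",
    )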
286
+
287
+
288
+ class Orchestrator:
289
+ """
290
+ AIPT Orchestrator - Full pentest pipeline controller.
291
+
292
+ Coordinates reconnaissance, scanning, exploitation, and reporting
293
+ phases with integrated support for enterprise scanners.
294
+ """
295
+
296
+ def __init__(self, target: str, config: Optional[OrchestratorConfig] = None):
297
+ """
298
+ Initialize the orchestrator.
299
+
300
+ Args:
301
+ target: Target domain or URL
302
+ config: Optional configuration
303
+ """
304
+ self.target = self._normalize_target(target)
305
+ self.domain = self._extract_domain(target)
306
+ self.config = config or OrchestratorConfig(target=target)
307
+ self.config.target = self.target
308
+
309
+ # State
310
+ self.findings: List[Finding] = []
311
+ self.phase_results: Dict[Phase, PhaseResult] = {}
312
+ self.subdomains: List[str] = []
313
+ self.live_hosts: List[str] = []
314
+ self.scan_ids: Dict[str, str] = {} # Scanner -> scan_id mapping
315
+
316
+ # Setup output directory
317
+ self.timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
318
+ self.output_dir = Path(self.config.output_dir) / f"{self.domain}_scan_{self.timestamp}"
319
+ self.output_dir.mkdir(parents=True, exist_ok=True)
320
+
321
+ # Callbacks
322
+ self.on_phase_start: Optional[Callable[[Phase], None]] = None
323
+ self.on_phase_complete: Optional[Callable[[PhaseResult], None]] = None
324
+ self.on_finding: Optional[Callable[[Finding], None]] = None
325
+ self.on_tool_start: Optional[Callable[[str, str], None]] = None
326
+ self.on_tool_complete: Optional[Callable[[str, str, Any], None]] = None
327
+ self.on_chain_discovered: Optional[Callable[[AttackChain], None]] = None
328
+
329
+ # =====================================================================
330
+ # Intelligence Module Components
331
+ # =====================================================================
332
+ if self.config.enable_intelligence:
333
+ # Scope Enforcement - Ensure testing stays within authorization
334
+ if self.config.scope_config:
335
+ self._scope_enforcer = ScopeEnforcer(self.config.scope_config)
336
+ issues = self._scope_enforcer.validate_scope_config()
337
+ for issue in issues:
338
+ logger.warning(f"Scope config: {issue}")
339
+ else:
340
+ self._scope_enforcer = ScopeEnforcer(create_scope_from_target(self.target))
341
+
342
+ # Vulnerability Chainer - Connect related findings
343
+ self._vuln_chainer = VulnerabilityChainer()
344
+
345
+ # AI Triage - Prioritize findings by real-world impact
346
+ self._ai_triage = AITriage()
347
+
348
+ # Authentication Manager
349
+ self._auth_manager: Optional[AuthenticationManager] = None
350
+ if self.config.auth_credentials and self.config.auth_credentials.method != AuthMethod.NONE:
351
+ self._auth_manager = AuthenticationManager(self.config.auth_credentials)
352
+ logger.info(f"Authentication configured: {self.config.auth_credentials.method.value}")
353
+
354
+ # Analysis results storage
355
+ self.attack_chains: List[AttackChain] = []
356
+ self.triage_result: Optional[TriageResult] = None
357
+ else:
358
+ self._scope_enforcer = None
359
+ self._vuln_chainer = None
360
+ self._ai_triage = None
361
+ self._auth_manager = None
362
+ self.attack_chains = []
363
+ self.triage_result = None
364
+
365
+ logger.info(f"Orchestrator initialized for {self.domain}")
366
+ logger.info(f"Output directory: {self.output_dir}")
367
+ if self.config.enable_intelligence:
368
+ logger.info("Intelligence module enabled (chaining, triage, scope)")
369
+
370
+ @staticmethod
371
+ def _normalize_target(target: str) -> str:
372
+ """Normalize target URL."""
373
+ if not target.startswith(("http://", "https://")):
374
+ return f"https://{target}"
375
+ return target
376
+
377
+ @staticmethod
378
+ def _extract_domain(target: str) -> str:
379
+ """
380
+ Extract and validate domain from target.
381
+
382
+ Security: Validates domain format to prevent command injection (CWE-78).
383
+ """
384
+ domain = target.replace("https://", "").replace("http://", "")
385
+ domain = domain.split("/")[0]
386
+ domain = domain.split(":")[0]
387
+
388
+ # Security: Validate domain format
389
+ return validate_domain(domain)
390
+
391
+ @property
392
+ def safe_domain(self) -> str:
393
+ """
394
+ Get shell-safe domain for command interpolation.
395
+
396
+ Returns:
397
+ Shell-escaped domain string
398
+ """
399
+ return sanitize_for_shell(self.domain)
400
+
401
+ def _log_phase(self, phase: Phase, message: str):
402
+ """Log a phase message."""
403
+ print(f"\n{'='*60}", flush=True)
404
+ print(f" [{phase.value.upper()}] {message}", flush=True)
405
+ print(f"{'='*60}\n", flush=True)
406
+
407
+ def _log_tool(self, tool: str, status: str = "running", elapsed: float = None, error: str = None):
408
+ """Log tool execution with status indicator and elapsed time."""
409
+ icon = "◉" if status == "running" else "✓" if status == "done" else "✗"
410
+ color_start = "\033[33m" if status == "running" else "\033[32m" if status == "done" else "\033[31m"
411
+ color_end = "\033[0m"
412
+
413
+ # Build status line with optional elapsed time
414
+ status_line = f" [{color_start}{icon}{color_end}] {tool}"
415
+ if elapsed is not None and status != "running":
416
+ status_line += f" \033[90m({elapsed:.1f}s)\033[0m"
417
+
418
+ print(status_line, flush=True)
419
+
420
+ if status == "running" and self.config.verbose:
421
+ print(f" → Executing...", flush=True)
422
+ elif status == "error" and error:
423
+ print(f" \033[31m→ Error: {error[:100]}\033[0m", flush=True)
424
+ elif status == "done" and self.config.verbose:
425
+ pass # Output already shown during execution
426
+
427
+ async def _run_command(self, cmd: str, timeout: int = 300) -> tuple[int, str]:
428
+ """
429
+ Run a shell command asynchronously with optional real-time output.
430
+
431
+ In verbose mode, streams output to console as it's produced.
432
+ Always captures output for return value.
433
+ """
434
+ try:
435
+ if self.config.show_command_output:
436
+ # Stream output in real-time while also capturing it
437
+ proc = await asyncio.create_subprocess_shell(
438
+ cmd,
439
+ stdout=asyncio.subprocess.PIPE,
440
+ stderr=asyncio.subprocess.STDOUT # Merge stderr into stdout
441
+ )
442
+
443
+ output_lines = []
444
+
445
+ async def read_stream():
446
+ """Read and display output line by line with heartbeat."""
447
+ import sys
448
+ last_output_time = time.time()
449
+ heartbeat_interval = 30 # Show heartbeat every 30 seconds if no output
450
+
451
+ while True:
452
+ try:
453
+ # Use wait_for to enable heartbeat checking
454
+ line = await asyncio.wait_for(proc.stdout.readline(), timeout=heartbeat_interval)
455
+ if not line:
456
+ break
457
+ decoded = line.decode('utf-8', errors='replace').rstrip()
458
+ output_lines.append(decoded)
459
+ last_output_time = time.time()
460
+ if self.config.verbose:
461
+ # Print with indentation for readability
462
+ print(f" {decoded}", flush=True)
463
+ except asyncio.TimeoutError:
464
+ # No output for a while, show heartbeat
465
+ elapsed = time.time() - last_output_time
466
+ if self.config.verbose:
467
+ print(f" \033[90m... still running ({elapsed:.0f}s since last output)\033[0m", flush=True)
468
+
469
+ try:
470
+ await asyncio.wait_for(read_stream(), timeout=timeout)
471
+ await proc.wait()
472
+ except asyncio.TimeoutError:
473
+ proc.kill()
474
+ return -1, f"Command timed out after {timeout}s"
475
+
476
+ output = "\n".join(output_lines)
477
+ return proc.returncode or 0, output
478
+ else:
479
+ # Silent mode - capture output without displaying
480
+ proc = await asyncio.create_subprocess_shell(
481
+ cmd,
482
+ stdout=asyncio.subprocess.PIPE,
483
+ stderr=asyncio.subprocess.PIPE
484
+ )
485
+ stdout, stderr = await asyncio.wait_for(
486
+ proc.communicate(),
487
+ timeout=timeout
488
+ )
489
+ output = (stdout.decode() if stdout else "") + (stderr.decode() if stderr else "")
490
+ return proc.returncode or 0, output
491
+ except asyncio.TimeoutError:
492
+ return -1, f"Command timed out after {timeout}s"
493
+ except Exception as e:
494
+ return -1, str(e)
495
+
496
+ def _add_finding(self, finding: Finding):
497
+ """Add a finding and trigger callback."""
498
+ self.findings.append(finding)
499
+ if self.on_finding:
500
+ self.on_finding(finding)
501
+
502
+ # ==================== RECON PHASE ====================
503
+
504
+ async def run_recon(self) -> PhaseResult:
505
+ """Execute reconnaissance phase."""
506
+ phase = Phase.RECON
507
+ started_at = datetime.now(timezone.utc).isoformat()
508
+ start_time = time.time()
509
+ findings = []
510
+ tools_run = []
511
+ errors = []
512
+
513
+ if self.on_phase_start:
514
+ self.on_phase_start(phase)
515
+
516
+ self._log_phase(phase, f"Reconnaissance on {self.domain}")
517
+
518
+ # 1. Subdomain Enumeration
519
+ self._log_tool("Subdomain Enumeration")
520
+
521
+ # Subfinder
522
+ if "subfinder" in self.config.recon_tools:
523
+ self._log_tool("subfinder", "running")
524
+ tool_start = time.time()
525
+ # Security: Use safe_domain to prevent command injection
526
+ ret, output = await self._run_command(
527
+ f"subfinder -d {self.safe_domain} -silent"
528
+ )
529
+ tool_elapsed = time.time() - tool_start
530
+ if ret == 0:
531
+ subs = [s.strip() for s in output.split("\n") if s.strip()]
532
+ self.subdomains.extend(subs)
533
+ (self.output_dir / f"subfinder_{self.domain}.txt").write_text(output)
534
+ tools_run.append("subfinder")
535
+ self._log_tool(f"subfinder - {len(subs)} subdomains", "done", tool_elapsed)
536
+ else:
537
+ errors.append(f"subfinder failed: {output[:100] if output else 'unknown error'}")
538
+ self._log_tool("subfinder", "error", tool_elapsed, output[:100] if output else "command failed")
539
+
540
+ # Assetfinder
541
+ if "assetfinder" in self.config.recon_tools:
542
+ self._log_tool("assetfinder", "running")
543
+ tool_start = time.time()
544
+ # Security: Use safe_domain to prevent command injection
545
+ ret, output = await self._run_command(
546
+ f"assetfinder --subs-only {self.safe_domain}"
547
+ )
548
+ tool_elapsed = time.time() - tool_start
549
+ if ret == 0:
550
+ subs = [s.strip() for s in output.split("\n") if s.strip()]
551
+ self.subdomains.extend(subs)
552
+ (self.output_dir / f"assetfinder_{self.domain}.txt").write_text(output)
553
+ tools_run.append("assetfinder")
554
+ self._log_tool(f"assetfinder - {len(subs)} assets", "done", tool_elapsed)
555
+ else:
556
+ errors.append(f"assetfinder failed: {output[:100] if output else 'unknown error'}")
557
+ self._log_tool("assetfinder", "error", tool_elapsed, output[:100] if output else "command failed")
558
+
559
+ # Deduplicate subdomains
560
+ self.subdomains = list(set(self.subdomains))
561
+ all_subs_file = self.output_dir / f"all_subs_{self.domain}.txt"
562
+ all_subs_file.write_text("\n".join(self.subdomains))
563
+
564
+ findings.append(Finding(
565
+ type="subdomain_count",
566
+ value=str(len(self.subdomains)),
567
+ description=f"Discovered {len(self.subdomains)} unique subdomains",
568
+ severity="info",
569
+ phase="recon",
570
+ tool="subdomain_enum",
571
+ target=self.domain
572
+ ))
573
+
574
+ # 2. Live Host Detection with HTTPX
575
+ if "httpx" in self.config.recon_tools and self.subdomains:
576
+ self._log_tool("httpx", "running")
577
+ subs_input = "\n".join(self.subdomains)
578
+
579
+ ret, output = await self._run_command(
580
+ f"echo '{subs_input}' | httpx -silent -status-code -title -tech-detect -json 2>/dev/null",
581
+ timeout=180
582
+ )
583
+ if ret == 0:
584
+ httpx_file = self.output_dir / "httpx_results.json"
585
+ httpx_file.write_text(output)
586
+
587
+ # Parse live hosts
588
+ for line in output.split("\n"):
589
+ if line.strip():
590
+ try:
591
+ data = json.loads(line)
592
+ url = data.get("url", "")
593
+ if url:
594
+ self.live_hosts.append(url)
595
+ except json.JSONDecodeError:
596
+ continue
597
+
598
+ tools_run.append("httpx")
599
+ self._log_tool(f"httpx - {len(self.live_hosts)} live hosts", "done")
600
+
601
+ findings.append(Finding(
602
+ type="live_hosts",
603
+ value=str(len(self.live_hosts)),
604
+ description=f"Found {len(self.live_hosts)} live hosts",
605
+ severity="info",
606
+ phase="recon",
607
+ tool="httpx",
608
+ target=self.domain
609
+ ))
610
+
611
+ # 3. Port Scanning with Nmap
612
+ if "nmap" in self.config.recon_tools:
613
+ self._log_tool("nmap", "running")
614
+ # Security: Use safe_domain to prevent command injection
615
+ ret, output = await self._run_command(
616
+ f"nmap -sV --top-ports 100 {self.safe_domain} 2>/dev/null",
617
+ timeout=300
618
+ )
619
+ if ret == 0:
620
+ (self.output_dir / f"nmap_{self.domain}.txt").write_text(output)
621
+ tools_run.append("nmap")
622
+
623
+ # Parse open ports
624
+ for line in output.split("\n"):
625
+ if "/tcp" in line and "open" in line:
626
+ parts = line.split()
627
+ if len(parts) >= 3:
628
+ port = parts[0]
629
+ service = parts[2] if len(parts) > 2 else "unknown"
630
+ findings.append(Finding(
631
+ type="open_port",
632
+ value=port,
633
+ description=f"Port {port} open running {service}",
634
+ severity="info",
635
+ phase="recon",
636
+ tool="nmap",
637
+ target=self.domain
638
+ ))
639
+
640
+ self._log_tool("nmap - completed", "done")
641
+
642
+ # 4. Wayback URLs
643
+ if "waybackurls" in self.config.recon_tools:
644
+ self._log_tool("waybackurls", "running")
645
+ tool_start = time.time()
646
+ # Security: Use safe_domain to prevent command injection
647
+ ret, output = await self._run_command(
648
+ f"echo {self.safe_domain} | waybackurls | head -5000"
649
+ )
650
+ tool_elapsed = time.time() - tool_start
651
+ if ret == 0:
652
+ (self.output_dir / f"wayback_{self.domain}.txt").write_text(output)
653
+ url_count = len([u for u in output.split("\n") if u.strip()])
654
+ tools_run.append("waybackurls")
655
+ self._log_tool(f"waybackurls - {url_count} URLs", "done", tool_elapsed)
656
+ else:
657
+ errors.append(f"waybackurls failed: {output[:100] if output else 'unknown error'}")
658
+ self._log_tool("waybackurls", "error", tool_elapsed, output[:100] if output else "command failed")
659
+
660
+ # 5. Amass - Advanced Subdomain Enumeration (NEW)
661
+ if "amass" in self.config.recon_tools:
662
+ self._log_tool("amass", "running")
663
+ tool_start = time.time()
664
+ ret, output = await self._run_command(
665
+ f"amass enum -passive -d {self.safe_domain} -timeout 5",
666
+ timeout=360
667
+ )
668
+ tool_elapsed = time.time() - tool_start
669
+ if ret == 0:
670
+ subs = [s.strip() for s in output.split("\n") if s.strip()]
671
+ self.subdomains.extend(subs)
672
+ (self.output_dir / f"amass_{self.domain}.txt").write_text(output)
673
+ tools_run.append("amass")
674
+ self._log_tool(f"amass - {len(subs)} subdomains", "done", tool_elapsed)
675
+ else:
676
+ errors.append(f"amass failed: {output[:100] if output else 'unknown error'}")
677
+ self._log_tool("amass", "error", tool_elapsed, output[:100] if output else "command failed")
678
+
679
+ # 6. theHarvester - OSINT Email & Subdomain Gathering (NEW)
680
+ if "theHarvester" in self.config.recon_tools:
681
+ self._log_tool("theHarvester", "running")
682
+ ret, output = await self._run_command(
683
+ f"theHarvester -d {self.safe_domain} -b all -l 100 2>/dev/null",
684
+ timeout=300
685
+ )
686
+ if ret == 0:
687
+ (self.output_dir / f"theharvester_{self.domain}.txt").write_text(output)
688
+ # Extract emails and hosts
689
+ emails = []
690
+ for line in output.split("\n"):
691
+ if "@" in line and self.domain in line:
692
+ emails.append(line.strip())
693
+ if emails:
694
+ findings.append(Finding(
695
+ type="email_discovered",
696
+ value=str(len(emails)),
697
+ description=f"Discovered {len(emails)} email addresses",
698
+ severity="info",
699
+ phase="recon",
700
+ tool="theHarvester",
701
+ target=self.domain,
702
+ metadata={"emails": emails[:20]} # Store first 20
703
+ ))
704
+ tools_run.append("theHarvester")
705
+ self._log_tool(f"theHarvester - {len(emails)} emails", "done")
706
+
707
+ # 7. dnsrecon - DNS Enumeration & Zone Transfer (NEW)
708
+ if "dnsrecon" in self.config.recon_tools:
709
+ self._log_tool("dnsrecon", "running")
710
+ ret, output = await self._run_command(
711
+ f"dnsrecon -d {self.safe_domain} -t std,brt -j {self.output_dir}/dnsrecon_{self.domain}.json 2>/dev/null",
712
+ timeout=180
713
+ )
714
+ if ret == 0:
715
+ tools_run.append("dnsrecon")
716
+ # Check for zone transfer vulnerability
717
+ if "Zone Transfer" in output and "Success" in output:
718
+ findings.append(Finding(
719
+ type="dns_zone_transfer",
720
+ value="Zone transfer allowed",
721
+ description="DNS zone transfer is allowed - critical information disclosure",
722
+ severity="high",
723
+ phase="recon",
724
+ tool="dnsrecon",
725
+ target=self.domain
726
+ ))
727
+ self._log_tool("dnsrecon - completed", "done")
728
+
729
+ # 8. wafw00f - WAF Fingerprinting (NEW)
730
+ if "wafw00f" in self.config.recon_tools:
731
+ self._log_tool("wafw00f", "running")
732
+ ret, output = await self._run_command(
733
+ f"wafw00f {self.target} 2>/dev/null"
734
+ )
735
+ if ret == 0:
736
+ (self.output_dir / f"wafw00f_{self.domain}.txt").write_text(output)
737
+ # Parse WAF detection
738
+ waf_name = "Unknown"
739
+ if "is behind" in output:
740
+ # Extract WAF name
741
+ for line in output.split("\n"):
742
+ if "is behind" in line:
743
+ parts = line.split("is behind")
744
+ if len(parts) > 1:
745
+ waf_name = parts[1].strip().split()[0]
746
+ break
747
+ findings.append(Finding(
748
+ type="waf_detected",
749
+ value=waf_name,
750
+ description=f"Web Application Firewall detected: {waf_name}",
751
+ severity="info",
752
+ phase="recon",
753
+ tool="wafw00f",
754
+ target=self.target
755
+ ))
756
+ elif "No WAF" in output:
757
+ findings.append(Finding(
758
+ type="no_waf",
759
+ value="No WAF detected",
760
+ description="No Web Application Firewall detected - target may be more vulnerable",
761
+ severity="low",
762
+ phase="recon",
763
+ tool="wafw00f",
764
+ target=self.target
765
+ ))
766
+ tools_run.append("wafw00f")
767
+ self._log_tool(f"wafw00f - {waf_name if 'is behind' in output else 'No WAF'}", "done")
768
+
769
+ # 9. whatweb - Technology Fingerprinting (NEW)
770
+ if "whatweb" in self.config.recon_tools:
771
+ self._log_tool("whatweb", "running")
772
+ ret, output = await self._run_command(
773
+ f"whatweb -a 3 {self.target} --log-json={self.output_dir}/whatweb_{self.domain}.json 2>/dev/null"
774
+ )
775
+ if ret == 0:
776
+ (self.output_dir / f"whatweb_{self.domain}.txt").write_text(output)
777
+ tools_run.append("whatweb")
778
+ self._log_tool("whatweb - completed", "done")
779
+
780
+ # Deduplicate subdomains again after new tools
781
+ self.subdomains = list(set(self.subdomains))
782
+ all_subs_file.write_text("\n".join(self.subdomains))
783
+
784
+ # Add findings to global list
785
+ for f in findings:
786
+ self._add_finding(f)
787
+
788
+ duration = time.time() - start_time
789
+ result = PhaseResult(
790
+ phase=phase,
791
+ status="completed",
792
+ started_at=started_at,
793
+ finished_at=datetime.now(timezone.utc).isoformat(),
794
+ duration=duration,
795
+ findings=findings,
796
+ tools_run=tools_run,
797
+ errors=errors,
798
+ metadata={
799
+ "subdomains_count": len(self.subdomains),
800
+ "live_hosts_count": len(self.live_hosts)
801
+ }
802
+ )
803
+
804
+ self.phase_results[phase] = result
805
+ if self.on_phase_complete:
806
+ self.on_phase_complete(result)
807
+
808
+ return result
809
+
810
+ # ==================== SCAN PHASE ====================
811
+
812
+ async def run_scan(self) -> PhaseResult:
813
+ """Execute vulnerability scanning phase."""
814
+ phase = Phase.SCAN
815
+ started_at = datetime.now(timezone.utc).isoformat()
816
+ start_time = time.time()
817
+ findings = []
818
+ tools_run = []
819
+ errors = []
820
+
821
+ if self.on_phase_start:
822
+ self.on_phase_start(phase)
823
+
824
+ self._log_phase(phase, f"Vulnerability Scanning on {self.domain}")
825
+
826
+ # 1. Nuclei Scanning
827
+ if "nuclei" in self.config.scan_tools:
828
+ self._log_tool("nuclei", "running")
829
+ ret, output = await self._run_command(
830
+ f"nuclei -u {self.target} -severity low,medium,high,critical -silent 2>/dev/null",
831
+ timeout=600
832
+ )
833
+ if ret == 0:
834
+ (self.output_dir / f"nuclei_{self.domain}.txt").write_text(output)
835
+ tools_run.append("nuclei")
836
+
837
+ # Parse nuclei findings
838
+ for line in output.split("\n"):
839
+ if line.strip():
840
+ # Format: [template-id] [severity] [matched-at]
841
+ parts = line.split()
842
+ if len(parts) >= 2:
843
+ findings.append(Finding(
844
+ type="vulnerability",
845
+ value=parts[0] if parts else line,
846
+ description=line,
847
+ severity=self._parse_nuclei_severity(line),
848
+ phase="scan",
849
+ tool="nuclei",
850
+ target=self.domain
851
+ ))
852
+
853
+ self._log_tool(f"nuclei - {len([f for f in findings if f.tool == 'nuclei'])} findings", "done")
854
+
855
+ # 2. SSL/TLS Scanning
856
+ if "sslscan" in self.config.scan_tools:
857
+ self._log_tool("sslscan", "running")
858
+ # Security: Use safe_domain to prevent command injection
859
+ ret, output = await self._run_command(
860
+ f"sslscan {self.safe_domain} 2>/dev/null"
861
+ )
862
+ if ret == 0:
863
+ (self.output_dir / "sslscan_results.txt").write_text(output)
864
+ tools_run.append("sslscan")
865
+
866
+ # Check for weak ciphers
867
+ if "Accepted" in output and ("RC4" in output or "DES" in output or "NULL" in output):
868
+ findings.append(Finding(
869
+ type="weak_cipher",
870
+ value="Weak TLS ciphers detected",
871
+ description="Server accepts weak cryptographic ciphers",
872
+ severity="medium",
873
+ phase="scan",
874
+ tool="sslscan",
875
+ target=self.domain
876
+ ))
877
+
878
+ self._log_tool("sslscan - completed", "done")
879
+
880
+ # 3. Directory Fuzzing
881
+ if "ffuf" in self.config.scan_tools:
882
+ self._log_tool("ffuf", "running")
883
+ ret, output = await self._run_command(
884
+ f"ffuf -u {self.target}/FUZZ -w /usr/share/wordlists/dirb/common.txt -mc 200,301,302,403 -s 2>/dev/null | head -50",
885
+ timeout=300
886
+ )
887
+ if ret == 0:
888
+ (self.output_dir / f"ffuf_{self.domain}.txt").write_text(output)
889
+ tools_run.append("ffuf")
890
+ self._log_tool("ffuf - completed", "done")
891
+
892
+ # 4. Nikto - Web Server Vulnerability Scanner (NEW)
893
+ if "nikto" in self.config.scan_tools:
894
+ self._log_tool("nikto", "running")
895
+ ret, output = await self._run_command(
896
+ f"nikto -h {self.target} -Format txt -output {self.output_dir}/nikto_{self.domain}.txt -Tuning 123bde 2>/dev/null",
897
+ timeout=600
898
+ )
899
+ if ret == 0:
900
+ tools_run.append("nikto")
901
+ # Parse nikto findings
902
+ for line in output.split("\n"):
903
+ if "+ " in line and ("OSVDB" in line or "vulnerability" in line.lower() or "outdated" in line.lower()):
904
+ severity = "medium"
905
+ if "critical" in line.lower() or "remote" in line.lower():
906
+ severity = "high"
907
+ findings.append(Finding(
908
+ type="web_vulnerability",
909
+ value=line.strip(),
910
+ description=line.strip(),
911
+ severity=severity,
912
+ phase="scan",
913
+ tool="nikto",
914
+ target=self.target
915
+ ))
916
+ nikto_findings = len([f for f in findings if f.tool == "nikto"])
917
+ self._log_tool(f"nikto - {nikto_findings} findings", "done")
918
+
919
+ # 5. WPScan - WordPress Vulnerability Scanner (NEW)
920
+ if "wpscan" in self.config.scan_tools:
921
+ self._log_tool("wpscan", "running")
922
+ # Check if WordPress
923
+ ret, check_output = await self._run_command(
924
+ f"curl -sL {self.target}/wp-login.php --connect-timeout 5 | head -1"
925
+ )
926
+ if "wp-" in check_output.lower() or "wordpress" in check_output.lower():
927
+ wpscan_token = os.getenv("WPSCAN_API_TOKEN", "")
928
+ token_flag = f"--api-token {wpscan_token}" if wpscan_token else ""
929
+ ret, output = await self._run_command(
930
+ f"wpscan --url {self.target} {token_flag} --enumerate vp,vt,u --format json --output {self.output_dir}/wpscan_{self.domain}.json 2>/dev/null",
931
+ timeout=600
932
+ )
933
+ if ret == 0:
934
+ tools_run.append("wpscan")
935
+ # Parse JSON output
936
+ try:
937
+ wpscan_file = self.output_dir / f"wpscan_{self.domain}.json"
938
+ if wpscan_file.exists():
939
+ wpscan_data = json.loads(wpscan_file.read_text())
940
+ vulns = wpscan_data.get("vulnerabilities", [])
941
+ for vuln in vulns:
942
+ findings.append(Finding(
943
+ type="wordpress_vulnerability",
944
+ value=vuln.get("title", "Unknown"),
945
+ description=vuln.get("description", vuln.get("title", "")),
946
+ severity=self._map_wpscan_severity(vuln.get("severity", "medium")),
947
+ phase="scan",
948
+ tool="wpscan",
949
+ target=self.target,
950
+ metadata={"cve": vuln.get("cve", [])}
951
+ ))
952
+ except (json.JSONDecodeError, FileNotFoundError):
953
+ pass
954
+ self._log_tool(f"wpscan - WordPress detected", "done")
955
+ else:
956
+ self._log_tool("wpscan - Not WordPress, skipped", "done")
957
+
958
+ # 6. testssl.sh - Comprehensive SSL/TLS Testing (NEW)
959
+ if "testssl" in self.config.scan_tools:
960
+ self._log_tool("testssl", "running")
961
+ ret, output = await self._run_command(
962
+ f"testssl --jsonfile {self.output_dir}/testssl_{self.domain}.json --severity LOW {self.safe_domain} 2>/dev/null",
963
+ timeout=300
964
+ )
965
+ if ret == 0:
966
+ (self.output_dir / f"testssl_{self.domain}.txt").write_text(output)
967
+ tools_run.append("testssl")
968
+ # Parse for critical SSL issues
969
+ ssl_issues = []
970
+ for line in output.split("\n"):
971
+ if "VULNERABLE" in line or "NOT ok" in line:
972
+ ssl_issues.append(line.strip())
973
+ severity = "high" if "VULNERABLE" in line else "medium"
974
+ findings.append(Finding(
975
+ type="ssl_vulnerability",
976
+ value=line.strip()[:100],
977
+ description=line.strip(),
978
+ severity=severity,
979
+ phase="scan",
980
+ tool="testssl",
981
+ target=self.domain
982
+ ))
983
+ self._log_tool(f"testssl - {len(ssl_issues)} issues", "done")
984
+
985
+ # 7. Gobuster - Directory/Vhost Enumeration (NEW)
986
+ if "gobuster" in self.config.scan_tools:
987
+ self._log_tool("gobuster", "running")
988
+ ret, output = await self._run_command(
989
+ f"gobuster dir -u {self.target} -w /usr/share/wordlists/dirb/common.txt -q -t 20 --no-error 2>/dev/null | head -100",
990
+ timeout=300
991
+ )
992
+ if ret == 0:
993
+ (self.output_dir / f"gobuster_{self.domain}.txt").write_text(output)
994
+ tools_run.append("gobuster")
995
+ # Parse discovered paths
996
+ for line in output.split("\n"):
997
+ if line.strip() and ("Status:" in line or "(Status:" in line):
998
+ # Check for interesting paths
999
+ if any(p in line.lower() for p in ["admin", "backup", "config", "api", "debug", ".git"]):
1000
+ findings.append(Finding(
1001
+ type="interesting_path",
1002
+ value=line.strip(),
1003
+ description=f"Potentially sensitive path discovered: {line.strip()}",
1004
+ severity="low",
1005
+ phase="scan",
1006
+ tool="gobuster",
1007
+ target=self.target
1008
+ ))
1009
+ self._log_tool("gobuster - completed", "done")
1010
+
1011
+ # 8. Dirsearch - Advanced Directory Discovery (NEW)
1012
+ if "dirsearch" in self.config.scan_tools:
1013
+ self._log_tool("dirsearch", "running")
1014
+ ret, output = await self._run_command(
1015
+ f"dirsearch -u {self.target} -e php,asp,aspx,jsp,html,js -t 20 --format plain -o {self.output_dir}/dirsearch_{self.domain}.txt 2>/dev/null",
1016
+ timeout=300
1017
+ )
1018
+ if ret == 0:
1019
+ tools_run.append("dirsearch")
1020
+ self._log_tool("dirsearch - completed", "done")
1021
+
1022
+ # 9. Acunetix DAST Scan (Enterprise)
1023
+ if self.config.use_acunetix:
1024
+ self._log_tool("Acunetix DAST", "running")
1025
+ try:
1026
+ acunetix = get_acunetix()
1027
+ if acunetix.connect():
1028
+ # Start scan
1029
+ profile_map = {
1030
+ "full": ScanProfile.FULL_SCAN,
1031
+ "high_risk": ScanProfile.HIGH_RISK,
1032
+ "xss": ScanProfile.XSS_SCAN,
1033
+ "sqli": ScanProfile.SQL_INJECTION,
1034
+ }
1035
+ profile = profile_map.get(self.config.acunetix_profile, ScanProfile.FULL_SCAN)
1036
+
1037
+ scan_id = acunetix.scan_url(self.target, profile, f"AIPT Scan - {self.timestamp}")
1038
+ self.scan_ids["acunetix"] = scan_id
1039
+
1040
+ # Save scan info
1041
+ scan_info = {
1042
+ "scan_id": scan_id,
1043
+ "target": self.target,
1044
+ "profile": self.config.acunetix_profile,
1045
+ "started_at": datetime.now(timezone.utc).isoformat(),
1046
+ "dashboard_url": f"{acunetix.config.base_url}/#/scans/{scan_id}"
1047
+ }
1048
+ (self.output_dir / "acunetix_scan.json").write_text(json.dumps(scan_info, indent=2))
1049
+
1050
+ tools_run.append("acunetix")
1051
+ self._log_tool(f"Acunetix - Scan started: {scan_id[:8]}...", "done")
1052
+
1053
+ # Optionally wait for completion
1054
+ if self.config.wait_for_scanners:
1055
+ self._log_tool("Acunetix - Waiting for completion...", "running")
1056
+ result = acunetix.wait_for_scan(
1057
+ scan_id,
1058
+ timeout=self.config.scanner_timeout,
1059
+ poll_interval=30
1060
+ )
1061
+
1062
+ # Get vulnerabilities
1063
+ vulns = acunetix.get_scan_vulnerabilities(scan_id)
1064
+ for vuln in vulns:
1065
+ findings.append(Finding(
1066
+ type="vulnerability",
1067
+ value=vuln.name,
1068
+ description=vuln.description or vuln.name,
1069
+ severity=vuln.severity,
1070
+ phase="scan",
1071
+ tool="acunetix",
1072
+ target=vuln.affected_url,
1073
+ metadata={
1074
+ "vuln_id": vuln.vuln_id,
1075
+ "cvss": vuln.cvss_score,
1076
+ "cwe": vuln.cwe_id
1077
+ }
1078
+ ))
1079
+
1080
+ self._log_tool(f"Acunetix - {len(vulns)} vulnerabilities found", "done")
1081
+ else:
1082
+ errors.append("Acunetix connection failed")
1083
+ self._log_tool("Acunetix - Connection failed", "error")
1084
+ except Exception as e:
1085
+ errors.append(f"Acunetix error: {str(e)}")
1086
+ self._log_tool(f"Acunetix - Error: {str(e)}", "error")
1087
+
1088
+ # 10. Burp Suite Scan (Enterprise)
1089
+ if self.config.use_burp:
1090
+ self._log_tool("Burp Suite", "running")
1091
+ try:
1092
+ burp = get_burp()
1093
+ if burp.connect():
1094
+ scan_id = burp.scan_url(self.target)
1095
+ self.scan_ids["burp"] = scan_id
1096
+ tools_run.append("burp")
1097
+ self._log_tool(f"Burp Suite - Scan started: {scan_id}", "done")
1098
+ else:
1099
+ errors.append("Burp Suite connection failed")
1100
+ except Exception as e:
1101
+ errors.append(f"Burp Suite error: {str(e)}")
1102
+
1103
+ # ==================== CONTAINER SECURITY (DevSecOps) ====================
1104
+ # 11. Trivy - Container/Image Vulnerability Scanner
1105
+ if self.config.enable_container_scan or self.config.full_mode:
1106
+ self._log_tool("trivy", "running")
1107
+ try:
1108
+ # Scan any discovered container images or Docker configuration
1109
+ docker_compose = self.output_dir / "docker-compose.yml"
1110
+ dockerfile = self.output_dir / "Dockerfile"
1111
+
1112
+ # First, try to detect Docker presence via common paths
1113
+ ret, output = await self._run_command(
1114
+ f"curl -sI {self.target}/docker-compose.yml --connect-timeout 5 | head -1",
1115
+ timeout=10
1116
+ )
1117
+ has_docker = "200" in output
1118
+
1119
+ # Scan web target for container-related vulnerabilities
1120
+ ret, trivy_output = await self._run_command(
1121
+ f"trivy fs --severity {self.config.trivy_severity} --format json --output {self.output_dir}/trivy_{self.domain}.json . 2>/dev/null",
1122
+ timeout=300
1123
+ )
1124
+ if ret == 0:
1125
+ tools_run.append("trivy")
1126
+ # Parse trivy JSON output
1127
+ trivy_file = self.output_dir / f"trivy_{self.domain}.json"
1128
+ if trivy_file.exists():
1129
+ try:
1130
+ trivy_data = json.loads(trivy_file.read_text())
1131
+ for result in trivy_data.get("Results", []):
1132
+ for vuln in result.get("Vulnerabilities", []):
1133
+ severity = vuln.get("Severity", "UNKNOWN").lower()
1134
+ findings.append(Finding(
1135
+ type="container_vulnerability",
1136
+ value=vuln.get("VulnerabilityID", "Unknown"),
1137
+ description=f"{vuln.get('PkgName', '')}: {vuln.get('Title', vuln.get('VulnerabilityID', ''))}",
1138
+ severity=severity if severity in ["critical", "high", "medium", "low"] else "medium",
1139
+ phase="scan",
1140
+ tool="trivy",
1141
+ target=self.target,
1142
+ metadata={
1143
+ "cve": vuln.get("VulnerabilityID"),
1144
+ "package": vuln.get("PkgName"),
1145
+ "installed_version": vuln.get("InstalledVersion"),
1146
+ "fixed_version": vuln.get("FixedVersion"),
1147
+ "cvss": vuln.get("CVSS", {})
1148
+ }
1149
+ ))
1150
+ except (json.JSONDecodeError, FileNotFoundError):
1151
+ pass
1152
+ trivy_findings = len([f for f in findings if f.tool == "trivy"])
1153
+ self._log_tool(f"trivy - {trivy_findings} vulnerabilities", "done")
1154
+ else:
1155
+ self._log_tool("trivy - not installed or failed", "skip")
1156
+ except Exception as e:
1157
+ errors.append(f"Trivy error: {str(e)}")
1158
+ self._log_tool(f"trivy - error: {str(e)}", "error")
1159
+
1160
+ # ==================== SECRET DETECTION (DevSecOps) ====================
1161
+ # 12. Gitleaks - Secret Detection in Git Repos
1162
+ if self.config.enable_secret_detection or self.config.full_mode:
1163
+ self._log_tool("gitleaks", "running")
1164
+ try:
1165
+ # Check if .git is exposed
1166
+ ret, git_check = await self._run_command(
1167
+ f"curl -sI {self.target}/.git/config --connect-timeout 5 | head -1",
1168
+ timeout=10
1169
+ )
1170
+ if "200" in git_check:
1171
+ findings.append(Finding(
1172
+ type="exposed_git",
1173
+ value=f"{self.target}/.git/config",
1174
+ description="Git repository exposed - potential source code and credentials leak",
1175
+ severity="critical",
1176
+ phase="scan",
1177
+ tool="gitleaks",
1178
+ target=self.target
1179
+ ))
1180
+
1181
+ # Run gitleaks on local output directory for any downloaded content
1182
+ ret, gitleaks_output = await self._run_command(
1183
+ f"gitleaks detect --source {self.output_dir} --report-path {self.output_dir}/gitleaks_{self.domain}.json --report-format json 2>/dev/null",
1184
+ timeout=120
1185
+ )
1186
+ if ret == 0 or ret == 1: # gitleaks returns 1 when secrets found
1187
+ tools_run.append("gitleaks")
1188
+ gitleaks_file = self.output_dir / f"gitleaks_{self.domain}.json"
1189
+ if gitleaks_file.exists():
1190
+ try:
1191
+ gitleaks_data = json.loads(gitleaks_file.read_text())
1192
+ for secret in gitleaks_data if isinstance(gitleaks_data, list) else []:
1193
+ findings.append(Finding(
1194
+ type="secret_detected",
1195
+ value=secret.get("RuleID", "Unknown"),
1196
+ description=f"Secret detected: {secret.get('Description', secret.get('RuleID', 'Unknown secret'))}",
1197
+ severity="high" if "api" in secret.get("RuleID", "").lower() or "key" in secret.get("RuleID", "").lower() else "medium",
1198
+ phase="scan",
1199
+ tool="gitleaks",
1200
+ target=secret.get("File", self.target),
1201
+ metadata={
1202
+ "rule": secret.get("RuleID"),
1203
+ "file": secret.get("File"),
1204
+ "line": secret.get("StartLine"),
1205
+ "match": secret.get("Match", "")[:50] + "..." if len(secret.get("Match", "")) > 50 else secret.get("Match", "")
1206
+ }
1207
+ ))
1208
+ except (json.JSONDecodeError, FileNotFoundError):
1209
+ pass
1210
+ gitleaks_count = len([f for f in findings if f.tool == "gitleaks"])
1211
+ self._log_tool(f"gitleaks - {gitleaks_count} secrets found", "done")
1212
+ else:
1213
+ self._log_tool("gitleaks - not installed", "skip")
1214
+ except Exception as e:
1215
+ errors.append(f"Gitleaks error: {str(e)}")
1216
+ self._log_tool(f"gitleaks - error: {str(e)}", "error")
1217
+
1218
+ # 13. TruffleHog - Deep Secret Scanning
1219
+ self._log_tool("trufflehog", "running")
1220
+ try:
1221
+ ret, trufflehog_output = await self._run_command(
1222
+ f"trufflehog filesystem {self.output_dir} --json --only-verified 2>/dev/null > {self.output_dir}/trufflehog_{self.domain}.json",
1223
+ timeout=180
1224
+ )
1225
+ if ret == 0:
1226
+ tools_run.append("trufflehog")
1227
+ trufflehog_file = self.output_dir / f"trufflehog_{self.domain}.json"
1228
+ if trufflehog_file.exists() and trufflehog_file.stat().st_size > 0:
1229
+ try:
1230
+ # TruffleHog outputs JSONL (one JSON per line)
1231
+ for line in trufflehog_file.read_text().strip().split("\n"):
1232
+ if line.strip():
1233
+ secret = json.loads(line)
1234
+ findings.append(Finding(
1235
+ type="verified_secret",
1236
+ value=secret.get("DetectorName", "Unknown"),
1237
+ description=f"Verified secret: {secret.get('DetectorName', 'Unknown')} - {secret.get('DecoderName', '')}",
1238
+ severity="critical", # Verified secrets are critical
1239
+ phase="scan",
1240
+ tool="trufflehog",
1241
+ target=secret.get("SourceMetadata", {}).get("Data", {}).get("Filesystem", {}).get("file", self.target),
1242
+ metadata={
1243
+ "detector": secret.get("DetectorName"),
1244
+ "verified": secret.get("Verified", False),
1245
+ "raw": secret.get("Raw", "")[:30] + "..." if len(secret.get("Raw", "")) > 30 else secret.get("Raw", "")
1246
+ }
1247
+ ))
1248
+ except (json.JSONDecodeError, FileNotFoundError):
1249
+ pass
1250
+ trufflehog_count = len([f for f in findings if f.tool == "trufflehog"])
1251
+ self._log_tool(f"trufflehog - {trufflehog_count} verified secrets", "done")
1252
+ else:
1253
+ self._log_tool("trufflehog - not installed", "skip")
1254
+ except Exception as e:
1255
+ errors.append(f"TruffleHog error: {str(e)}")
1256
+ self._log_tool(f"trufflehog - error: {str(e)}", "error")
1257
+
1258
+ # Add findings to global list
1259
+ for f in findings:
1260
+ self._add_finding(f)
1261
+
1262
+ duration = time.time() - start_time
1263
+ result = PhaseResult(
1264
+ phase=phase,
1265
+ status="completed",
1266
+ started_at=started_at,
1267
+ finished_at=datetime.now(timezone.utc).isoformat(),
1268
+ duration=duration,
1269
+ findings=findings,
1270
+ tools_run=tools_run,
1271
+ errors=errors,
1272
+ metadata={
1273
+ "scan_ids": self.scan_ids
1274
+ }
1275
+ )
1276
+
1277
+ self.phase_results[phase] = result
1278
+ if self.on_phase_complete:
1279
+ self.on_phase_complete(result)
1280
+
1281
+ return result
1282
+
1283
+ def _parse_nuclei_severity(self, line: str) -> str:
1284
+ """Parse severity from nuclei output line."""
1285
+ line_lower = line.lower()
1286
+ if "critical" in line_lower:
1287
+ return "critical"
1288
+ elif "high" in line_lower:
1289
+ return "high"
1290
+ elif "medium" in line_lower:
1291
+ return "medium"
1292
+ elif "low" in line_lower:
1293
+ return "low"
1294
+ return "info"
1295
+
1296
+ def _map_wpscan_severity(self, severity: str) -> str:
1297
+ """Map WPScan severity to standard severity levels."""
1298
+ severity_map = {
1299
+ "critical": "critical",
1300
+ "high": "high",
1301
+ "medium": "medium",
1302
+ "low": "low",
1303
+ "info": "info",
1304
+ "informational": "info"
1305
+ }
1306
+ return severity_map.get(severity.lower(), "medium")
1307
+
1308
+ # ==================== ANALYZE PHASE (Intelligence Module) ====================
1309
+
1310
+ async def run_analyze(self) -> PhaseResult:
1311
+ """
1312
+ Execute intelligence analysis phase.
1313
+
1314
+ This phase runs after SCAN to:
1315
+ 1. Discover attack chains (vulnerability combinations)
1316
+ 2. Prioritize findings by real-world exploitability
1317
+ 3. Generate executive summary
1318
+ """
1319
+ phase = Phase.ANALYZE
1320
+ started_at = datetime.now(timezone.utc).isoformat()
1321
+ start_time = time.time()
1322
+ findings = []
1323
+ tools_run = []
1324
+ errors = []
1325
+
1326
+ if self.on_phase_start:
1327
+ self.on_phase_start(phase)
1328
+
1329
+ self._log_phase(phase, f"Intelligence Analysis for {self.domain}")
1330
+
1331
+ if not self.config.enable_intelligence or not self._vuln_chainer:
1332
+ self._log_tool("Intelligence module disabled", "skip")
1333
+ duration = time.time() - start_time
1334
+ result = PhaseResult(
1335
+ phase=phase,
1336
+ status="skipped",
1337
+ started_at=started_at,
1338
+ finished_at=datetime.now(timezone.utc).isoformat(),
1339
+ duration=duration,
1340
+ findings=[],
1341
+ tools_run=[],
1342
+ errors=[],
1343
+ metadata={"reason": "Intelligence module disabled"}
1344
+ )
1345
+ self.phase_results[phase] = result
1346
+ return result
1347
+
1348
+ # =====================================================================
1349
+ # 1. Vulnerability Chaining - Discover attack paths
1350
+ # =====================================================================
1351
+ self._log_tool("Vulnerability Chaining", "running")
1352
+ try:
1353
+ # Convert orchestrator findings to models.Finding format for intelligence modules
1354
+ from aipt_v2.models.findings import Finding as ModelsFinding, Severity as ModelsSeverity, VulnerabilityType
1355
+
1356
+ models_findings = []
1357
+ for f in self.findings:
1358
+ try:
1359
+ # Map severity string to enum
1360
+ severity_map = {
1361
+ "critical": ModelsSeverity.CRITICAL,
1362
+ "high": ModelsSeverity.HIGH,
1363
+ "medium": ModelsSeverity.MEDIUM,
1364
+ "low": ModelsSeverity.LOW,
1365
+ "info": ModelsSeverity.INFO,
1366
+ "informational": ModelsSeverity.INFO,
1367
+ }
1368
+ severity = severity_map.get(f.severity.lower(), ModelsSeverity.INFO)
1369
+
1370
+ # Map finding type to vulnerability type
1371
+ vuln_type_map = {
1372
+ "sqli": VulnerabilityType.SQL_INJECTION,
1373
+ "sql_injection": VulnerabilityType.SQL_INJECTION,
1374
+ "xss": VulnerabilityType.XSS_REFLECTED,
1375
+ "xss_stored": VulnerabilityType.XSS_STORED,
1376
+ "xss_reflected": VulnerabilityType.XSS_REFLECTED,
1377
+ "ssrf": VulnerabilityType.SSRF,
1378
+ "rce": VulnerabilityType.RCE,
1379
+ "lfi": VulnerabilityType.FILE_INCLUSION,
1380
+ "file_inclusion": VulnerabilityType.FILE_INCLUSION,
1381
+ "open_redirect": VulnerabilityType.OPEN_REDIRECT,
1382
+ "csrf": VulnerabilityType.CSRF,
1383
+ "idor": VulnerabilityType.BROKEN_ACCESS_CONTROL,
1384
+ "info_disclosure": VulnerabilityType.INFORMATION_DISCLOSURE,
1385
+ "information_disclosure": VulnerabilityType.INFORMATION_DISCLOSURE,
1386
+ "misconfig": VulnerabilityType.SECURITY_MISCONFIGURATION,
1387
+ "misconfiguration": VulnerabilityType.SECURITY_MISCONFIGURATION,
1388
+ }
1389
+ vuln_type = vuln_type_map.get(f.type.lower(), VulnerabilityType.OTHER)
1390
+
1391
+ models_findings.append(ModelsFinding(
1392
+ title=f.value,
1393
+ severity=severity,
1394
+ vuln_type=vuln_type,
1395
+ url=f.target or self.target,
1396
+ description=f.description,
1397
+ source=f.tool,
1398
+ ))
1399
+ except Exception as conv_err:
1400
+ logger.debug(f"Could not convert finding for chaining: {conv_err}")
1401
+ continue
1402
+
1403
+ chains = self._vuln_chainer.find_chains(models_findings)
1404
+ self.attack_chains = chains
1405
+
1406
+ if chains:
1407
+ tools_run.append("vulnerability_chainer")
1408
+ self._log_tool(f"Vulnerability Chaining - {len(chains)} attack chains discovered", "done")
1409
+
1410
+ # Log critical chains
1411
+ for chain in chains:
1412
+ if chain.max_impact == "Critical":
1413
+ logger.warning(f"CRITICAL CHAIN: {chain.title} - {chain.impact_description}")
1414
+
1415
+ # Add as finding
1416
+ findings.append(Finding(
1417
+ type="attack_chain",
1418
+ value=chain.title,
1419
+ description=chain.impact_description,
1420
+ severity="critical",
1421
+ phase="analyze",
1422
+ tool="vulnerability_chainer",
1423
+ target=self.domain,
1424
+ metadata={
1425
+ "chain_id": chain.chain_id,
1426
+ "steps": len(chain.links),
1427
+ "vulnerabilities": [link.finding.get("title", "") for link in chain.links]
1428
+ }
1429
+ ))
1430
+
1431
+ # Notify callback
1432
+ if self.on_chain_discovered:
1433
+ self.on_chain_discovered(chain)
1434
+
1435
+ # Save chains to file
1436
+ chains_data = [c.to_dict() for c in chains]
1437
+ (self.output_dir / "attack_chains.json").write_text(json.dumps(chains_data, indent=2))
1438
+ else:
1439
+ self._log_tool("Vulnerability Chaining - No chains found", "done")
1440
+
1441
+ except Exception as e:
1442
+ errors.append(f"Chaining error: {str(e)}")
1443
+ self._log_tool(f"Vulnerability Chaining - Error: {e}", "error")
1444
+
1445
+ # =====================================================================
1446
+ # 2. AI-Powered Triage - Prioritize by exploitability
1447
+ # =====================================================================
1448
+ self._log_tool("AI Triage", "running")
1449
+ try:
1450
+ # Reuse models_findings from the chaining step if it was built; otherwise
+ # (or if chaining failed before building it) convert the findings now
+ if "models_findings" not in locals() or not models_findings:
1452
+ from aipt_v2.models.findings import Finding as ModelsFinding, Severity as ModelsSeverity, VulnerabilityType
1453
+ models_findings = []
1454
+ for f in self.findings:
1455
+ try:
1456
+ severity_map = {
1457
+ "critical": ModelsSeverity.CRITICAL,
1458
+ "high": ModelsSeverity.HIGH,
1459
+ "medium": ModelsSeverity.MEDIUM,
1460
+ "low": ModelsSeverity.LOW,
1461
+ "info": ModelsSeverity.INFO,
1462
+ }
1463
+ severity = severity_map.get(f.severity.lower(), ModelsSeverity.INFO)
1464
+ models_findings.append(ModelsFinding(
1465
+ title=f.value,
1466
+ severity=severity,
1467
+ vuln_type=VulnerabilityType.OTHER,
1468
+ url=f.target or self.target,
1469
+ description=f.description,
1470
+ source=f.tool,
1471
+ ))
1472
+ except Exception:
1473
+ continue
1474
+
1475
+ # Call the analyze() method (not triage())
1476
+ triage_result = await self._ai_triage.analyze(models_findings)
1477
+ self.triage_result = triage_result
1478
+
1479
+ tools_run.append("ai_triage")
1480
+
1481
+ # Save triage results
1482
+ (self.output_dir / "triage_result.json").write_text(
1483
+ json.dumps(triage_result.to_dict(), indent=2)
1484
+ )
1485
+
1486
+ # Save executive summary
1487
+ (self.output_dir / "EXECUTIVE_SUMMARY.md").write_text(triage_result.executive_summary)
1488
+
1489
+ # Log top priorities using get_top_priority() method
1490
+ top_assessments = triage_result.get_top_priority(3)
1491
+ if top_assessments:
1492
+ top_titles = [a.finding.title for a in top_assessments]
1493
+ self._log_tool(f"AI Triage - Top priorities: {', '.join(top_titles)}", "done")
1494
+ else:
1495
+ self._log_tool("AI Triage - No high-priority findings", "done")
1496
+
1497
+ except Exception as e:
1498
+ errors.append(f"Triage error: {str(e)}")
1499
+ self._log_tool(f"AI Triage - Error: {e}", "error")
1500
+
1501
+ # =====================================================================
1502
+ # 3. Scope Audit - Check for violations
1503
+ # =====================================================================
1504
+ if self._scope_enforcer:
1505
+ self._log_tool("Scope Audit", "running")
1506
+ violations = self._scope_enforcer.get_violations()
1507
+ if violations:
1508
+ self._log_tool(f"Scope Audit - {len(violations)} violations detected!", "done")
1509
+ # Save audit log
1510
+ audit_log = self._scope_enforcer.get_audit_log()
1511
+ (self.output_dir / "scope_audit.json").write_text(json.dumps(audit_log, indent=2))
1512
+ else:
1513
+ self._log_tool("Scope Audit - All requests within scope", "done")
1514
+ tools_run.append("scope_audit")
1515
+
1516
+ # Add findings to global list
1517
+ for f in findings:
1518
+ self._add_finding(f)
1519
+
1520
+ duration = time.time() - start_time
1521
+ result = PhaseResult(
1522
+ phase=phase,
1523
+ status="completed",
1524
+ started_at=started_at,
1525
+ finished_at=datetime.now(timezone.utc).isoformat(),
1526
+ duration=duration,
1527
+ findings=findings,
1528
+ tools_run=tools_run,
1529
+ errors=errors,
1530
+ metadata={
1531
+ "attack_chains_count": len(self.attack_chains),
1532
+ "top_priorities_count": len(self.triage_result.get_top_priority(10)) if self.triage_result else 0,
1533
+ "scope_violations": len(self._scope_enforcer.get_violations()) if self._scope_enforcer else 0
1534
+ }
1535
+ )
1536
+
1537
+ self.phase_results[phase] = result
1538
+ if self.on_phase_complete:
1539
+ self.on_phase_complete(result)
1540
+
1541
+ return result
1542
+
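run_analyze() persists its results as attack_chains.json, triage_result.json and EXECUTIVE_SUMMARY.md under the configured output directory. A minimal sketch of reading those artifacts back after a run; the directory path is illustrative, and chain dictionaries are assumed to carry the title field used when chains were logged above:

import json
from pathlib import Path

def load_analysis(output_dir: str) -> dict:
    """Read back the artifacts written by the ANALYZE phase, tolerating missing files."""
    out = Path(output_dir)
    analysis = {"chains": [], "triage": None, "summary": ""}
    if (out / "attack_chains.json").exists():
        analysis["chains"] = json.loads((out / "attack_chains.json").read_text())
    if (out / "triage_result.json").exists():
        analysis["triage"] = json.loads((out / "triage_result.json").read_text())
    if (out / "EXECUTIVE_SUMMARY.md").exists():
        analysis["summary"] = (out / "EXECUTIVE_SUMMARY.md").read_text()
    return analysis

# Example: list any discovered attack chains from a previous run.
for chain in load_analysis("./scan_results")["chains"]:
    print(chain.get("title", "unnamed chain"))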
1543
+ # ==================== EXPLOIT PHASE ====================
1544
+
1545
+ async def run_exploit(self) -> PhaseResult:
1546
+ """Execute exploitation/validation phase."""
1547
+ phase = Phase.EXPLOIT
1548
+ started_at = datetime.now(timezone.utc).isoformat()
1549
+ start_time = time.time()
1550
+ findings = []
1551
+ tools_run = []
1552
+ errors = []
1553
+
1554
+ if self.on_phase_start:
1555
+ self.on_phase_start(phase)
1556
+
1557
+ self._log_phase(phase, f"Vulnerability Validation on {self.domain}")
1558
+
1559
+ # 1. Check Sensitive Endpoints
1560
+ if self.config.check_sensitive_paths:
1561
+ self._log_tool("Sensitive Path Check", "running")
1562
+
1563
+ sensitive_paths = [
1564
+ "/metrics", "/actuator", "/actuator/health", "/actuator/env",
1565
+ "/.env", "/.git/config", "/swagger-ui.html", "/api/swagger",
1566
+ "/graphql", "/debug", "/admin", "/phpinfo.php",
1567
+ "/server-status", "/.aws/credentials", "/backup"
1568
+ ]
1569
+
1570
+ for path in sensitive_paths:
1571
+ try:
1572
+ ret, output = await self._run_command(
1573
+ f"curl -s -o /dev/null -w '%{{http_code}}' '{self.target}{path}' --connect-timeout 5",
1574
+ timeout=10
1575
+ )
1576
+ if ret == 0 and output.strip() in ["200", "301", "302"]:
1577
+ severity = "high" if path in ["/.env", "/.git/config", "/.aws/credentials"] else "medium"
1578
+ findings.append(Finding(
1579
+ type="exposed_endpoint",
1580
+ value=f"{self.target}{path}",
1581
+ description=f"Sensitive endpoint accessible: {path} (HTTP {output.strip()})",
1582
+ severity=severity,
1583
+ phase="exploit",
1584
+ tool="path_check",
1585
+ target=self.target
1586
+ ))
1587
+ except Exception:
1588
+ continue
1589
+
1590
+ exposed_count = len([f for f in findings if f.type == "exposed_endpoint"])
1591
+ tools_run.append("sensitive_path_check")
1592
+ self._log_tool(f"Sensitive Path Check - {exposed_count} exposed", "done")
1593
+
1594
+ # 2. WAF Detection
1595
+ self._log_tool("WAF Detection", "running")
1596
+ ret, output = await self._run_command(
1597
+ f"curl -sI \"{self.target}/?id=1'%20OR%20'1'='1\" --connect-timeout 5 | head -1",
1598
+ timeout=10
1599
+ )
1600
+ waf_detected = "403" in output or "406" in output or "429" in output
1601
+ (self.output_dir / "waf_test.txt").write_text(f"WAF Test Response: {output}\nWAF Detected: {waf_detected}")
1602
+ tools_run.append("waf_detection")
1603
+
1604
+ if not waf_detected:
1605
+ findings.append(Finding(
1606
+ type="waf_bypass",
1607
+ value="No WAF detected",
1608
+ description="Target does not appear to have a WAF or WAF is not blocking",
1609
+ severity="low",
1610
+ phase="exploit",
1611
+ tool="waf_detection",
1612
+ target=self.target
1613
+ ))
1614
+ self._log_tool(f"WAF Detection - {'Detected' if waf_detected else 'Not detected'}", "done")
1615
+
1616
+ # ==================== EXPLOITATION TOOLS (Enabled in full_mode) ====================
1617
+ if self.config.full_mode or self.config.enable_exploitation:
1618
+
1619
+ # 3. SQLMap - SQL Injection Testing (NEW)
1620
+ if "sqlmap" in self.config.exploit_tools:
1621
+ self._log_tool("sqlmap", "running")
1622
+ sqlmap_output_dir = self.output_dir / "sqlmap"
1623
+ sqlmap_output_dir.mkdir(exist_ok=True)
1624
+
1625
+ # Run SQLMap in batch mode with safe settings
1626
+ ret, output = await self._run_command(
1627
+ f"sqlmap -u {shlex.quote(self.target)} --batch --forms --crawl=2 "
1628
+ f"--level={self.config.sqlmap_level} --risk={self.config.sqlmap_risk} "
1629
+ f"--output-dir={sqlmap_output_dir} --random-agent 2>/dev/null",
1630
+ timeout=self.config.sqlmap_timeout
1631
+ )
1632
+ if ret == 0:
1633
+ (self.output_dir / f"sqlmap_{self.domain}.txt").write_text(output)
1634
+ tools_run.append("sqlmap")
1635
+
1636
+ # Parse SQLMap findings
1637
+ if "is vulnerable" in output.lower() or "injection" in output.lower():
1638
+ # Extract vulnerable parameters
1639
+ vuln_params = []
1640
+ for line in output.split("\n"):
1641
+ if "Parameter:" in line or "is vulnerable" in line:
1642
+ vuln_params.append(line.strip())
1643
+
1644
+ if vuln_params:
1645
+ findings.append(Finding(
1646
+ type="sql_injection",
1647
+ value="SQL Injection Detected",
1648
+ description=f"SQL injection vulnerability found. Parameters: {'; '.join(vuln_params[:5])}",
1649
+ severity="critical",
1650
+ phase="exploit",
1651
+ tool="sqlmap",
1652
+ target=self.target,
1653
+ metadata={"vulnerable_params": vuln_params}
1654
+ ))
1655
+ # Mark shell access if OS shell was obtained
1656
+ if "--os-shell" in output or "os-shell" in output:
1657
+ self.config.shell_obtained = True
1658
+ self.config.target_os = "linux" if "linux" in output.lower() else "windows"
1659
+
1660
+ self._log_tool(f"sqlmap - {'Vulnerable!' if vuln_params else 'No injection found'}", "done")
1661
+ else:
1662
+ self._log_tool("sqlmap - completed", "done")
1663
+
1664
+ # 4. Commix - Command Injection Testing (NEW)
1665
+ if "commix" in self.config.exploit_tools:
1666
+ self._log_tool("commix", "running")
1667
+ ret, output = await self._run_command(
1668
+ f"commix -u {shlex.quote(self.target)} --batch --crawl=1 --level=2 2>/dev/null",
1669
+ timeout=300
1670
+ )
1671
+ if ret == 0:
1672
+ (self.output_dir / f"commix_{self.domain}.txt").write_text(output)
1673
+ tools_run.append("commix")
1674
+
1675
+ if "is vulnerable" in output.lower() or "command injection" in output.lower():
1676
+ findings.append(Finding(
1677
+ type="command_injection",
1678
+ value="Command Injection Detected",
1679
+ description="OS command injection vulnerability found",
1680
+ severity="critical",
1681
+ phase="exploit",
1682
+ tool="commix",
1683
+ target=self.target
1684
+ ))
1685
+ self.config.shell_obtained = True
1686
+
1687
+ self._log_tool("commix - completed", "done")
1688
+
1689
+ # 5. XSStrike - XSS Detection (NEW)
1690
+ if "xsstrike" in self.config.exploit_tools:
1691
+ self._log_tool("xsstrike", "running")
1692
+ ret, output = await self._run_command(
1693
+ f"xsstrike -u {shlex.quote(self.target)} --crawl -l 2 --blind 2>/dev/null",
1694
+ timeout=300
1695
+ )
1696
+ if ret == 0:
1697
+ (self.output_dir / f"xsstrike_{self.domain}.txt").write_text(output)
1698
+ tools_run.append("xsstrike")
1699
+
1700
+ # Parse XSS findings
1701
+ xss_count = output.lower().count("xss") + output.lower().count("reflection")
1702
+ if xss_count > 0 or "vulnerable" in output.lower():
1703
+ findings.append(Finding(
1704
+ type="xss_vulnerability",
1705
+ value="XSS Vulnerability Detected",
1706
+ description=f"Cross-site scripting vulnerability detected",
1707
+ severity="high",
1708
+ phase="exploit",
1709
+ tool="xsstrike",
1710
+ target=self.target
1711
+ ))
1712
+
1713
+ self._log_tool(f"xsstrike - {xss_count} potential XSS", "done")
1714
+
1715
+ # 6. Hydra - Credential Brute-forcing (NEW)
1716
+ if "hydra" in self.config.exploit_tools:
1717
+ # Only run against discovered services with auth
1718
+ services_to_bruteforce = []
1719
+
1720
+ # Check for SSH (port 22)
1721
+ if any("22/tcp" in str(f.value) for f in self.findings if f.type == "open_port"):
1722
+ services_to_bruteforce.append(("ssh", 22))
1723
+
1724
+ # Check for FTP (port 21)
1725
+ if any("21/tcp" in str(f.value) for f in self.findings if f.type == "open_port"):
1726
+ services_to_bruteforce.append(("ftp", 21))
1727
+
1728
+ # Check for HTTP Basic Auth
1729
+ if any("401" in str(f.value) for f in self.findings):
1730
+ services_to_bruteforce.append(("http-get", 80))
1731
+
1732
+ for service, port in services_to_bruteforce[:2]: # Limit to 2 services
1733
+ self._log_tool(f"hydra ({service})", "running")
1734
+ ret, output = await self._run_command(
1735
+ f"hydra -L {self.config.wordlist_users} -P {self.config.wordlist_passwords} "
1736
+ f"-t {self.config.hydra_threads} -f -o {self.output_dir}/hydra_{service}.txt "
1737
+ f"{self.safe_domain} {service} 2>/dev/null",
1738
+ timeout=self.config.hydra_timeout
1739
+ )
1740
+ if ret == 0:
1741
+ tools_run.append(f"hydra_{service}")
1742
+
1743
+ if "login:" in output.lower() or "password:" in output.lower():
1744
+ findings.append(Finding(
1745
+ type="credential_found",
1746
+ value=f"Weak credentials on {service}",
1747
+ description=f"Valid credentials found for {service} service",
1748
+ severity="critical",
1749
+ phase="exploit",
1750
+ tool="hydra",
1751
+ target=f"{self.domain}:{port}",
1752
+ metadata={"service": service}
1753
+ ))
1754
+ self.config.shell_obtained = True
1755
+
1756
+ self._log_tool(f"hydra ({service}) - completed", "done")
1757
+
1758
+ # 7. Searchsploit - Exploit Database Search (NEW)
1759
+ if "searchsploit" in self.config.exploit_tools:
1760
+ self._log_tool("searchsploit", "running")
1761
+ # Search for exploits based on discovered technologies
1762
+ search_terms = []
1763
+
1764
+ # Get technologies from whatweb/httpx findings
1765
+ for f in self.findings:
1766
+ if f.tool in ["whatweb", "httpx", "nmap"]:
1767
+ # Extract potential software names
1768
+ if "Apache" in f.value or "apache" in f.description:
1769
+ search_terms.append("Apache")
1770
+ if "nginx" in f.value.lower() or "nginx" in f.description.lower():
1771
+ search_terms.append("nginx")
1772
+ if "WordPress" in f.value or "wordpress" in f.description.lower():
1773
+ search_terms.append("WordPress")
1774
+
1775
+ search_terms = list(set(search_terms))[:3] # Dedupe and limit
1776
+
1777
+ for term in search_terms:
1778
+ ret, output = await self._run_command(
1779
+ f"searchsploit {shlex.quote(term)} --json 2>/dev/null | head -50"
1780
+ )
1781
+ if ret == 0 and output.strip():
1782
+ try:
1783
+ exploits = json.loads(output)
1784
+ if exploits.get("RESULTS_EXPLOIT"):
1785
+ (self.output_dir / f"searchsploit_{term}.json").write_text(output)
1786
+ findings.append(Finding(
1787
+ type="potential_exploit",
1788
+ value=f"Exploits found for {term}",
1789
+ description=f"Found {len(exploits['RESULTS_EXPLOIT'])} potential exploits for {term}",
1790
+ severity="info",
1791
+ phase="exploit",
1792
+ tool="searchsploit",
1793
+ target=self.domain,
1794
+ metadata={"exploits": exploits["RESULTS_EXPLOIT"][:5]}
1795
+ ))
1796
+ except json.JSONDecodeError:
1797
+ pass
1798
+
1799
+ tools_run.append("searchsploit")
1800
+ self._log_tool("searchsploit - completed", "done")
1801
+
1802
+ # 8. Fetch Acunetix Results (if scan completed)
1803
+ if "acunetix" in self.scan_ids and not self.config.wait_for_scanners:
1804
+ self._log_tool("Fetching Acunetix Results", "running")
1805
+ try:
1806
+ acunetix = get_acunetix()
1807
+ status = acunetix.get_scan_status(self.scan_ids["acunetix"])
1808
+
1809
+ if status.status == "completed":
1810
+ vulns = acunetix.get_scan_vulnerabilities(self.scan_ids["acunetix"])
1811
+ for vuln in vulns:
1812
+ findings.append(Finding(
1813
+ type="vulnerability",
1814
+ value=vuln.name,
1815
+ description=vuln.description or vuln.name,
1816
+ severity=vuln.severity,
1817
+ phase="exploit",
1818
+ tool="acunetix",
1819
+ target=vuln.affected_url,
1820
+ metadata={
1821
+ "vuln_id": vuln.vuln_id,
1822
+ "cvss": vuln.cvss_score
1823
+ }
1824
+ ))
1825
+ self._log_tool(f"Acunetix Results - {len(vulns)} vulnerabilities", "done")
1826
+ else:
1827
+ self._log_tool(f"Acunetix - Scan still {status.status} ({status.progress}%)", "done")
1828
+ except Exception as e:
1829
+ errors.append(f"Error fetching Acunetix results: {e}")
1830
+
1831
+ # Add findings to global list
1832
+ for f in findings:
1833
+ self._add_finding(f)
1834
+
1835
+ duration = time.time() - start_time
1836
+ result = PhaseResult(
1837
+ phase=phase,
1838
+ status="completed",
1839
+ started_at=started_at,
1840
+ finished_at=datetime.now(timezone.utc).isoformat(),
1841
+ duration=duration,
1842
+ findings=findings,
1843
+ tools_run=tools_run,
1844
+ errors=errors
1845
+ )
1846
+
1847
+ self.phase_results[phase] = result
1848
+ if self.on_phase_complete:
1849
+ self.on_phase_complete(result)
1850
+
1851
+ return result
1852
+
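The searchsploit step above keys off the RESULTS_EXPLOIT array produced by `searchsploit --json`. A small offline sketch of walking that structure; the sample record and any field names beyond RESULTS_EXPLOIT are illustrative, not guaranteed searchsploit output:

import json

# Illustrative shape of `searchsploit <term> --json` output as the parser above expects it.
sample_output = json.dumps({
    "SEARCH": "WordPress",
    "RESULTS_EXPLOIT": [
        {"Title": "WordPress Plugin Example 1.0 - SQL Injection", "Path": "exploits/php/webapps/00000.txt"},
        {"Title": "WordPress Core Example - Authenticated XSS", "Path": "exploits/php/webapps/00001.txt"},
    ],
})

exploits = json.loads(sample_output)
for entry in exploits.get("RESULTS_EXPLOIT", [])[:5]:  # same 5-entry cap as the finding metadata above
    print(entry.get("Title", "untitled"), "->", entry.get("Path", ""))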
1853
+ # ==================== POST-EXPLOITATION PHASE (NEW) ====================
1854
+
1855
+ async def run_post_exploit(self) -> PhaseResult:
1856
+ """
1857
+ Execute post-exploitation phase.
1858
+
1859
+ This phase auto-triggers when shell access is obtained during exploitation
+ and stages privilege-escalation tooling for discovering further attack paths.
1861
+ """
1862
+ phase = Phase.POST_EXPLOIT
1863
+ started_at = datetime.now(timezone.utc).isoformat()
1864
+ start_time = time.time()
1865
+ findings = []
1866
+ tools_run = []
1867
+ errors = []
1868
+
1869
+ if self.on_phase_start:
1870
+ self.on_phase_start(phase)
1871
+
1872
+ self._log_phase(phase, f"Post-Exploitation on {self.domain}")
1873
+
1874
+ # Check if shell access was obtained
1875
+ if not self.config.shell_obtained:
1876
+ self._log_tool("No shell access - skipping post-exploitation", "done")
1877
+ duration = time.time() - start_time
1878
+ result = PhaseResult(
1879
+ phase=phase,
1880
+ status="skipped",
1881
+ started_at=started_at,
1882
+ finished_at=datetime.now(timezone.utc).isoformat(),
1883
+ duration=duration,
1884
+ findings=[],
1885
+ tools_run=[],
1886
+ errors=[],
1887
+ metadata={"reason": "No shell access obtained during exploitation"}
1888
+ )
1889
+ self.phase_results[phase] = result
1890
+ return result
1891
+
1892
+ # Determine target OS
1893
+ target_os = self.config.target_os or "linux" # Default to linux
1894
+ self._log_tool(f"Target OS: {target_os}", "done")
1895
+
1896
+ # ==================== LINUX POST-EXPLOITATION ====================
1897
+ if target_os == "linux":
1898
+
1899
+ # 1. LinPEAS - Linux Privilege Escalation
1900
+ if "linpeas" in self.config.post_exploit_tools:
1901
+ self._log_tool("linpeas", "running")
1902
+ # Note: in a real engagement this script would be uploaded to and executed on the target.
+ # Here it is only staged locally (downloaded to /tmp) so it is ready for manual transfer.
1904
+ ret, output = await self._run_command(
1905
+ f"curl -sL https://github.com/carlospolop/PEASS-ng/releases/latest/download/linpeas.sh -o /tmp/linpeas.sh 2>/dev/null && echo 'Downloaded'",
1906
+ timeout=60
1907
+ )
1908
+ if ret == 0 and "Downloaded" in output:
1909
+ tools_run.append("linpeas")
1910
+ findings.append(Finding(
1911
+ type="post_exploit_tool",
1912
+ value="LinPEAS ready",
1913
+ description="LinPEAS privilege escalation script downloaded and ready for execution on target",
1914
+ severity="info",
1915
+ phase="post_exploit",
1916
+ tool="linpeas",
1917
+ target=self.domain,
1918
+ metadata={"script_path": "/tmp/linpeas.sh"}
1919
+ ))
1920
+ self._log_tool("linpeas - downloaded", "done")
1921
+
1922
+ # 2. pspy - Process Monitoring
1923
+ if "pspy" in self.config.post_exploit_tools:
1924
+ self._log_tool("pspy", "running")
1925
+ ret, output = await self._run_command(
1926
+ f"curl -sL https://github.com/DominicBreuker/pspy/releases/download/v1.2.1/pspy64 -o /tmp/pspy64 2>/dev/null && chmod +x /tmp/pspy64 && echo 'Downloaded'",
1927
+ timeout=60
1928
+ )
1929
+ if ret == 0 and "Downloaded" in output:
1930
+ tools_run.append("pspy")
1931
+ findings.append(Finding(
1932
+ type="post_exploit_tool",
1933
+ value="pspy ready",
1934
+ description="pspy process monitor downloaded for cron job and process analysis",
1935
+ severity="info",
1936
+ phase="post_exploit",
1937
+ tool="pspy",
1938
+ target=self.domain,
1939
+ metadata={"binary_path": "/tmp/pspy64"}
1940
+ ))
1941
+ self._log_tool("pspy - downloaded", "done")
1942
+
1943
+ # ==================== WINDOWS POST-EXPLOITATION ====================
1944
+ elif target_os == "windows":
1945
+
1946
+ # 1. WinPEAS - Windows Privilege Escalation
1947
+ if "winpeas" in self.config.post_exploit_tools:
1948
+ self._log_tool("winpeas", "running")
1949
+ ret, output = await self._run_command(
1950
+ f"curl -sL https://github.com/carlospolop/PEASS-ng/releases/latest/download/winPEASany_ofs.exe -o /tmp/winpeas.exe 2>/dev/null && echo 'Downloaded'",
1951
+ timeout=60
1952
+ )
1953
+ if ret == 0 and "Downloaded" in output:
1954
+ tools_run.append("winpeas")
1955
+ findings.append(Finding(
1956
+ type="post_exploit_tool",
1957
+ value="WinPEAS ready",
1958
+ description="WinPEAS privilege escalation tool downloaded for Windows target",
1959
+ severity="info",
1960
+ phase="post_exploit",
1961
+ tool="winpeas",
1962
+ target=self.domain,
1963
+ metadata={"binary_path": "/tmp/winpeas.exe"}
1964
+ ))
1965
+ self._log_tool("winpeas - downloaded", "done")
1966
+
1967
+ # 2. LaZagne - Credential Recovery
1968
+ if "lazagne" in self.config.post_exploit_tools:
1969
+ self._log_tool("lazagne", "running")
1970
+ ret, output = await self._run_command(
1971
+ f"curl -sL https://github.com/AlessandroZ/LaZagne/releases/download/v2.4.5/LaZagne.exe -o /tmp/lazagne.exe 2>/dev/null && echo 'Downloaded'",
1972
+ timeout=60
1973
+ )
1974
+ if ret == 0 and "Downloaded" in output:
1975
+ tools_run.append("lazagne")
1976
+ findings.append(Finding(
1977
+ type="post_exploit_tool",
1978
+ value="LaZagne ready",
1979
+ description="LaZagne credential recovery tool downloaded for Windows target",
1980
+ severity="info",
1981
+ phase="post_exploit",
1982
+ tool="lazagne",
1983
+ target=self.domain,
1984
+ metadata={"binary_path": "/tmp/lazagne.exe"}
1985
+ ))
1986
+ self._log_tool("lazagne - downloaded", "done")
1987
+
1988
+ # 3. Generate Post-Exploitation Report
1989
+ post_exploit_report = {
1990
+ "target": self.domain,
1991
+ "target_os": target_os,
1992
+ "shell_obtained": self.config.shell_obtained,
1993
+ "tools_prepared": tools_run,
1994
+ "recommendations": [
1995
+ "Execute LinPEAS/WinPEAS on target for privilege escalation paths",
1996
+ "Run pspy to monitor for cron jobs and scheduled tasks",
1997
+ "Use LaZagne to recover stored credentials",
1998
+ "Check for kernel exploits based on version",
1999
+ "Look for SUID binaries (Linux) or service misconfigurations (Windows)"
2000
+ ]
2001
+ }
2002
+ (self.output_dir / "post_exploit_report.json").write_text(json.dumps(post_exploit_report, indent=2))
2003
+
2004
+ # Add findings to global list
2005
+ for f in findings:
2006
+ self._add_finding(f)
2007
+
2008
+ duration = time.time() - start_time
2009
+ result = PhaseResult(
2010
+ phase=phase,
2011
+ status="completed",
2012
+ started_at=started_at,
2013
+ finished_at=datetime.now(timezone.utc).isoformat(),
2014
+ duration=duration,
2015
+ findings=findings,
2016
+ tools_run=tools_run,
2017
+ errors=errors,
2018
+ metadata={
2019
+ "target_os": target_os,
2020
+ "shell_obtained": self.config.shell_obtained
2021
+ }
2022
+ )
2023
+
2024
+ self.phase_results[phase] = result
2025
+ if self.on_phase_complete:
2026
+ self.on_phase_complete(result)
2027
+
2028
+ return result
2029
+
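Post-exploitation is gated on config.shell_obtained (set by the sqlmap/commix/hydra steps above) and keyed off config.target_os. A minimal sketch of driving the phase directly against an already-constructed orchestrator instance; the name `orch` is an assumption of the example:

import asyncio

async def stage_post_exploit(orch) -> None:
    # Simulate the exploit phase having obtained a shell on a Linux host,
    # then run the post-exploitation phase on its own.
    orch.config.shell_obtained = True
    orch.config.target_os = "linux"
    result = await orch.run_post_exploit()
    print(result.status, [f.value for f in result.findings])

# asyncio.run(stage_post_exploit(orch))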
2030
+ # ==================== REPORT PHASE ====================
2031
+
2032
+ async def run_report(self) -> PhaseResult:
2033
+ """Execute report generation phase."""
2034
+ phase = Phase.REPORT
2035
+ started_at = datetime.now(timezone.utc).isoformat()
2036
+ start_time = time.time()
2037
+ findings = []
2038
+ tools_run = []
2039
+ errors = []
2040
+
2041
+ if self.on_phase_start:
2042
+ self.on_phase_start(phase)
2043
+
2044
+ self._log_phase(phase, f"Generating Report for {self.domain}")
2045
+
2046
+ # 1. Generate Summary
2047
+ summary = self._generate_summary()
2048
+ (self.output_dir / "SUMMARY.md").write_text(summary)
2049
+ tools_run.append("summary_generator")
2050
+ self._log_tool("Summary generated", "done")
2051
+
2052
+ # 2. Generate Findings JSON
2053
+ findings_data = [
2054
+ {
2055
+ "type": f.type,
2056
+ "value": f.value,
2057
+ "description": f.description,
2058
+ "severity": f.severity,
2059
+ "phase": f.phase,
2060
+ "tool": f.tool,
2061
+ "target": f.target,
2062
+ "metadata": f.metadata,
2063
+ "timestamp": f.timestamp
2064
+ }
2065
+ for f in self.findings
2066
+ ]
2067
+ (self.output_dir / "findings.json").write_text(json.dumps(findings_data, indent=2))
2068
+ tools_run.append("findings_export")
2069
+ self._log_tool("Findings exported", "done")
2070
+
2071
+ # 3. Generate HTML Report
2072
+ if self.config.report_format == "html":
2073
+ html_report = self._generate_html_report()
2074
+ report_file = self.output_dir / f"VAPT_Report_{self.domain.replace('.', '_')}.html"
2075
+ report_file.write_text(html_report)
2076
+ tools_run.append("html_report")
2077
+ self._log_tool(f"HTML Report: {report_file.name}", "done")
2078
+
2079
+ duration = time.time() - start_time
2080
+ result = PhaseResult(
2081
+ phase=phase,
2082
+ status="completed",
2083
+ started_at=started_at,
2084
+ finished_at=datetime.now(timezone.utc).isoformat(),
2085
+ duration=duration,
2086
+ findings=findings,
2087
+ tools_run=tools_run,
2088
+ errors=errors,
2089
+ metadata={
2090
+ "output_dir": str(self.output_dir),
2091
+ "total_findings": len(self.findings)
2092
+ }
2093
+ )
2094
+
2095
+ self.phase_results[phase] = result
2096
+ if self.on_phase_complete:
2097
+ self.on_phase_complete(result)
2098
+
2099
+ return result
2100
+
2101
+ def _generate_summary(self) -> str:
2102
+ """Generate markdown summary."""
2103
+ severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}
2104
+ for f in self.findings:
2105
+ sev = f.severity.lower()
2106
+ if sev in severity_counts:
2107
+ severity_counts[sev] += 1
2108
+
2109
+ phases_info = []
2110
+ for phase, result in self.phase_results.items():
2111
+ phases_info.append(f"| {phase.value.upper()} | {result.status} | {result.duration:.1f}s | {len(result.findings)} |")
2112
+
2113
+ return f"""# AIPT Scan Summary
2114
+
2115
+ ## Target Information
2116
+ - **Domain**: {self.domain}
2117
+ - **Target URL**: {self.target}
2118
+ - **Scan Date**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
2119
+ - **Report ID**: VAPT-{self.domain.upper().replace('.', '-')}-{datetime.now().strftime('%Y%m%d')}
2120
+
2121
+ ## Vulnerability Summary
2122
+ | Severity | Count |
2123
+ |----------|-------|
2124
+ | 🔴 Critical | {severity_counts['critical']} |
2125
+ | 🟠 High | {severity_counts['high']} |
2126
+ | 🟡 Medium | {severity_counts['medium']} |
2127
+ | 🔵 Low | {severity_counts['low']} |
2128
+ | ⚪ Info | {severity_counts['info']} |
2129
+ | **Total** | **{len(self.findings)}** |
2130
+
2131
+ ## Phase Results
2132
+ | Phase | Status | Duration | Findings |
2133
+ |-------|--------|----------|----------|
2134
+ {chr(10).join(phases_info)}
2135
+
2136
+ ## Scanner IDs
2137
+ {json.dumps(self.scan_ids, indent=2) if self.scan_ids else 'No enterprise scans'}
2138
+
2139
+ ## Assets Discovered
2140
+ - Subdomains: {len(self.subdomains)}
2141
+ - Live Hosts: {len(self.live_hosts)}
2142
+
2143
+ ## Output Directory
2144
+ {self.output_dir}
2145
+ """
2146
+
2147
+ def _generate_html_report(self) -> str:
2148
+ """Generate HTML report."""
2149
+ severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}
2150
+ for f in self.findings:
2151
+ sev = f.severity.lower()
2152
+ if sev in severity_counts:
2153
+ severity_counts[sev] += 1
2154
+
2155
+ findings_html = ""
2156
+ for f in self.findings:
2157
+ sev_class = f.severity.lower()
2158
+ findings_html += f"""
2159
+ <div class="finding {sev_class}">
2160
+ <div class="finding-header">
2161
+ <span class="severity-badge {sev_class}">{f.severity.upper()}</span>
2162
+ <span class="finding-title">{f.value}</span>
2163
+ <span class="finding-tool">{f.tool}</span>
2164
+ </div>
2165
+ <div class="finding-body">
2166
+ <p>{f.description}</p>
2167
+ <small>Target: {f.target or self.target} | Phase: {f.phase}</small>
2168
+ </div>
2169
+ </div>
2170
+ """
2171
+
2172
+ return f"""<!DOCTYPE html>
2173
+ <html lang="en">
2174
+ <head>
2175
+ <meta charset="UTF-8">
2176
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
2177
+ <title>VAPT Report - {self.domain}</title>
2178
+ <style>
2179
+ :root {{
2180
+ --critical: #dc3545;
2181
+ --high: #fd7e14;
2182
+ --medium: #ffc107;
2183
+ --low: #17a2b8;
2184
+ --info: #6c757d;
2185
+ }}
2186
+ body {{ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; margin: 0; padding: 20px; background: #f5f5f5; }}
2187
+ .container {{ max-width: 1200px; margin: 0 auto; }}
2188
+ .header {{ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 40px; border-radius: 10px; margin-bottom: 30px; }}
2189
+ .header h1 {{ margin: 0 0 10px 0; }}
2190
+ .stats {{ display: grid; grid-template-columns: repeat(5, 1fr); gap: 15px; margin-bottom: 30px; }}
2191
+ .stat {{ background: white; padding: 20px; border-radius: 10px; text-align: center; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
2192
+ .stat .number {{ font-size: 2em; font-weight: bold; }}
2193
+ .stat.critical .number {{ color: var(--critical); }}
2194
+ .stat.high .number {{ color: var(--high); }}
2195
+ .stat.medium .number {{ color: var(--medium); }}
2196
+ .stat.low .number {{ color: var(--low); }}
2197
+ .stat.info .number {{ color: var(--info); }}
2198
+ .findings {{ background: white; border-radius: 10px; padding: 20px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }}
2199
+ .finding {{ border-left: 4px solid; padding: 15px; margin-bottom: 15px; background: #fafafa; border-radius: 0 5px 5px 0; }}
2200
+ .finding.critical {{ border-color: var(--critical); }}
2201
+ .finding.high {{ border-color: var(--high); }}
2202
+ .finding.medium {{ border-color: var(--medium); }}
2203
+ .finding.low {{ border-color: var(--low); }}
2204
+ .finding.info {{ border-color: var(--info); }}
2205
+ .finding-header {{ display: flex; align-items: center; gap: 10px; margin-bottom: 10px; }}
2206
+ .severity-badge {{ padding: 3px 8px; border-radius: 3px; font-size: 0.8em; color: white; }}
2207
+ .severity-badge.critical {{ background: var(--critical); }}
2208
+ .severity-badge.high {{ background: var(--high); }}
2209
+ .severity-badge.medium {{ background: var(--medium); }}
2210
+ .severity-badge.low {{ background: var(--low); }}
2211
+ .severity-badge.info {{ background: var(--info); }}
2212
+ .finding-title {{ font-weight: bold; flex-grow: 1; }}
2213
+ .finding-tool {{ color: #666; font-size: 0.9em; }}
2214
+ .finding-body p {{ margin: 0 0 10px 0; }}
2215
+ .finding-body small {{ color: #666; }}
2216
+ </style>
2217
+ </head>
2218
+ <body>
2219
+ <div class="container">
2220
+ <div class="header">
2221
+ <h1>🔒 VAPT Report</h1>
2222
+ <p><strong>Target:</strong> {self.domain}</p>
2223
+ <p><strong>Date:</strong> {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
2224
+ <p><strong>Report ID:</strong> VAPT-{self.domain.upper().replace('.', '-')}-{datetime.now().strftime('%Y%m%d')}</p>
2225
+ </div>
2226
+
2227
+ <div class="stats">
2228
+ <div class="stat critical"><div class="number">{severity_counts['critical']}</div><div>Critical</div></div>
2229
+ <div class="stat high"><div class="number">{severity_counts['high']}</div><div>High</div></div>
2230
+ <div class="stat medium"><div class="number">{severity_counts['medium']}</div><div>Medium</div></div>
2231
+ <div class="stat low"><div class="number">{severity_counts['low']}</div><div>Low</div></div>
2232
+ <div class="stat info"><div class="number">{severity_counts['info']}</div><div>Info</div></div>
2233
+ </div>
2234
+
2235
+ <div class="findings">
2236
+ <h2>Findings ({len(self.findings)})</h2>
2237
+ {findings_html if findings_html else '<p>No vulnerabilities found.</p>'}
2238
+ </div>
2239
+
2240
+ <div style="text-align: center; margin-top: 30px; color: #666;">
2241
+ <p>Generated by AIPT - AI-Powered Penetration Testing</p>
2242
+ <p>Scanners: {', '.join(self.scan_ids.keys()) if self.scan_ids else 'Open Source Tools'}</p>
2243
+ </div>
2244
+ </div>
2245
+ </body>
2246
+ </html>"""
2247
+
2248
+ # ==================== MAIN RUNNER ====================
2249
+
2250
+ async def run(self, phases: Optional[List[Phase]] = None) -> Dict[str, Any]:
2251
+ """
2252
+ Run the full orchestration pipeline.
2253
+
2254
+ Args:
2255
+ phases: Optional list of phases to run (default: all)
2256
+
2257
+ Returns:
2258
+ Complete results dictionary
2259
+ """
2260
+ if phases is None:
2261
+ phases = [Phase.RECON, Phase.SCAN, Phase.ANALYZE, Phase.EXPLOIT, Phase.POST_EXPLOIT, Phase.REPORT]
2262
+
2263
+ start_time = time.time()
2264
+
2265
+ print("\n" + "="*60)
2266
+ print(" AIPT - AI-Powered Penetration Testing (v2.1 - Maximum Tools)")
2267
+ print("="*60)
2268
+ print(f" Target: {self.domain}")
2269
+ print(f" Output: {self.output_dir}")
2270
+ print(f" Mode: {'FULL (All Tools)' if self.config.full_mode else 'Standard'}")
2271
+ print(f" Intelligence: {'Enabled' if self.config.enable_intelligence else 'Disabled'}")
2272
+ print(f" Acunetix: {'Enabled' if self.config.use_acunetix else 'Disabled'}")
2273
+ print(f" Burp: {'Enabled' if self.config.use_burp else 'Disabled'}")
2274
+ print(f" Nessus: {'Enabled' if self.config.use_nessus else 'Disabled'}")
2275
+ print(f" ZAP: {'Enabled' if self.config.use_zap else 'Disabled'}")
2276
+ print(f" Exploitation: {'Enabled' if (self.config.full_mode or self.config.enable_exploitation) else 'Disabled'}")
2277
+ print("="*60 + "\n")
2278
+
2279
+ try:
2280
+ if Phase.RECON in phases and not self.config.skip_recon:
2281
+ await self.run_recon()
2282
+
2283
+ if Phase.SCAN in phases and not self.config.skip_scan:
2284
+ await self.run_scan()
2285
+
2286
+ # NEW: Intelligence Analysis Phase
2287
+ if Phase.ANALYZE in phases and self.config.enable_intelligence:
2288
+ await self.run_analyze()
2289
+
2290
+ if Phase.EXPLOIT in phases and not self.config.skip_exploit:
2291
+ await self.run_exploit()
2292
+
2293
+ # Auto-trigger POST_EXPLOIT if shell was obtained
2294
+ if Phase.POST_EXPLOIT in phases and self.config.shell_obtained:
2295
+ await self.run_post_exploit()
2296
+
2297
+ if Phase.REPORT in phases and not self.config.skip_report:
2298
+ await self.run_report()
2299
+
2300
+ except Exception as e:
2301
+ logger.exception(f"Orchestration error: {e}")
2302
+ raise
2303
+
2304
+ total_duration = time.time() - start_time
2305
+
2306
+ # Final summary
2307
+ print("\n" + "="*60)
2308
+ print(" SCAN COMPLETE")
2309
+ print("="*60)
2310
+ print(f" Duration: {total_duration:.1f}s")
2311
+ print(f" Findings: {len(self.findings)}")
2312
+ if self.attack_chains:
2313
+ print(f" Attack Chains: {len(self.attack_chains)}")
2314
+ print(f" Output: {self.output_dir}")
2315
+ print("="*60 + "\n")
2316
+
2317
+ return {
2318
+ "target": self.target,
2319
+ "domain": self.domain,
2320
+ "duration": total_duration,
2321
+ "phases": {p.value: r.__dict__ for p, r in self.phase_results.items()},
2322
+ "findings_count": len(self.findings),
2323
+ "attack_chains_count": len(self.attack_chains),
2324
+ "scan_ids": self.scan_ids,
2325
+ "output_dir": str(self.output_dir)
2326
+ }
2327
+
2328
+
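Since run() accepts an explicit phase list, the pipeline can also be driven programmatically instead of through the CLI below. A minimal sketch, assuming Orchestrator, OrchestratorConfig and Phase are imported from this module and that the output directory is whatever you configure:

import asyncio

async def recon_and_report(target: str) -> dict:
    """Run only the reconnaissance and reporting phases against a target."""
    config = OrchestratorConfig(target=target, output_dir="./scan_results")
    orch = Orchestrator(target, config)
    return await orch.run(phases=[Phase.RECON, Phase.REPORT])

results = asyncio.run(recon_and_report("example.com"))
print(results["findings_count"], "findings written to", results["output_dir"])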
2329
+ # ==================== CLI ====================
2330
+
2331
+ async def main():
2332
+ """CLI entry point."""
2333
+ import argparse
2334
+
2335
+ parser = argparse.ArgumentParser(
2336
+ description="AIPT Orchestrator - Full Penetration Testing Pipeline (v2.1 - Maximum Tools)",
2337
+ formatter_class=argparse.RawDescriptionHelpFormatter,
2338
+ epilog="""
2339
+ Examples:
2340
+ aiptx scan example.com # Standard scan
2341
+ aiptx scan example.com --full # Full scan with exploitation tools
2342
+ aiptx scan example.com --full --exploit # Enable all exploitation
2343
+ aiptx scan example.com --nessus --zap # With enterprise scanners
2344
+
2345
+ Tools included:
2346
+ RECON: subfinder, assetfinder, amass, nmap, waybackurls, theHarvester, dnsrecon, wafw00f, whatweb
2347
+ SCAN: nuclei, ffuf, sslscan, nikto, wpscan, testssl, gobuster, dirsearch
2348
+ EXPLOIT: sqlmap, commix, xsstrike, hydra, searchsploit (--full mode)
2349
+ POST: linpeas, winpeas, pspy, lazagne (auto-triggers on shell access)
2350
+ """
2351
+ )
2352
+
2353
+ # Target
2354
+ parser.add_argument("target", help="Target domain or URL")
2355
+ parser.add_argument("-o", "--output", default="./scan_results", help="Output directory")
2356
+
2357
+ # Scan modes
2358
+ parser.add_argument("--full", action="store_true",
2359
+ help="Enable FULL mode with all tools including exploitation")
2360
+ parser.add_argument("--exploit", action="store_true",
2361
+ help="Enable exploitation tools (sqlmap, hydra, commix)")
2362
+
2363
+ # Phase control
2364
+ parser.add_argument("--skip-recon", action="store_true", help="Skip reconnaissance phase")
2365
+ parser.add_argument("--skip-scan", action="store_true", help="Skip scanning phase")
2366
+ parser.add_argument("--skip-exploit", action="store_true", help="Skip exploitation phase")
2367
+
2368
+ # Enterprise scanners
2369
+ parser.add_argument("--no-acunetix", action="store_true", help="Disable Acunetix")
2370
+ parser.add_argument("--no-burp", action="store_true", help="Disable Burp Suite")
2371
+ parser.add_argument("--nessus", action="store_true", help="Enable Nessus scanner")
2372
+ parser.add_argument("--zap", action="store_true", help="Enable OWASP ZAP scanner")
2373
+ parser.add_argument("--wait", action="store_true", help="Wait for enterprise scanners to complete")
2374
+ parser.add_argument("--acunetix-profile", default="full",
2375
+ choices=["full", "high_risk", "xss", "sqli"],
2376
+ help="Acunetix scan profile")
2377
+
2378
+ # SQLMap settings
2379
+ parser.add_argument("--sqlmap-level", type=int, default=2,
2380
+ help="SQLMap testing level (1-5, default: 2)")
2381
+ parser.add_argument("--sqlmap-risk", type=int, default=2,
2382
+ help="SQLMap risk level (1-3, default: 2)")
2383
+
2384
+ # DevSecOps
2385
+ parser.add_argument("--container", action="store_true",
2386
+ help="Enable container security scanning (trivy)")
2387
+ parser.add_argument("--secrets", action="store_true",
2388
+ help="Enable secret detection (gitleaks, trufflehog)")
2389
+
2390
+ args = parser.parse_args()
2391
+
2392
+ config = OrchestratorConfig(
2393
+ target=args.target,
2394
+ output_dir=args.output,
2395
+ full_mode=args.full,
2396
+ skip_recon=args.skip_recon,
2397
+ skip_scan=args.skip_scan,
2398
+ skip_exploit=args.skip_exploit,
2399
+ use_acunetix=not args.no_acunetix,
2400
+ use_burp=not args.no_burp,
2401
+ use_nessus=args.nessus,
2402
+ use_zap=args.zap,
2403
+ wait_for_scanners=args.wait,
2404
+ acunetix_profile=args.acunetix_profile,
2405
+ enable_exploitation=args.exploit or args.full,
2406
+ sqlmap_level=args.sqlmap_level,
2407
+ sqlmap_risk=args.sqlmap_risk,
2408
+ enable_container_scan=args.container,
2409
+ enable_secret_detection=args.secrets
2410
+ )
2411
+
2412
+ orchestrator = Orchestrator(args.target, config)
2413
+ results = await orchestrator.run()
2414
+
2415
+ # Summary
2416
+ print(f"\n{'='*60}")
2417
+ print(f" ✓ SCAN COMPLETE - {results['findings_count']} findings")
2418
+ print(f"{'='*60}")
2419
+ print(f" Output: {results['output_dir']}")
2420
+ print(f" Duration: {results['duration']:.1f}s")
2421
+ if config.full_mode:
2422
+ print(f" Mode: FULL (All exploitation tools enabled)")
2423
+ print(f"{'='*60}\n")
2424
+
2425
+
2426
+ if __name__ == "__main__":
2427
+ asyncio.run(main())