zen-ai-pentest 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +28 -0
- agents/agent_base.py +239 -0
- agents/agent_orchestrator.py +346 -0
- agents/analysis_agent.py +225 -0
- agents/cli.py +258 -0
- agents/exploit_agent.py +224 -0
- agents/integration.py +211 -0
- agents/post_scan_agent.py +937 -0
- agents/react_agent.py +384 -0
- agents/react_agent_enhanced.py +616 -0
- agents/react_agent_vm.py +298 -0
- agents/research_agent.py +176 -0
- api/__init__.py +11 -0
- api/auth.py +123 -0
- api/main.py +1027 -0
- api/schemas.py +357 -0
- api/websocket.py +97 -0
- autonomous/__init__.py +122 -0
- autonomous/agent.py +253 -0
- autonomous/agent_loop.py +1370 -0
- autonomous/exploit_validator.py +1537 -0
- autonomous/memory.py +448 -0
- autonomous/react.py +339 -0
- autonomous/tool_executor.py +488 -0
- backends/__init__.py +16 -0
- backends/chatgpt_direct.py +133 -0
- backends/claude_direct.py +130 -0
- backends/duckduckgo.py +138 -0
- backends/openrouter.py +120 -0
- benchmarks/__init__.py +149 -0
- benchmarks/benchmark_engine.py +904 -0
- benchmarks/ci_benchmark.py +785 -0
- benchmarks/comparison.py +729 -0
- benchmarks/metrics.py +553 -0
- benchmarks/run_benchmarks.py +809 -0
- ci_cd/__init__.py +2 -0
- core/__init__.py +17 -0
- core/async_pool.py +282 -0
- core/asyncio_fix.py +222 -0
- core/cache.py +472 -0
- core/container.py +277 -0
- core/database.py +114 -0
- core/input_validator.py +353 -0
- core/models.py +288 -0
- core/orchestrator.py +611 -0
- core/plugin_manager.py +571 -0
- core/rate_limiter.py +405 -0
- core/secure_config.py +328 -0
- core/shield_integration.py +296 -0
- modules/__init__.py +46 -0
- modules/cve_database.py +362 -0
- modules/exploit_assist.py +330 -0
- modules/nuclei_integration.py +480 -0
- modules/osint.py +604 -0
- modules/protonvpn.py +554 -0
- modules/recon.py +165 -0
- modules/sql_injection_db.py +826 -0
- modules/tool_orchestrator.py +498 -0
- modules/vuln_scanner.py +292 -0
- modules/wordlist_generator.py +566 -0
- risk_engine/__init__.py +99 -0
- risk_engine/business_impact.py +267 -0
- risk_engine/business_impact_calculator.py +563 -0
- risk_engine/cvss.py +156 -0
- risk_engine/epss.py +190 -0
- risk_engine/example_usage.py +294 -0
- risk_engine/false_positive_engine.py +1073 -0
- risk_engine/scorer.py +304 -0
- web_ui/backend/main.py +471 -0
- zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
- zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
- zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
- zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
- zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
- zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
|
@@ -0,0 +1,937 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Post-Scan Agent - Automates the Pentester Workflow After Initial Scan
|
|
3
|
+
|
|
4
|
+
Based on industry-standard penetration testing methodology (PTES, OWASP, NIST):
|
|
5
|
+
1. Manual Verification (False Positive Elimination)
|
|
6
|
+
2. Vulnerability Validation & Risk Scoring
|
|
7
|
+
3. Exploitation Attempts
|
|
8
|
+
4. Post-Exploitation (Privilege Escalation, Lateral Movement)
|
|
9
|
+
5. Evidence Collection & Documentation
|
|
10
|
+
6. Loot Aggregation
|
|
11
|
+
7. Cleanup & Restoration
|
|
12
|
+
8. Report Generation Preparation
|
|
13
|
+
|
|
14
|
+
This agent runs automatically after every scan to ensure professional pentest standards.
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import asyncio
|
|
18
|
+
import json
|
|
19
|
+
from dataclasses import dataclass, field
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
from enum import Enum
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
24
|
+
|
|
25
|
+
from .agent_base import AgentRole, AgentState, BaseAgent
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class PostScanPhase(Enum):
    """Workflow phases for post-scan processing, in PTES-style order."""

    VERIFICATION = "verification"  # weed out false positives
    VALIDATION = "validation"  # confirm findings are genuinely exploitable
    EXPLOITATION = "exploitation"  # controlled exploitation attempts
    POST_EXPLOITATION = "post_exploitation"  # privesc / lateral movement review
    EVIDENCE_COLLECTION = "evidence"  # screenshots, logs, command proof
    LOOT_DOCUMENTATION = "loot"  # credentials and sensitive data
    CLEANUP = "cleanup"  # revert changes, remove test artifacts
    REPORT_PREP = "report_prep"  # assemble data for the final report
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@dataclass
class VerifiedFinding:
    """One vulnerability finding enriched with verification, exploitation,
    evidence, loot, and cleanup state as the workflow progresses."""

    id: str
    title: str
    severity: str  # critical, high, medium, low, info
    cvss_score: float
    description: str
    affected_host: str
    port: Optional[int] = None
    service: Optional[str] = None

    # Verification state
    verified: bool = False
    verification_method: str = ""  # manual, automated, hybrid
    false_positive: bool = False
    verification_notes: str = ""

    # Exploitation state
    exploited: bool = False
    exploitation_successful: bool = False
    exploit_method: str = ""
    exploitation_notes: str = ""

    # Post-exploitation state
    privileges_obtained: str = ""  # user, admin, system, none
    lateral_movement_possible: bool = False
    lateral_movement_targets: List[str] = field(default_factory=list)

    # Evidence artifacts
    screenshots: List[str] = field(default_factory=list)  # paths to screenshots
    log_files: List[str] = field(default_factory=list)
    command_history: List[str] = field(default_factory=list)

    # Loot gathered through this finding
    credentials_found: List[Dict[str, str]] = field(default_factory=list)
    sensitive_files: List[str] = field(default_factory=list)

    # Cleanup state
    cleanup_performed: bool = False
    cleanup_verified: bool = False
    cleanup_notes: str = ""

    # Timestamps (ISO-8601 strings)
    discovered_at: str = field(default_factory=lambda: datetime.now().isoformat())
    verified_at: Optional[str] = None
    exploited_at: Optional[str] = None
    cleaned_up_at: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the finding into a nested plain dictionary."""
        verification = {
            "verified": self.verified,
            "method": self.verification_method,
            "false_positive": self.false_positive,
            "notes": self.verification_notes,
            "timestamp": self.verified_at,
        }
        exploitation = {
            "attempted": self.exploited,
            "successful": self.exploitation_successful,
            "method": self.exploit_method,
            "notes": self.exploitation_notes,
            "timestamp": self.exploited_at,
        }
        post_exploitation = {
            "privileges": self.privileges_obtained,
            "lateral_movement_possible": self.lateral_movement_possible,
            "lateral_targets": self.lateral_movement_targets,
        }
        evidence = {
            "screenshots": self.screenshots,
            "logs": self.log_files,
            "commands": self.command_history,
        }
        loot = {
            "credentials": self.credentials_found,
            "sensitive_files": self.sensitive_files,
        }
        cleanup = {
            "performed": self.cleanup_performed,
            "verified": self.cleanup_verified,
            "notes": self.cleanup_notes,
            "timestamp": self.cleaned_up_at,
        }
        return {
            "id": self.id,
            "title": self.title,
            "severity": self.severity,
            "cvss_score": self.cvss_score,
            "description": self.description,
            "affected_host": self.affected_host,
            "port": self.port,
            "service": self.service,
            "verification": verification,
            "exploitation": exploitation,
            "post_exploitation": post_exploitation,
            "evidence": evidence,
            "loot": loot,
            "cleanup": cleanup,
        }
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
@dataclass
class PentestLoot:
    """Aggregate container for everything looted during the engagement."""

    credentials: List[Dict[str, str]] = field(default_factory=list)
    hashes: List[str] = field(default_factory=list)
    tokens: List[str] = field(default_factory=list)
    sensitive_files: List[str] = field(default_factory=list)
    database_dumps: List[str] = field(default_factory=list)
    screenshots: List[str] = field(default_factory=list)

    def add_credential(
        self, host: str, username: str, password: str, credential_type: str = "password"
    ):
        """Record one discovered credential with a capture timestamp."""
        entry = {
            "host": host,
            "username": username,
            "password": password,
            "type": credential_type,
            "discovered_at": datetime.now().isoformat(),
        }
        self.credentials.append(entry)
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
class PostScanAgent(BaseAgent):
|
|
165
|
+
"""
|
|
166
|
+
Agent that automates professional pentester post-scan workflow.
|
|
167
|
+
|
|
168
|
+
Runs after initial automated scanning to:
|
|
169
|
+
1. Verify findings (eliminate false positives)
|
|
170
|
+
2. Validate vulnerabilities
|
|
171
|
+
3. Attempt exploitation
|
|
172
|
+
4. Document evidence
|
|
173
|
+
5. Collect loot
|
|
174
|
+
6. Perform cleanup
|
|
175
|
+
7. Prepare for reporting
|
|
176
|
+
"""
|
|
177
|
+
|
|
178
|
+
def __init__(self):
    """Set up agent identity and empty per-run state containers."""
    super().__init__(
        name="PostScanAgent",
        role=AgentRole.POST_EXPLOITATION,
        description="Automates post-scan pentester workflow - verification, exploitation, evidence collection",
    )
    # Per-run state; a fresh agent starts each run with empty containers.
    self.loot = PentestLoot()
    self.verified_findings: List[VerifiedFinding] = []
    self.phase_results: Dict[str, Any] = {}
    self.report_data: Dict[str, Any] = {}
    self.current_phase = PostScanPhase.VERIFICATION
    # Overwritten with a timestamped per-target directory when run() starts.
    self.evidence_dir = Path("evidence")
|
|
190
|
+
|
|
191
|
+
async def run(
    self, target: str, initial_findings: List[Dict[str, Any]], **kwargs
) -> Dict[str, Any]:
    """
    Execute the complete eight-phase post-scan workflow.

    Args:
        target: Target host/IP.
        initial_findings: Raw findings from the automated scan.

    Returns:
        Complete post-scan results ready for reporting.
    """
    self.log_action(f"Starting post-scan workflow for {target}")
    # Fix: banner previously printed a stray "[" before the separator,
    # making the opening line asymmetric with the closing one.
    print(f"\n{'='*60}")
    print(" Post-Scan Pentest Workflow")
    print(f" Target: {target}")
    print(f" Initial Findings: {len(initial_findings)}")
    print(f"{'='*60}\n")

    # Per-run evidence directory, timestamped to avoid collisions.
    self.evidence_dir = Path(
        f"evidence/{target}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    )
    self.evidence_dir.mkdir(parents=True, exist_ok=True)

    # Phases run strictly in PTES order; each phase records its summary
    # into self.phase_results and mutates self.verified_findings in place.
    await self._phase_verification(target, initial_findings)  # Phase 1
    await self._phase_validation(target)  # Phase 2
    await self._phase_exploitation(target)  # Phase 3
    await self._phase_post_exploitation(target)  # Phase 4
    await self._phase_evidence_collection(target)  # Phase 5
    await self._phase_loot_documentation(target)  # Phase 6
    await self._phase_cleanup(target)  # Phase 7
    await self._phase_report_preparation(target)  # Phase 8

    # Hoisted: the "verified and not false positive" filter was previously
    # re-computed inline.
    confirmed = [
        f for f in self.verified_findings if f.verified and not f.false_positive
    ]
    return {
        "target": target,
        "phases_completed": list(self.phase_results.keys()),
        "verified_findings": [f.to_dict() for f in self.verified_findings],
        "total_verified": len(confirmed),
        "total_false_positives": len(
            [f for f in self.verified_findings if f.false_positive]
        ),
        "total_exploited": len(
            [f for f in self.verified_findings if f.exploitation_successful]
        ),
        "loot_summary": {
            "credentials": len(self.loot.credentials),
            "screenshots": len(self.loot.screenshots),
            "sensitive_files": len(self.loot.sensitive_files),
        },
        "evidence_directory": str(self.evidence_dir),
        "report_data": self.report_data,
    }
|
|
266
|
+
|
|
267
|
+
async def _phase_verification(
    self, target: str, initial_findings: List[Dict[str, Any]]
):
    """Phase 1: triage raw scanner output and flag false positives."""
    self.current_phase = PostScanPhase.VERIFICATION
    print(f"[Phase 1/8] Manual Verification of {len(initial_findings)} findings...")

    verified_count = 0
    false_positive_count = 0

    for raw in initial_findings:
        # Wrap the raw scanner dict in a structured finding, filling in
        # defaults for any missing keys.
        candidate = VerifiedFinding(
            id=raw.get("id", f"FINDING_{len(self.verified_findings)}"),
            title=raw.get("title", "Unknown"),
            severity=raw.get("severity", "medium"),
            cvss_score=raw.get("cvss_score", 5.0),
            description=raw.get("description", ""),
            affected_host=target,
            port=raw.get("port"),
            service=raw.get("service"),
        )

        # Run the (simulated) manual verification check.
        outcome = await self._verify_finding(candidate)
        candidate.verified = outcome["verified"]
        candidate.false_positive = outcome["false_positive"]
        candidate.verification_method = outcome["method"]
        candidate.verification_notes = outcome["notes"]
        candidate.verified_at = datetime.now().isoformat()

        if candidate.false_positive:
            false_positive_count += 1
        elif candidate.verified:
            verified_count += 1

        self.verified_findings.append(candidate)

    self.phase_results["verification"] = {
        "total_findings": len(initial_findings),
        "verified": verified_count,
        "false_positives": false_positive_count,
        "requires_validation": verified_count,
    }

    print(
        f" [OK] Verified: {verified_count}, False Positives: {false_positive_count}"
    )
    self.log_action(
        f"Verification complete: {verified_count} valid, {false_positive_count} FP"
    )
|
|
318
|
+
|
|
319
|
+
async def _verify_finding(self, finding: VerifiedFinding) -> Dict[str, Any]:
    """Heuristically verify one finding for false positives.

    A real implementation would re-run the test manually, check
    environmental factors, cross-check with other tools, and apply
    contextual analysis; this simulation scores confidence instead.
    """
    # Signals that make a true positive likely: high CVSS, a concrete
    # port, or explicit confirmation wording in the description.
    is_high_confidence = (
        finding.cvss_score > 7.0
        or finding.port is not None
        or "confirmed" in finding.description.lower()
    )

    if is_high_confidence:
        return {
            "verified": True,
            "false_positive": False,
            "method": "automated_context_analysis",
            "notes": f"High confidence finding on port {finding.port}",
        }
    # Low confidence: still reported, but flagged for a human reviewer.
    return {
        "verified": True,
        "false_positive": False,
        "method": "requires_manual_review",
        "notes": "Low confidence - manual verification recommended",
    }
|
|
352
|
+
|
|
353
|
+
async def _phase_validation(self, target: str):
    """Phase 2: confirm exploitability and adjust risk scores."""
    self.current_phase = PostScanPhase.VALIDATION
    print("[Phase 2/8] Vulnerability Validation & Risk Assessment...")

    validated_count = 0

    # Only findings that survived verification are worth validating.
    for finding in self.verified_findings:
        if not finding.verified or finding.false_positive:
            continue

        validation = await self._validate_vulnerability(finding)
        if validation["exploitable"]:
            # Confirmed exploitability nudges the score up, capped at 10.0.
            finding.cvss_score = min(10.0, finding.cvss_score + 0.5)
            validated_count += 1

    self.phase_results["validation"] = {
        "validated": validated_count,
        "risk_assessment": "completed",
    }

    print(f" [OK] {validated_count} vulnerabilities validated as exploitable")
|
|
376
|
+
|
|
377
|
+
async def _validate_vulnerability(self, finding: VerifiedFinding) -> Dict[str, Any]:
    """Decide whether a finding is actually exploitable (heuristic stand-in).

    A real implementation would check compensating controls, verify
    exploit preconditions, and run a safe proof-of-concept.
    """
    exploitable_services = {"ssh", "http", "https", "smb", "rdp", "ftp"}

    service = (finding.service or "").lower()
    if service in exploitable_services:
        return {"exploitable": True, "confidence": "high"}
    if finding.cvss_score >= 9.0:
        return {"exploitable": True, "confidence": "medium"}
    return {"exploitable": False, "confidence": "low"}
|
|
392
|
+
|
|
393
|
+
async def _phase_exploitation(self, target: str):
    """Phase 3: attempt controlled exploitation of validated findings.

    Fix: the candidate filter now also excludes findings flagged as
    false positives, matching the filtering applied in every other
    phase (validation, evidence collection, report prep).
    """
    self.current_phase = PostScanPhase.EXPLOITATION
    print("[Phase 3/8] Exploitation Attempts...")

    exploitation_results = []

    # Prioritize by severity; never exploit an unverified finding or a
    # known false positive.
    critical_findings = [
        f
        for f in self.verified_findings
        if f.severity in ("critical", "high")
        and f.verified
        and not f.false_positive
    ]

    for finding in critical_findings[:5]:  # Limit attempts for safety
        result = await self._attempt_exploitation(finding)
        exploitation_results.append(result)

        finding.exploited = True
        finding.exploitation_successful = result["success"]
        finding.exploit_method = result["method"]
        finding.exploitation_notes = result["notes"]
        finding.exploited_at = datetime.now().isoformat()

    successful = len([r for r in exploitation_results if r["success"]])

    self.phase_results["exploitation"] = {
        "attempted": len(exploitation_results),
        "successful": successful,
        "methods_used": list(set(r["method"] for r in exploitation_results)),
    }

    print(
        f" [OK] Attempted: {len(exploitation_results)}, Successful: {successful}"
    )
|
|
428
|
+
|
|
429
|
+
async def _attempt_exploitation(self, finding: VerifiedFinding) -> Dict[str, Any]:
    """Simulate a safe exploitation attempt for one finding.

    A real implementation would drive Metasploit / custom exploit
    modules, testing with safe payloads first and documenting every
    attempt.
    """
    methods = {
        "sql_injection": "SQLMap with --risk=1 --level=1",
        "xss": "XSS payload injection with event handlers",
        "rce": "Command injection with safe ping test",
        "lfi": "Local file inclusion with /etc/passwd",
        "default_creds": "Default credential testing",
        "known_exploit": "CVE-specific exploit module",
    }

    # Dispatch on keywords in the finding title.
    title = finding.title.lower()
    if "sql" in title:
        return {
            "success": True,
            "method": methods["sql_injection"],
            "notes": "Successfully extracted database version",
        }
    if "xss" in title:
        return {
            "success": True,
            "method": methods["xss"],
            "notes": "Reflected XSS confirmed",
        }
    return {
        "success": False,
        "method": "generic_test",
        "notes": "Requires manual exploitation",
    }
|
|
464
|
+
|
|
465
|
+
async def _phase_post_exploitation(self, target: str):
    """Phase 4: assess privileges, lateral movement, and harvest loot."""
    self.current_phase = PostScanPhase.POST_EXPLOITATION
    print("[Phase 4/8] Post-Exploitation Analysis...")

    compromised = [f for f in self.verified_findings if f.exploitation_successful]

    for finding in compromised:
        # What privilege level did the exploit yield?
        priv = await self._assess_privileges(finding)
        finding.privileges_obtained = priv["level"]

        # Could an attacker pivot further from this host?
        lateral = await self._check_lateral_movement(finding)
        finding.lateral_movement_possible = lateral["possible"]
        finding.lateral_movement_targets = lateral["targets"]

        # Credential harvesting only makes sense with elevated access.
        if priv["level"] in ("admin", "system"):
            creds = await self._harvest_credentials(finding)
            finding.credentials_found = creds
            for cred in creds:
                self.loot.add_credential(
                    finding.affected_host,
                    cred.get("username", ""),
                    cred.get("password", ""),
                    cred.get("type", "password"),
                )

    self.phase_results["post_exploitation"] = {
        "systems_compromised": len(compromised),
        "privilege_levels": list(
            set(f.privileges_obtained for f in compromised)
        ),
        "lateral_movement_opportunities": sum(
            1 for f in compromised if f.lateral_movement_possible
        ),
    }

    print(f" [OK] Analyzed {len(compromised)} compromised systems")
|
|
507
|
+
|
|
508
|
+
async def _assess_privileges(self, finding: VerifiedFinding) -> Dict[str, Any]:
    """Report the privilege level obtained (static stand-in).

    A real implementation would check the user ID, group memberships,
    and sudo/admin rights on the compromised host.
    """
    assessment = {"level": "user", "groups": ["users"], "sudo": False}
    return assessment
|
|
515
|
+
|
|
516
|
+
async def _check_lateral_movement(self, finding: VerifiedFinding) -> Dict[str, Any]:
    """Report simulated lateral-movement options from this host.

    A real implementation would run network discovery, credential-reuse
    checks, and trust-relationship analysis.
    """
    assessment = {
        "possible": True,
        "targets": ["internal_subnet_1"],
        "method": "pass_the_hash",
    }
    return assessment
|
|
527
|
+
|
|
528
|
+
async def _harvest_credentials(
    self, finding: VerifiedFinding
) -> List[Dict[str, str]]:
    """Return simulated credentials harvested from a compromised system.

    A real implementation would dump password hashes, extract browser
    credentials, search config files for passwords, and analyze memory
    dumps / keylogger output.
    """
    harvested = [
        {"username": "admin", "password": "[HASH]", "type": "ntlm_hash"},
        {
            "username": "service_account",
            "password": "[CLEARTEXT]",
            "type": "password",
        },
    ]
    return harvested
|
|
545
|
+
|
|
546
|
+
async def _phase_evidence_collection(self, target: str):
    """Phase 5: record evidence artifacts (screenshots, logs, commands)."""
    self.current_phase = PostScanPhase.EVIDENCE_COLLECTION
    print("[Phase 5/8] Evidence Collection...")

    evidence_count = 0

    for finding in self.verified_findings:
        if not finding.verified or finding.false_positive:
            continue

        # Screenshot path is recorded on the finding and in the global loot.
        # NOTE(review): only paths are recorded here; the files themselves
        # are not created by this simulation.
        screenshot = str(self.evidence_dir / f"{finding.id}_evidence.png")
        finding.screenshots.append(screenshot)
        self.loot.screenshots.append(screenshot)

        # Reconstruct the commands that would reproduce the finding.
        finding.command_history = await self._generate_command_history(finding)

        # Companion log file path for the finding.
        finding.log_files.append(str(self.evidence_dir / f"{finding.id}_logs.txt"))

        evidence_count += 1

    self.phase_results["evidence"] = {
        "screenshots_taken": len(self.loot.screenshots),
        "log_files": len([f for f in self.verified_findings if f.log_files]),
        "evidence_directory": str(self.evidence_dir),
    }

    print(f" [OK] Collected evidence for {evidence_count} findings")
|
|
576
|
+
|
|
577
|
+
async def _generate_command_history(self, finding: VerifiedFinding) -> List[str]:
    """Return reproduction commands appropriate for the finding type."""
    title = finding.title.lower()

    if "sql" in title:
        return [
            f"sqlmap -u 'http://{finding.affected_host}:{finding.port}/search?q=test' --risk=1",
            "sqlmap --dbs",
            "sqlmap -D database --tables",
            "sqlmap -D database -T users --dump",
        ]
    if "xss" in title:
        return [
            f"curl -X POST http://{finding.affected_host}:{finding.port}/comment",
            "<script>alert('XSS')</script>",
            "<img src=x onerror=alert('XSS')>",
        ]
    # Fallback: basic service fingerprinting of the affected port.
    return [f"nmap -sV -p {finding.port} {finding.affected_host}"]
|
|
598
|
+
|
|
599
|
+
async def _phase_loot_documentation(self, target: str):
    """Phase 6: persist aggregated loot to a JSON file in the evidence dir."""
    self.current_phase = PostScanPhase.LOOT_DOCUMENTATION
    print("[Phase 6/8] Loot Documentation...")

    loot_file = self.evidence_dir / "loot_summary.json"
    loot_data = {
        "credentials": self.loot.credentials,
        "hashes": self.loot.hashes,
        "tokens": self.loot.tokens,
        "sensitive_files": self.loot.sensitive_files,
        "screenshots": self.loot.screenshots,
        "collected_at": datetime.now().isoformat(),
    }
    loot_file.write_text(json.dumps(loot_data, indent=2))

    self.phase_results["loot"] = {
        "credentials_found": len(self.loot.credentials),
        "screenshots": len(self.loot.screenshots),
        "loot_file": str(loot_file),
    }

    print(
        f" [OK] Documented {len(self.loot.credentials)} credentials, {len(self.loot.screenshots)} screenshots"
    )
|
|
628
|
+
|
|
629
|
+
async def _phase_cleanup(self, target: str):
    """Phase 7: undo test artifacts on every successfully exploited system."""
    self.current_phase = PostScanPhase.CLEANUP
    print("[Phase 7/8] Cleanup & Restoration...")

    cleanup_actions = []

    for finding in self.verified_findings:
        if not finding.exploitation_successful:
            continue

        outcome = await self._perform_cleanup(finding)
        finding.cleanup_performed = outcome["performed"]
        finding.cleanup_verified = outcome["verified"]
        finding.cleanup_notes = outcome["notes"]
        finding.cleaned_up_at = datetime.now().isoformat()

        status = "success" if outcome["verified"] else "pending"
        cleanup_actions.append({"finding": finding.id, "status": status})

    self.phase_results["cleanup"] = {
        "systems_cleaned": len(cleanup_actions),
        "verified": len([a for a in cleanup_actions if a["status"] == "success"]),
        "actions": cleanup_actions,
    }

    print(f" [OK] Cleanup performed on {len(cleanup_actions)} systems")
|
|
659
|
+
|
|
660
|
+
async def _perform_cleanup(self, finding: VerifiedFinding) -> Dict[str, Any]:
    """Simulate cleanup for one finding.

    A real implementation would remove created accounts, delete uploaded
    files, strip persistence mechanisms, clear logs if allowed, and then
    verify the system state.
    """
    result = {
        "performed": True,
        "verified": True,
        "notes": f"Removed test artifacts from {finding.affected_host}",
    }
    return result
|
|
674
|
+
|
|
675
|
+
async def _phase_report_preparation(self, target: str):
    """Phase 8: Prepare data for final report"""
    self.current_phase = PostScanPhase.REPORT_PREP
    print("[Phase 8/8] Report Preparation...")

    # Confirmed findings: verified and not ruled out as false positives.
    confirmed = [
        f for f in self.verified_findings if f.verified and not f.false_positive
    ]

    # Bucket confirmed findings by (lower-cased) severity.
    by_severity = {sev: [] for sev in ("critical", "high", "medium", "low", "info")}
    for finding in confirmed:
        bucket = by_severity.get(finding.severity.lower())
        if bucket is not None:
            bucket.append(finding.to_dict())

    today = datetime.now().strftime("%Y-%m-%d")

    # Executive summary data
    executive_summary = {
        "target": target,
        "test_date": today,
        "total_findings": len(self.verified_findings),
        "confirmed_vulnerabilities": len(confirmed),
        "false_positives": len(
            [f for f in self.verified_findings if f.false_positive]
        ),
        "exploited_systems": len(
            [f for f in self.verified_findings if f.exploitation_successful]
        ),
        "critical_count": len(by_severity["critical"]),
        "high_count": len(by_severity["high"]),
        "medium_count": len(by_severity["medium"]),
        "low_count": len(by_severity["low"]),
        "overall_risk": self._calculate_overall_risk(),
        "key_recommendations": self._generate_recommendations(),
    }

    # Technical findings (already-confirmed ones, serialized)
    technical_findings = [f.to_dict() for f in confirmed]

    # Remediation priorities, keyed by urgency tier
    remediation = {
        "immediate": by_severity["critical"],
        "short_term": by_severity["high"],
        "medium_term": by_severity["medium"],
        "long_term": by_severity["low"] + by_severity["info"],
    }

    self.report_data = {
        "executive_summary": executive_summary,
        "methodology": "PTES (Penetration Testing Execution Standard)",
        "phases_completed": list(self.phase_results.keys()),
        "technical_findings": technical_findings,
        "remediation_priorities": remediation,
        "evidence_location": str(self.evidence_dir),
        "loot_summary": {
            "credentials": len(self.loot.credentials),
            "screenshots": len(self.loot.screenshots),
        },
        "appendices": {
            "tools_used": ["Nmap", "SQLMap", "Custom Scripts"],
            "testing_period": today,
            "limitations": "Simulated test environment",
        },
    }

    # Save report data alongside the other collected evidence.
    report_file = self.evidence_dir / "report_data.json"
    with open(report_file, "w") as f:
        json.dump(self.report_data, f, indent=2)

    self.phase_results["report_prep"] = {
        "report_file": str(report_file),
        "findings_by_severity": {k: len(v) for k, v in by_severity.items()},
        "executive_summary_ready": True,
    }

    print(f" [OK] Report data prepared: {len(technical_findings)} findings")
    print(f" [OK] Report file: {report_file}")
def _calculate_overall_risk(self) -> str:
|
|
763
|
+
"""Calculate overall risk rating"""
|
|
764
|
+
critical = len(
|
|
765
|
+
[
|
|
766
|
+
f
|
|
767
|
+
for f in self.verified_findings
|
|
768
|
+
if f.severity == "critical" and f.verified
|
|
769
|
+
]
|
|
770
|
+
)
|
|
771
|
+
high = len(
|
|
772
|
+
[f for f in self.verified_findings if f.severity == "high" and f.verified]
|
|
773
|
+
)
|
|
774
|
+
|
|
775
|
+
if critical > 0:
|
|
776
|
+
return "CRITICAL"
|
|
777
|
+
elif high >= 3:
|
|
778
|
+
return "HIGH"
|
|
779
|
+
elif high > 0:
|
|
780
|
+
return "MEDIUM"
|
|
781
|
+
else:
|
|
782
|
+
return "LOW"
|
|
783
|
+
|
|
784
|
+
def _generate_recommendations(self) -> List[str]:
|
|
785
|
+
"""Generate key recommendations based on findings"""
|
|
786
|
+
recommendations = []
|
|
787
|
+
|
|
788
|
+
critical_findings = [
|
|
789
|
+
f for f in self.verified_findings if f.severity == "critical" and f.verified
|
|
790
|
+
]
|
|
791
|
+
|
|
792
|
+
if any("sql" in f.title.lower() for f in critical_findings):
|
|
793
|
+
recommendations.append(
|
|
794
|
+
"Implement parameterized queries and input validation"
|
|
795
|
+
)
|
|
796
|
+
|
|
797
|
+
if any("default" in f.title.lower() for f in critical_findings):
|
|
798
|
+
recommendations.append("Change all default credentials immediately")
|
|
799
|
+
|
|
800
|
+
if any(f.exploitation_successful for f in self.verified_findings):
|
|
801
|
+
recommendations.append(
|
|
802
|
+
"Deploy endpoint detection and response (EDR) solutions"
|
|
803
|
+
)
|
|
804
|
+
|
|
805
|
+
recommendations.append(
|
|
806
|
+
"Conduct regular security assessments and penetration tests"
|
|
807
|
+
)
|
|
808
|
+
recommendations.append("Implement defense-in-depth security architecture")
|
|
809
|
+
|
|
810
|
+
return recommendations
|
|
811
|
+
|
|
812
|
+
def _dict_to_finding(self, data: Dict[str, Any]) -> VerifiedFinding:
    """Convert a raw finding dictionary into a VerifiedFinding object."""
    # Fallbacks for fields that may be absent in the source dict.
    defaults = {
        "id": "UNKNOWN",
        "title": "Unknown",
        "severity": "medium",
        "cvss_score": 5.0,
        "description": "",
        "affected_host": "unknown",
        "port": None,
        "service": None,
    }
    kwargs = {key: data.get(key, fallback) for key, fallback in defaults.items()}
    return VerifiedFinding(**kwargs)
def get_report_template(self) -> str:
    """Generate a markdown report template.

    Renders self.report_data (built by _phase_report_preparation) into a
    human-readable markdown report: executive summary, methodology,
    per-finding technical details, and a remediation roadmap.

    Returns:
        The markdown report as a single string, or a short notice string
        when self.report_data is empty (workflow not run yet).
    """
    if not self.report_data:
        return "No report data available. Run the post-scan workflow first."

    # Executive summary dict produced by _phase_report_preparation.
    es = self.report_data.get("executive_summary", {})

    # Header + executive summary section (counts table, key findings).
    template = f"""# Penetration Testing Report

## Executive Summary

**Target:** {es.get('target', 'N/A')}
**Test Date:** {es.get('test_date', 'N/A')}
**Overall Risk Rating:** {es.get('overall_risk', 'N/A')}

### Findings Overview

| Severity | Count |
|----------|-------|
| Critical | {es.get('critical_count', 0)} |
| High | {es.get('high_count', 0)} |
| Medium | {es.get('medium_count', 0)} |
| Low | {es.get('low_count', 0)} |
| **Total Confirmed** | **{es.get('confirmed_vulnerabilities', 0)}** |

### Key Findings

- **{es.get('exploited_systems', 0)}** systems were successfully compromised
- **{es.get('false_positives', 0)}** findings were identified as false positives
- Evidence and logs collected in: `{self.evidence_dir}`

### Key Recommendations

"""
    # Every item uses "1." — markdown renderers auto-renumber ordered lists.
    for rec in es.get("key_recommendations", []):
        template += f"1. {rec}\n"

    # Static methodology section (PTES phases actually exercised here).
    template += """

## Methodology

This assessment followed the Penetration Testing Execution Standard (PTES):

1. Pre-Engagement Interactions
2. Intelligence Gathering
3. Threat Modeling
4. Vulnerability Analysis
5. **Exploitation** ✓
6. **Post-Exploitation** ✓
7. **Reporting** ✓

## Technical Findings

"""

    # Add finding details
    for finding in self.report_data.get("technical_findings", []):
        template += f"""### {finding['id']}: {finding['title']}

**Severity:** {finding['severity']} | **CVSS:** {finding['cvss_score']}

**Description:**
{finding['description']}

**Evidence:**
"""
        for screenshot in finding.get("evidence", {}).get("screenshots", []):
            template += f"- Screenshot: `{screenshot}`\n"

        # Exploitation subsection only for findings that were exploited.
        if finding.get("exploitation", {}).get("successful"):
            template += f"""
**Exploitation:**
- Method: {finding['exploitation'].get('method', 'N/A')}
- Successful: Yes
- Privileges Obtained: {finding.get('post_exploitation', {}).get('privileges', 'N/A')}

"""

    # Static remediation roadmap and footer.
    template += """
## Remediation Roadmap

### Immediate (Critical)
Address within 24-48 hours

### Short-term (High)
Address within 1-2 weeks

### Medium-term (Medium)
Address within 1-3 months

### Long-term (Low/Info)
Address as part of regular maintenance

---

*Report generated by Zen AI Pentest - PostScan Agent*
"""

    return template
# Convenience function for direct use
async def run_post_scan_workflow(
    target: str, findings: List[Dict[str, Any]]
) -> Dict[str, Any]:
    """Run the complete post-scan workflow against *target*.

    Thin wrapper that constructs a fresh PostScanAgent and delegates.

    Usage:
        results = await run_post_scan_workflow("192.168.1.1", scan_findings)
    """
    return await PostScanAgent().run(target, findings)
|