zen-ai-pentest 2.0.0 (zen_ai_pentest-2.0.0-py3-none-any.whl)
This diff shows the contents of publicly released package versions as they appear in their public registries; it is provided for informational purposes only.
- agents/__init__.py +28 -0
- agents/agent_base.py +239 -0
- agents/agent_orchestrator.py +346 -0
- agents/analysis_agent.py +225 -0
- agents/cli.py +258 -0
- agents/exploit_agent.py +224 -0
- agents/integration.py +211 -0
- agents/post_scan_agent.py +937 -0
- agents/react_agent.py +384 -0
- agents/react_agent_enhanced.py +616 -0
- agents/react_agent_vm.py +298 -0
- agents/research_agent.py +176 -0
- api/__init__.py +11 -0
- api/auth.py +123 -0
- api/main.py +1027 -0
- api/schemas.py +357 -0
- api/websocket.py +97 -0
- autonomous/__init__.py +122 -0
- autonomous/agent.py +253 -0
- autonomous/agent_loop.py +1370 -0
- autonomous/exploit_validator.py +1537 -0
- autonomous/memory.py +448 -0
- autonomous/react.py +339 -0
- autonomous/tool_executor.py +488 -0
- backends/__init__.py +16 -0
- backends/chatgpt_direct.py +133 -0
- backends/claude_direct.py +130 -0
- backends/duckduckgo.py +138 -0
- backends/openrouter.py +120 -0
- benchmarks/__init__.py +149 -0
- benchmarks/benchmark_engine.py +904 -0
- benchmarks/ci_benchmark.py +785 -0
- benchmarks/comparison.py +729 -0
- benchmarks/metrics.py +553 -0
- benchmarks/run_benchmarks.py +809 -0
- ci_cd/__init__.py +2 -0
- core/__init__.py +17 -0
- core/async_pool.py +282 -0
- core/asyncio_fix.py +222 -0
- core/cache.py +472 -0
- core/container.py +277 -0
- core/database.py +114 -0
- core/input_validator.py +353 -0
- core/models.py +288 -0
- core/orchestrator.py +611 -0
- core/plugin_manager.py +571 -0
- core/rate_limiter.py +405 -0
- core/secure_config.py +328 -0
- core/shield_integration.py +296 -0
- modules/__init__.py +46 -0
- modules/cve_database.py +362 -0
- modules/exploit_assist.py +330 -0
- modules/nuclei_integration.py +480 -0
- modules/osint.py +604 -0
- modules/protonvpn.py +554 -0
- modules/recon.py +165 -0
- modules/sql_injection_db.py +826 -0
- modules/tool_orchestrator.py +498 -0
- modules/vuln_scanner.py +292 -0
- modules/wordlist_generator.py +566 -0
- risk_engine/__init__.py +99 -0
- risk_engine/business_impact.py +267 -0
- risk_engine/business_impact_calculator.py +563 -0
- risk_engine/cvss.py +156 -0
- risk_engine/epss.py +190 -0
- risk_engine/example_usage.py +294 -0
- risk_engine/false_positive_engine.py +1073 -0
- risk_engine/scorer.py +304 -0
- web_ui/backend/main.py +471 -0
- zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
- zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
- zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
- zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
- zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
- zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
core/orchestrator.py
ADDED
@@ -0,0 +1,611 @@
#!/usr/bin/env python3
"""
Zen AI Hybrid Orchestrator
Multi-LLM Routing System with Penetration Testing Capabilities
Enhanced with Autonomous Agent Loop, Risk Engine, and Exploit Validation

Author: SHAdd0WTAka
"""

import asyncio
import logging
import os
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional

import aiohttp

# Apply Windows/asyncio fixes before any other imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.async_fixes import apply_windows_async_fixes, safe_close_session  # noqa: E402

apply_windows_async_fixes()

# Import new components (with graceful fallback)
try:
    from autonomous import AutonomousAgentLoop, AgentState, AgentMemory
    from autonomous import ExploitValidator, ExploitValidatorPool, ExploitType
    from autonomous import ScopeConfig, SandboxConfig, ExploitResult
    AUTONOMOUS_AVAILABLE = True
except ImportError as e:
    AUTONOMOUS_AVAILABLE = False
    logging.warning(f"Autonomous components not available: {e}")
    AutonomousAgentLoop = None
    AgentState = None
    AgentMemory = None
    ExploitValidator = None
    ExploitValidatorPool = None
    ExploitType = None
    ScopeConfig = None
    SandboxConfig = None
    ExploitResult = None

try:
    from risk_engine import FalsePositiveEngine, BusinessImpactCalculator
    from risk_engine import Finding, FindingStatus, ValidationResult
    from risk_engine import ConfidenceLevel, ComplianceType, DataClassification
    RISK_ENGINE_AVAILABLE = True
except ImportError as e:
    RISK_ENGINE_AVAILABLE = False
    logging.warning(f"Risk engine components not available: {e}")
    FalsePositiveEngine = None
    BusinessImpactCalculator = None
    Finding = None
    FindingStatus = None
    ValidationResult = None
    ConfidenceLevel = None
    ComplianceType = None
    DataClassification = None

try:
    from integrations import load_integrations_from_config
    INTEGRATIONS_AVAILABLE = True
except ImportError:
    INTEGRATIONS_AVAILABLE = False
    load_integrations_from_config = None

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.FileHandler("logs/zen_ai.log"), logging.StreamHandler()],
)
logger = logging.getLogger("ZenAI")


class QualityLevel(Enum):
    """Quality levels for LLM responses"""

    LOW = "low"  # Fast, free (DDG)
    MEDIUM = "medium"  # OpenRouter Free Tier
    HIGH = "high"  # Direct API (ChatGPT/Claude Web)


@dataclass
class LLMResponse:
    """Standardized LLM response"""

    content: str
    source: str
    latency: float
    quality: QualityLevel
    metadata: Dict = None


class BaseBackend(ABC):
    """Abstract base class for LLM backends"""

    def __init__(self, name: str, priority: int):
        self.name = name
        self.priority = priority
        self.session: Optional[aiohttp.ClientSession] = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await safe_close_session(self.session)

    @abstractmethod
    async def chat(self, prompt: str, context: str = "") -> Optional[str]:
        """Send prompt to backend and return response"""
        pass

    async def health_check(self) -> bool:
        """Check if backend is available"""
        return True


class ZenOrchestrator:
    """
    Central orchestrator for multi-LLM management
    Routes requests to appropriate backends based on quality requirements

    Enhanced with:
    - Autonomous Agent Loop integration
    - False Positive Engine
    - Exploit Validation
    - CI/CD Integrations
    """

    def __init__(self):
        self.backends: List[BaseBackend] = []
        self.results_cache = {}
        self.request_count = 0

        # Initialize new components
        self._autonomous_loop: Optional[Any] = None
        self._fp_engine: Optional[Any] = None
        self._exploit_validator: Optional[Any] = None
        self._business_impact: Optional[Any] = None
        self._integrations: Dict[str, Any] = {}

        # Initialize if available
        self._initialize_components()

    def _initialize_components(self):
        """Initialize optional components."""
        if AUTONOMOUS_AVAILABLE:
            try:
                self._autonomous_loop = AutonomousAgentLoop(max_iterations=50)
                logger.info("AutonomousAgentLoop initialized")
            except Exception as e:
                logger.warning(f"Failed to initialize AutonomousAgentLoop: {e}")

        if RISK_ENGINE_AVAILABLE:
            try:
                self._fp_engine = FalsePositiveEngine()
                self._business_impact = BusinessImpactCalculator()
                logger.info("Risk engine components initialized")
            except Exception as e:
                logger.warning(f"Failed to initialize Risk Engine: {e}")

        if AUTONOMOUS_AVAILABLE and ExploitValidator:
            try:
                self._exploit_validator = ExploitValidator()
                logger.info("ExploitValidator initialized")
            except Exception as e:
                logger.warning(f"Failed to initialize ExploitValidator: {e}")

        if INTEGRATIONS_AVAILABLE and load_integrations_from_config:
            try:
                self._integrations = load_integrations_from_config()
                logger.info(f"Loaded {len(self._integrations)} integrations")
            except Exception as e:
                logger.warning(f"Failed to load integrations: {e}")

    def add_backend(self, backend: BaseBackend):
        """Register a new backend"""
        self.backends.append(backend)
        # Sort by priority (lowest first = faster/free)
        self.backends.sort(key=lambda x: x.priority)
        logger.info(
            f"[Zen] Backend '{backend.name}' registered with priority {backend.priority}"
        )

    async def process(
        self, prompt: str, required_quality: QualityLevel = QualityLevel.MEDIUM
    ) -> LLMResponse:
        """
        Process a prompt through available backends
        Tries backends in priority order with automatic fallback
        """
        self.request_count += 1
        start_time = asyncio.get_event_loop().time()

        logger.info(
            f"[Zen] Request #{self.request_count} | Quality: {required_quality.value}"
        )

        # Filter candidates based on quality requirement
        if required_quality == QualityLevel.LOW:
            candidates = [b for b in self.backends if b.priority == 1]
        elif required_quality == QualityLevel.MEDIUM:
            candidates = [b for b in self.backends if b.priority <= 2]
        else:
            candidates = self.backends

        # Try sequentially (faster than parallel for different tiers)
        for backend in candidates:
            logger.info(f"[Zen] Trying {backend.name}...")

            try:
                result = await backend.chat(prompt)

                if result and len(result) > 10:
                    latency = asyncio.get_event_loop().time() - start_time

                    quality = (
                        QualityLevel.HIGH
                        if backend.priority == 3
                        else (
                            QualityLevel.MEDIUM
                            if backend.priority == 2
                            else QualityLevel.LOW
                        )
                    )

                    logger.info(f"[Zen] Success from {backend.name} in {latency:.2f}s")

                    return LLMResponse(
                        content=result,
                        source=backend.name,
                        latency=latency,
                        quality=quality,
                        metadata={
                            "model": getattr(backend, "current_model", "unknown")
                        },
                    )

            except Exception as e:
                logger.error(f"[Zen] Backend {backend.name} failed: {e}")
                continue

        # Complete failure
        logger.error("[Zen] All backends failed")
        return LLMResponse(
            content="All backends failed. Check logs.",
            source="None",
            latency=0,
            quality=QualityLevel.LOW,
        )

    async def parallel_consensus(self, prompt: str) -> Dict[str, Any]:
        """
        For critical tasks: Query multiple backends simultaneously
        Returns consensus and alternative responses
        """
        logger.info("[Zen] Parallel consensus mode activated")

        tasks = []
        for backend in self.backends[:2]:  # Top 2 (DDG + OpenRouter)
            tasks.append(backend.chat(prompt))

        results = await asyncio.gather(*tasks, return_exceptions=True)

        valid = []
        for backend, result in zip(self.backends[:2], results):
            if isinstance(result, str) and len(result) > 50:
                valid.append({"source": backend.name, "content": result})

        return {
            "responses": valid,
            "consensus": valid[0] if valid else None,
            "alternative": valid[1] if len(valid) > 1 else None,
        }

    # ==================== NEW METHODS ====================

    async def run_autonomous_scan(
        self,
        target: str,
        goal: str,
        scope: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Run an autonomous scan using the AutonomousAgentLoop.

        Args:
            target: Target URL/IP/Domain
            goal: Scan goal (e.g., "Find vulnerabilities", "Enumerate subdomains")
            scope: Optional scope configuration

        Returns:
            Scan results with findings and execution details
        """
        if not AUTONOMOUS_AVAILABLE or not self._autonomous_loop:
            return {
                "success": False,
                "error": "Autonomous Agent Loop not available",
                "findings": []
            }

        logger.info(f"[Zen] Starting autonomous scan on {target}")

        try:
            result = await self._autonomous_loop.run(
                goal=goal,
                target=target,
                scope=scope or {}
            )

            logger.info(f"[Zen] Autonomous scan completed: {result.get('state', 'unknown')}")

            # Validate findings if risk engine is available
            if RISK_ENGINE_AVAILABLE and self._fp_engine and result.get('findings'):
                validated_findings = await self.validate_findings(
                    result['findings'].get('items', [])
                )
                result['validated_findings'] = validated_findings

            return result

        except Exception as e:
            logger.error(f"[Zen] Autonomous scan failed: {e}")
            return {
                "success": False,
                "error": str(e),
                "findings": []
            }

    async def validate_findings(
        self,
        findings: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Validate findings using the False Positive Engine.

        Args:
            findings: List of raw findings from scanners

        Returns:
            List of validated findings with confidence scores
        """
        if not RISK_ENGINE_AVAILABLE or not self._fp_engine:
            logger.warning("[Zen] Risk Engine not available, returning unvalidated findings")
            return findings

        validated = []

        for finding_data in findings:
            try:
                # Convert dict to Finding object
                finding = Finding(
                    id=finding_data.get('id', ''),
                    title=finding_data.get('title', 'Unknown'),
                    description=finding_data.get('description', ''),
                    severity=finding_data.get('severity', 'medium'),
                    target=finding_data.get('target', ''),
                    source=finding_data.get('source', ''),
                )

                # Validate
                result = await self._fp_engine.validate_finding(finding)

                validated.append({
                    "original": finding_data,
                    "validation": result.to_dict() if hasattr(result, 'to_dict') else result,
                    "is_false_positive": getattr(result, 'is_false_positive', False),
                    "confidence": getattr(result, 'confidence', 0.0),
                    "priority": getattr(result, 'priority', 999)
                })

            except Exception as e:
                logger.error(f"[Zen] Finding validation failed: {e}")
                validated.append({
                    "original": finding_data,
                    "validation": None,
                    "is_false_positive": False,
                    "error": str(e)
                })

        # Sort by priority (highest first)
        validated.sort(key=lambda x: x.get('priority', 999))

        return validated

    async def execute_exploit(
        self,
        exploit: Dict[str, Any],
        target: str,
        safe_mode: bool = True
    ) -> Dict[str, Any]:
        """
        Execute an exploit with validation and safety controls.

        Args:
            exploit: Exploit definition with code, type, etc.
            target: Target URL/host
            safe_mode: If True, run in controlled/safe mode

        Returns:
            Exploit execution result with evidence
        """
        if not AUTONOMOUS_AVAILABLE or not ExploitValidator:
            return {
                "success": False,
                "error": "Exploit Validator not available"
            }

        logger.info(f"[Zen] Executing exploit on {target} (safe_mode={safe_mode})")

        try:
            # Create validator with appropriate safety level
            safety = "controlled" if safe_mode else "full"
            validator = ExploitValidator(
                safety_level=safety,
                scope_config=ScopeConfig(allowed_hosts=[target]),
                sandbox_config=SandboxConfig(
                    use_docker=False,  # Default to local for compatibility
                    timeout=300
                )
            )

            # Get exploit details
            exploit_code = exploit.get('code', '')
            exploit_type_str = exploit.get('type', 'web_rce')

            # Map string to enum
            type_map = {
                'sqli': ExploitType.WEB_SQLI,
                'sql_injection': ExploitType.WEB_SQLI,
                'xss': ExploitType.WEB_XSS,
                'rce': ExploitType.WEB_RCE,
                'lfi': ExploitType.WEB_LFI,
                'command_injection': ExploitType.WEB_CMD_INJECTION,
            }
            exploit_type = type_map.get(exploit_type_str.lower(), ExploitType.WEB_RCE)

            # Execute
            result = await validator.validate(
                exploit_code=exploit_code,
                target=target,
                exploit_type=exploit_type
            )

            return {
                "success": result.success,
                "exploitable": result.success,
                "evidence": result.evidence.to_dict() if result.evidence else {},
                "output": result.output,
                "error": result.error,
                "severity": result.severity,
                "remediation": result.remediation,
                "execution_time": result.execution_time
            }

        except Exception as e:
            logger.error(f"[Zen] Exploit execution failed: {e}")
            return {
                "success": False,
                "error": str(e),
                "exploitable": False
            }

    async def calculate_business_impact(
        self,
        finding: Dict[str, Any],
        asset_context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Calculate business impact for a finding.

        Args:
            finding: Security finding details
            asset_context: Optional asset context information

        Returns:
            Business impact assessment
        """
        if not RISK_ENGINE_AVAILABLE or not self._business_impact:
            return {
                "error": "Business Impact Calculator not available",
                "overall_score": 0.5
            }

        try:
            from risk_engine import AssetContext, AssetCriticality, DataClassification

            # Create asset context
            if asset_context:
                ctx = AssetContext(
                    asset_id=asset_context.get('id', 'unknown'),
                    asset_name=asset_context.get('name', 'Unknown Asset'),
                    asset_type=asset_context.get('type', 'unknown'),
                    criticality=AssetCriticality[asset_context.get('criticality', 'MEDIUM')],
                    data_classification=DataClassification[asset_context.get('data_classification', 'INTERNAL')],
                    internet_exposed=asset_context.get('internet_exposed', False),
                    user_count=asset_context.get('user_count', 0),
                    revenue_dependency=asset_context.get('revenue_dependency', 0.0)
                )
            else:
                ctx = AssetContext(
                    asset_id="unknown",
                    asset_name="Unknown Asset",
                    asset_type="unknown",
                    criticality=AssetCriticality.MEDIUM,
                    data_classification=DataClassification.INTERNAL
                )

            result = self._business_impact.calculate_overall_impact(
                asset_context=ctx,
                finding_type=finding.get('type', 'unknown'),
                severity=finding.get('severity', 'medium')
            )

            return {
                "overall_score": result.overall_score,
                "risk_category": result.get_risk_category(),
                "financial_impact": {
                    "total_costs": result.financial_impact.total_costs,
                    "direct_costs": result.financial_impact.direct_costs,
                    "indirect_costs": result.financial_impact.indirect_costs,
                },
                "compliance_impact": {
                    "frameworks": [f.name for f in result.compliance_impact.frameworks],
                    "violated_controls": result.compliance_impact.violated_controls[:5],
                    "max_fine": result.compliance_impact.get_max_fine()
                },
                "recommendations": result.get_prioritized_remediation()
            }

        except Exception as e:
            logger.error(f"[Zen] Business impact calculation failed: {e}")
            return {
                "error": str(e),
                "overall_score": 0.5
            }

    async def notify_integrations(
        self,
        event: str,
        data: Dict[str, Any]
    ) -> Dict[str, bool]:
        """
        Send notifications to configured integrations.

        Args:
            event: Event type (scan_started, scan_completed, finding, etc.)
            data: Event data

        Returns:
            Status of each integration notification
        """
        results = {}

        for name, integration in self._integrations.items():
            try:
                if event == "scan_started" and hasattr(integration, 'notify_scan_started'):
                    await integration.notify_scan_started(
                        target=data.get('target', 'unknown'),
                        scan_type=data.get('scan_type', 'security')
                    )
                    results[name] = True

                elif event == "scan_completed" and hasattr(integration, 'notify_scan_completed'):
                    await integration.notify_scan_completed(
                        results=data,
                        target=data.get('target', 'unknown')
                    )
                    results[name] = True

                elif event == "finding" and hasattr(integration, 'notify_finding'):
                    await integration.notify_finding(data)
                    results[name] = True

                else:
                    results[name] = False

            except Exception as e:
                logger.error(f"[Zen] Failed to notify {name}: {e}")
                results[name] = False

        return results

    def get_stats(self) -> Dict:
        """Get orchestrator statistics"""
        return {
            "backends_registered": len(self.backends),
            "backends": [b.name for b in self.backends],
            "requests_processed": self.request_count,
            "autonomous_available": AUTONOMOUS_AVAILABLE,
            "risk_engine_available": RISK_ENGINE_AVAILABLE,
            "integrations_available": INTEGRATIONS_AVAILABLE,
            "integrations_loaded": list(self._integrations.keys())
        }

    def get_capabilities(self) -> Dict[str, bool]:
        """Get available capabilities."""
        return {
            "autonomous_scan": AUTONOMOUS_AVAILABLE and self._autonomous_loop is not None,
            "false_positive_validation": RISK_ENGINE_AVAILABLE and self._fp_engine is not None,
            "exploit_validation": AUTONOMOUS_AVAILABLE and self._exploit_validator is not None,
            "business_impact": RISK_ENGINE_AVAILABLE and self._business_impact is not None,
            "slack_notifications": "slack" in self._integrations,
            "github_integration": "github" in self._integrations,
            "jira_integration": "jira" in self._integrations,
        }