zen_ai_pentest-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +28 -0
- agents/agent_base.py +239 -0
- agents/agent_orchestrator.py +346 -0
- agents/analysis_agent.py +225 -0
- agents/cli.py +258 -0
- agents/exploit_agent.py +224 -0
- agents/integration.py +211 -0
- agents/post_scan_agent.py +937 -0
- agents/react_agent.py +384 -0
- agents/react_agent_enhanced.py +616 -0
- agents/react_agent_vm.py +298 -0
- agents/research_agent.py +176 -0
- api/__init__.py +11 -0
- api/auth.py +123 -0
- api/main.py +1027 -0
- api/schemas.py +357 -0
- api/websocket.py +97 -0
- autonomous/__init__.py +122 -0
- autonomous/agent.py +253 -0
- autonomous/agent_loop.py +1370 -0
- autonomous/exploit_validator.py +1537 -0
- autonomous/memory.py +448 -0
- autonomous/react.py +339 -0
- autonomous/tool_executor.py +488 -0
- backends/__init__.py +16 -0
- backends/chatgpt_direct.py +133 -0
- backends/claude_direct.py +130 -0
- backends/duckduckgo.py +138 -0
- backends/openrouter.py +120 -0
- benchmarks/__init__.py +149 -0
- benchmarks/benchmark_engine.py +904 -0
- benchmarks/ci_benchmark.py +785 -0
- benchmarks/comparison.py +729 -0
- benchmarks/metrics.py +553 -0
- benchmarks/run_benchmarks.py +809 -0
- ci_cd/__init__.py +2 -0
- core/__init__.py +17 -0
- core/async_pool.py +282 -0
- core/asyncio_fix.py +222 -0
- core/cache.py +472 -0
- core/container.py +277 -0
- core/database.py +114 -0
- core/input_validator.py +353 -0
- core/models.py +288 -0
- core/orchestrator.py +611 -0
- core/plugin_manager.py +571 -0
- core/rate_limiter.py +405 -0
- core/secure_config.py +328 -0
- core/shield_integration.py +296 -0
- modules/__init__.py +46 -0
- modules/cve_database.py +362 -0
- modules/exploit_assist.py +330 -0
- modules/nuclei_integration.py +480 -0
- modules/osint.py +604 -0
- modules/protonvpn.py +554 -0
- modules/recon.py +165 -0
- modules/sql_injection_db.py +826 -0
- modules/tool_orchestrator.py +498 -0
- modules/vuln_scanner.py +292 -0
- modules/wordlist_generator.py +566 -0
- risk_engine/__init__.py +99 -0
- risk_engine/business_impact.py +267 -0
- risk_engine/business_impact_calculator.py +563 -0
- risk_engine/cvss.py +156 -0
- risk_engine/epss.py +190 -0
- risk_engine/example_usage.py +294 -0
- risk_engine/false_positive_engine.py +1073 -0
- risk_engine/scorer.py +304 -0
- web_ui/backend/main.py +471 -0
- zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
- zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
- zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
- zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
- zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
- zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
backends/claude_direct.py
ADDED
@@ -0,0 +1,130 @@
#!/usr/bin/env python3
"""
Claude Direct Backend (Reverse Engineered)
Uses internal API for Claude.ai
Requires session cookie extraction
"""

import logging
import os
import sys
from typing import Optional

import aiohttp

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.async_fixes import safe_close_session

logger = logging.getLogger("ZenAI")


class ClaudeDirectBackend:
    """
    Direct Claude API Backend
    - Uses session key from browser
    - Optimized for long-form analysis
    - Good for code review and complex reasoning
    """

    def __init__(self, session_key: str = None):
        self.name = "Claude-Direct"
        self.priority = 3
        self.session_key = session_key
        self.session: Optional[aiohttp.ClientSession] = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await safe_close_session(self.session)

    async def chat(self, prompt: str, context: str = "") -> Optional[str]:
        """Send chat request to Claude"""
        if not self.session_key:
            logger.warning("[Claude-Direct] No session key provided")
            return None

        try:
            headers = {
                "Cookie": f"sessionKey={self.session_key}",
                "Content-Type": "application/json",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
                "Accept": "text/event-stream",
            }

            # Organization ID required for Claude
            org_id = await self._get_org_id()
            if not org_id:
                logger.error("[Claude-Direct] Could not get organization ID")
                return None

            payload = {
                "prompt": prompt,
                "model": "claude-3-5-sonnet-20241022",
                "timezone": "Europe/Berlin",
                "attachments": [],
            }

            logger.info("[Claude-Direct] Sending request...")

            async with self.session.post(
                f"https://claude.ai/api/organizations/{org_id}/chat_conversations",
                json=payload,
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=120),
            ) as resp:
                if resp.status == 401:
                    logger.error("[Claude-Direct] Session expired")
                    return None
                elif resp.status != 200:
                    logger.error(f"[Claude-Direct] HTTP Error: {resp.status}")
                    return None

                # Claude uses SSE (Server-Sent Events)
                full_response = ""
                async for line in resp.content:
                    line = line.decode("utf-8").strip()
                    if line.startswith("data: "):
                        try:
                            import json

                            data = json.loads(line[6:])
                            if data.get("completion"):
                                full_response += data["completion"]
                        except:
                            continue

                return full_response

        except Exception as e:
            logger.error(f"[Claude-Direct] Error: {e}")
            return None

    async def _get_org_id(self) -> Optional[str]:
        """Get organization ID from session"""
        try:
            headers = {
                "Cookie": f"sessionKey={self.session_key}",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            }

            async with self.session.get(
                "https://claude.ai/api/organizations", headers=headers
            ) as resp:
                if resp.status == 200:
                    import json

                    data = await resp.json()
                    if data and len(data) > 0:
                        return data[0].get("uuid")
            return None
        except:
            return None

    async def health_check(self) -> bool:
        """Check if backend is available"""
        if not self.session_key:
            return False
        return await self._get_org_id() is not None
backends/duckduckgo.py
ADDED
@@ -0,0 +1,138 @@
#!/usr/bin/env python3
"""
DuckDuckGo AI Backend
Free, no authentication required
Supports GPT-4o-mini, Claude-3-haiku, Llama-3.1-70B
"""

import logging
import os
import sys
from typing import Optional

import aiohttp

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.async_fixes import safe_close_session

logger = logging.getLogger("ZenAI")


class DuckDuckGoBackend:
    """
    DuckDuckGo AI Chat Backend
    - No API key required
    - ~50-100 requests per day limit
    - Automatic model rotation on rate limit
    """

    def __init__(self):
        self.name = "DuckDuckGo"
        self.priority = 1  # Highest priority (free, fast)
        self.vqd_token = None
        self.session: Optional[aiohttp.ClientSession] = None
        self.models = [
            "gpt-4o-mini",
            "claude-3-haiku",
            "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        ]
        self.current_model = 0

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await safe_close_session(self.session)

    async def _get_vqd_token(self):
        """Get anti-CSRF token from DDG"""
        if self.vqd_token:
            return

        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "de-DE,de;q=0.9,en;q=0.8",
        }

        try:
            async with self.session.get(
                "https://duckduckgo.com/?q=DuckDuckGo+AI+Chat&ia=chat", headers=headers
            ) as resp:
                text = await resp.text()
                if 'vqd="' in text:
                    self.vqd_token = text.split('vqd="')[1].split('"')[0]
                    logger.info(f"[DDG] VQD Token acquired: {self.vqd_token[:10]}...")
        except Exception as e:
            logger.error(f"[DDG] Token acquisition failed: {e}")

    async def chat(self, prompt: str, context: str = "") -> Optional[str]:
        """Send chat request to DuckDuckGo AI"""
        try:
            await self._get_vqd_token()

            if not self.vqd_token:
                return None

            model = self.models[self.current_model % len(self.models)]

            payload = {
                "model": model,
                "messages": [{"role": "user", "content": prompt}],
            }

            headers = {
                "X-Vqd-4": self.vqd_token,
                "Content-Type": "application/json",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
                "Referer": "https://duckduckgo.com/",
            }

            logger.info(f"[DDG] Sending to {model}...")

            async with self.session.post(
                "https://duckduckgo.com/duckchat/v1/chat", json=payload, headers=headers
            ) as resp:
                if resp.status == 429:
                    logger.warning("[DDG] Rate limited, rotating model...")
                    self.current_model += 1
                    return None

                if resp.status == 418:
                    logger.warning("[DDG] Teapot error (rate limit or invalid token)")
                    self.vqd_token = None  # Force token refresh
                    return None

                if resp.status != 200:
                    logger.error(f"[DDG] HTTP Error: {resp.status}")
                    return None

                # DDG streams JSON Lines
                full_response = ""
                async for line in resp.content:
                    line = line.decode("utf-8").strip()
                    if line.startswith("data: "):
                        try:
                            import json

                            data = json.loads(line[6:])
                            if "message" in data:
                                full_response += data["message"]
                        except:
                            continue

                return full_response

        except Exception as e:
            logger.error(f"[DDG] Error: {e}")
            return None

    async def health_check(self) -> bool:
        """Check if backend is available"""
        try:
            await self._get_vqd_token()
            return self.vqd_token is not None
        except:
            return False
backends/openrouter.py
ADDED
@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
OpenRouter Backend
One API key, many free models
Supports multiple LLMs through unified API
"""

import logging
import os
import random
import sys
from typing import Optional

import aiohttp

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.async_fixes import safe_close_session

logger = logging.getLogger("ZenAI")


class OpenRouterBackend:
    """
    OpenRouter API Backend
    - Single key access to multiple models
    - Generous free tier with rate limits
    - Automatic model rotation
    """

    def __init__(self, api_key: str = None):
        self.name = "OpenRouter"
        self.priority = 2  # Medium priority (requires key)
        self.api_key = api_key
        self.session: Optional[aiohttp.ClientSession] = None

        # Free tier models
        self.models = [
            "meta-llama/llama-3.2-3b-instruct:free",
            "google/gemini-flash-1.5:free",
            "microsoft/phi-3-mini-128k-instruct:free",
            "nvidia/llama-3.1-nemotron-70b-instruct:free",
        ]

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await safe_close_session(self.session)

    async def chat(self, prompt: str, context: str = "") -> Optional[str]:
        """Send chat request to OpenRouter"""
        if not self.api_key:
            logger.warning("[OpenRouter] No API key provided")
            return None

        try:
            model = random.choice(self.models)

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
                "HTTP-Referer": "https://localhost",
                "X-Title": "ZenAI-Pentest",
            }

            payload = {
                "model": model,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are a cybersecurity expert and penetration testing assistant.",
                    },
                    {"role": "user", "content": prompt},
                ],
                "temperature": 0.7,
            }

            logger.info(f"[OpenRouter] Using {model}...")

            async with self.session.post(
                "https://openrouter.ai/api/v1/chat/completions",
                json=payload,
                headers=headers,
            ) as resp:
                if resp.status == 429:
                    logger.warning("[OpenRouter] Rate limit hit")
                    return None
                elif resp.status == 401:
                    logger.error("[OpenRouter] Invalid API key")
                    return None
                elif resp.status != 200:
                    logger.error(f"[OpenRouter] HTTP Error: {resp.status}")
                    return None

                data = await resp.json()

                if "choices" in data and len(data["choices"]) > 0:
                    return data["choices"][0]["message"]["content"]

                return None

        except Exception as e:
            logger.error(f"[OpenRouter] Error: {e}")
            return None

    async def health_check(self) -> bool:
        """Check if backend is available"""
        if not self.api_key:
            return False
        try:
            # Try a simple request
            headers = {"Authorization": f"Bearer {self.api_key}"}
            async with self.session.get(
                "https://openrouter.ai/api/v1/auth/key", headers=headers
            ) as resp:
                return resp.status == 200
        except:
            return False
benchmarks/__init__.py
ADDED
@@ -0,0 +1,149 @@
"""
Zen-AI-Pentest Benchmarking & Testing Framework

Comprehensive benchmark suite for evaluating security testing performance.
"""

__version__ = "1.0.0"
__author__ = "Zen-AI-Pentest Team"

# Main components
from .benchmark_engine import (
    BenchmarkEngine,
    BenchmarkConfig,
    BenchmarkReport,
    BenchmarkStatus,
    ScenarioResult
)

from .metrics import (
    BenchmarkMetrics,
    ClassificationMetrics,
    CoverageMetrics,
    PerformanceMetrics,
    ExploitMetrics,
    TokenUsage,
    FindingMetrics,
    SeverityLevel,
    FindingType,
    MetricsAggregator,
    compare_metrics,
    calculate_confidence_interval
)

from .scenarios import (
    TestScenario,
    ScenarioType,
    DifficultyLevel,
    VulnerabilityProfile,
    get_scenario,
    get_scenarios_by_type,
    get_scenarios_by_difficulty,
    get_scenarios_by_tag,
    list_all_scenarios,
    create_benchmark_suite,
    ALL_SCENARIOS,
    # Pre-defined scenarios
    OWASP_JUICE_SHOP,
    DVWA_SCENARIO,
    METASPLOITABLE2_SCENARIO,
    METASPLOITABLE3_SCENARIO,
    WEBGOAT_SCENARIO,
    HTB_STARTING_POINT_TIER1,
    THM_OWASP_TOP10
)

from .comparison import (
    ComparisonFramework,
    ComparisonResult,
    CompetitorTool,
    ToolMetadata,
    ToolCapabilities,
    ToolCategory,
    ToolBenchmarkResult,
    PentestGPTCompetitor,
    AutoPentestDRLCompetitor,
    PENTESTGPT_METADATA,
    AUTOPENTEST_METADATA,
    NESSUS_METADATA,
    OPENVAS_METADATA,
    BURP_SUITE_METADATA,
    OWASP_ZAP_METADATA,
    NIKTO_METADATA,
    NUCLEI_METADATA,
    SQLMAP_METADATA
)

from .ci_benchmark import (
    CIBenchmarkRunner,
    CIConfig,
    PerformanceGate,
    GateResult,
    RegressionCheck,
    RegressionSeverity
)

__all__ = [
    # Engine
    "BenchmarkEngine",
    "BenchmarkConfig",
    "BenchmarkReport",
    "BenchmarkStatus",
    "ScenarioResult",

    # Metrics
    "BenchmarkMetrics",
    "ClassificationMetrics",
    "CoverageMetrics",
    "PerformanceMetrics",
    "ExploitMetrics",
    "TokenUsage",
    "FindingMetrics",
    "SeverityLevel",
    "FindingType",
    "MetricsAggregator",

    # Scenarios
    "TestScenario",
    "ScenarioType",
    "DifficultyLevel",
    "VulnerabilityProfile",
    "get_scenario",
    "get_scenarios_by_type",
    "get_scenarios_by_difficulty",
    "get_scenarios_by_tag",
    "list_all_scenarios",
    "create_benchmark_suite",

    # Comparison
    "ComparisonFramework",
    "ComparisonResult",
    "CompetitorTool",
    "ToolMetadata",
    "ToolCapabilities",
    "ToolCategory",
    "ToolBenchmarkResult",

    # CI/CD
    "CIBenchmarkRunner",
    "CIConfig",
    "PerformanceGate",
    "GateResult",
    "RegressionCheck",
    "RegressionSeverity",
]


def get_version() -> str:
    """Get framework version."""
    return __version__


def get_available_scenarios() -> list:
    """Get list of all available scenario IDs."""
    return list(ALL_SCENARIOS.keys())


def create_default_engine(output_dir: str = "benchmark_results") -> BenchmarkEngine:
    """Create a benchmark engine with default configuration."""
    return BenchmarkEngine(output_dir=output_dir)