zen-ai-pentest 2.0.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +28 -0
- agents/agent_base.py +239 -0
- agents/agent_orchestrator.py +346 -0
- agents/analysis_agent.py +225 -0
- agents/cli.py +258 -0
- agents/exploit_agent.py +224 -0
- agents/integration.py +211 -0
- agents/post_scan_agent.py +937 -0
- agents/react_agent.py +384 -0
- agents/react_agent_enhanced.py +616 -0
- agents/react_agent_vm.py +298 -0
- agents/research_agent.py +176 -0
- api/__init__.py +11 -0
- api/auth.py +123 -0
- api/main.py +1027 -0
- api/schemas.py +357 -0
- api/websocket.py +97 -0
- autonomous/__init__.py +122 -0
- autonomous/agent.py +253 -0
- autonomous/agent_loop.py +1370 -0
- autonomous/exploit_validator.py +1537 -0
- autonomous/memory.py +448 -0
- autonomous/react.py +339 -0
- autonomous/tool_executor.py +488 -0
- backends/__init__.py +16 -0
- backends/chatgpt_direct.py +133 -0
- backends/claude_direct.py +130 -0
- backends/duckduckgo.py +138 -0
- backends/openrouter.py +120 -0
- benchmarks/__init__.py +149 -0
- benchmarks/benchmark_engine.py +904 -0
- benchmarks/ci_benchmark.py +785 -0
- benchmarks/comparison.py +729 -0
- benchmarks/metrics.py +553 -0
- benchmarks/run_benchmarks.py +809 -0
- ci_cd/__init__.py +2 -0
- core/__init__.py +17 -0
- core/async_pool.py +282 -0
- core/asyncio_fix.py +222 -0
- core/cache.py +472 -0
- core/container.py +277 -0
- core/database.py +114 -0
- core/input_validator.py +353 -0
- core/models.py +288 -0
- core/orchestrator.py +611 -0
- core/plugin_manager.py +571 -0
- core/rate_limiter.py +405 -0
- core/secure_config.py +328 -0
- core/shield_integration.py +296 -0
- modules/__init__.py +46 -0
- modules/cve_database.py +362 -0
- modules/exploit_assist.py +330 -0
- modules/nuclei_integration.py +480 -0
- modules/osint.py +604 -0
- modules/protonvpn.py +554 -0
- modules/recon.py +165 -0
- modules/sql_injection_db.py +826 -0
- modules/tool_orchestrator.py +498 -0
- modules/vuln_scanner.py +292 -0
- modules/wordlist_generator.py +566 -0
- risk_engine/__init__.py +99 -0
- risk_engine/business_impact.py +267 -0
- risk_engine/business_impact_calculator.py +563 -0
- risk_engine/cvss.py +156 -0
- risk_engine/epss.py +190 -0
- risk_engine/example_usage.py +294 -0
- risk_engine/false_positive_engine.py +1073 -0
- risk_engine/scorer.py +304 -0
- web_ui/backend/main.py +471 -0
- zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
- zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
- zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
- zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
- zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
- zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
core/secure_config.py
ADDED
@@ -0,0 +1,328 @@

"""
Secure Configuration Management
Handles API keys with keyring, .env support, and encryption
"""

import base64
import json
import logging
import os
from dataclasses import asdict, dataclass
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, Literal, Optional

try:
    import keyring

    KEYRING_AVAILABLE = True
except ImportError:
    KEYRING_AVAILABLE = False

try:
    from cryptography.fernet import Fernet
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

    CRYPTO_AVAILABLE = True
except ImportError:
    CRYPTO_AVAILABLE = False

from dotenv import load_dotenv

logger = logging.getLogger(__name__)

# Service name for keyring
KEYRING_SERVICE = "zen-ai-pentest"
ENV_PREFIX = "ZEN_"


@dataclass
class APIKeyConfig:
    """Configuration for API keys"""

    openrouter_key: Optional[str] = None
    openai_key: Optional[str] = None
    anthropic_key: Optional[str] = None
    github_token: Optional[str] = None
    shodan_key: Optional[str] = None
    censys_id: Optional[str] = None
    censys_secret: Optional[str] = None

    def get_key(self, provider: str) -> Optional[str]:
        """Get API key for a specific provider"""
        mapping = {
            "openrouter": self.openrouter_key,
            "openai": self.openai_key,
            "anthropic": self.anthropic_key,
            "claude": self.anthropic_key,
            "chatgpt": self.openai_key,
            "github": self.github_token,
            "shodan": self.shodan_key,
            "censys_id": self.censys_id,
            "censys_secret": self.censys_secret,
        }
        return mapping.get(provider.lower())


class SecureConfigManager:
    """
    Manages configuration securely using:
    1. Environment variables (highest priority)
    2. Keyring (system keychain)
    3. Encrypted config file
    4. Plain config file (fallback, not recommended)
    """

    def __init__(self, config_dir: Optional[Path] = None):
        self.config_dir = config_dir or Path.home() / ".config" / "zen-ai-pentest"
        self.config_dir.mkdir(parents=True, exist_ok=True)

        self.config_file = self.config_dir / "config.json"
        self.encrypted_config = self.config_dir / "config.enc"

        # Load .env file if exists
        env_file = self.config_dir / ".env"
        if env_file.exists():
            load_dotenv(env_file)
        # Also check project root
        load_dotenv(Path(".env"))

        self._cache: Dict[str, Any] = {}

    def _get_keyring_password(self, key_name: str) -> Optional[str]:
        """Get password from system keyring"""
        if not KEYRING_AVAILABLE:
            return None
        try:
            return keyring.get_password(KEYRING_SERVICE, key_name)
        except Exception as e:
            logger.debug(f"Keyring error for {key_name}: {e}")
            return None

    def _set_keyring_password(self, key_name: str, password: str) -> bool:
        """Store password in system keyring"""
        if not KEYRING_AVAILABLE:
            logger.warning("Keyring not available, install with: pip install keyring")
            return False
        try:
            keyring.set_password(KEYRING_SERVICE, key_name, password)
            return True
        except Exception as e:
            logger.error(f"Failed to store {key_name} in keyring: {e}")
            return False

    def get_api_key(self, provider: str, prefer_keyring: bool = True) -> Optional[str]:
        """
        Get API key with priority:
        1. Environment variable (ZEN_{PROVIDER}_KEY)
        2. Keyring (if prefer_keyring=True)
        3. Encrypted config
        """
        env_var = f"{ENV_PREFIX}{provider.upper()}_KEY"

        # 1. Check environment variable
        env_value = os.getenv(env_var) or os.getenv(f"{provider.upper()}_KEY")
        if env_value:
            logger.debug(f"Using API key for {provider} from environment")
            return env_value

        # 2. Check keyring
        if prefer_keyring and KEYRING_AVAILABLE:
            keyring_value = self._get_keyring_password(f"{provider}_key")
            if keyring_value:
                logger.debug(f"Using API key for {provider} from keyring")
                return keyring_value

        # 3. Check encrypted config
        if CRYPTO_AVAILABLE and self.encrypted_config.exists():
            config = self._load_encrypted_config()
            if config and provider in config:
                return config[provider]

        return None

    def set_api_key(
        self,
        provider: str,
        key: str,
        storage: Literal["keyring", "encrypted", "env"] = "keyring",
    ) -> bool:
        """
        Store API key securely

        Args:
            provider: Provider name (openai, anthropic, etc.)
            key: The API key
            storage: Where to store - keyring, encrypted, or env
        """
        if storage == "keyring":
            return self._set_keyring_password(f"{provider}_key", key)

        elif storage == "encrypted":
            if not CRYPTO_AVAILABLE:
                logger.error("cryptography library required for encrypted storage")
                return False
            config = self._load_encrypted_config() or {}
            config[provider] = key
            return self._save_encrypted_config(config)

        elif storage == "env":
            env_file = self.config_dir / ".env"
            env_var = f"{ENV_PREFIX}{provider.upper()}_KEY"

            # Read existing
            lines = []
            if env_file.exists():
                lines = env_file.read_text().splitlines()

            # Update or append
            updated = False
            for i, line in enumerate(lines):
                if line.startswith(f"{env_var}="):
                    lines[i] = f"{env_var}={key}"
                    updated = True
                    break

            if not updated:
                lines.append(f"{env_var}={key}")

            env_file.write_text("\n".join(lines) + "\n")
            # Secure permissions
            os.chmod(env_file, 0o600)
            return True

        return False

    def _derive_key(self, password: str, salt: bytes) -> bytes:
        """Derive encryption key from password"""
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=480000,
        )
        return base64.urlsafe_b64encode(kdf.derive(password.encode()))

    def _load_encrypted_config(self) -> Optional[Dict[str, str]]:
        """Load and decrypt config file"""
        if not self.encrypted_config.exists():
            return None

        try:
            # Get master password from keyring or prompt
            master = self._get_keyring_password("master")
            if not master:
                return None

            data = self.encrypted_config.read_bytes()
            salt = data[:16]
            encrypted = data[16:]

            key = self._derive_key(master, salt)
            f = Fernet(key)
            decrypted = f.decrypt(encrypted)

            return json.loads(decrypted)
        except Exception as e:
            logger.error(f"Failed to decrypt config: {e}")
            return None

    def _save_encrypted_config(self, config: Dict[str, str]) -> bool:
        """Encrypt and save config file"""
        try:
            import secrets

            # Get or create master password
            master = self._get_keyring_password("master")
            if not master:
                master = secrets.token_urlsafe(32)
                self._set_keyring_password("master", master)

            salt = secrets.token_bytes(16)
            key = self._derive_key(master, salt)

            f = Fernet(key)
            encrypted = f.encrypt(json.dumps(config).encode())

            self.encrypted_config.write_bytes(salt + encrypted)
            os.chmod(self.encrypted_config, 0o600)
            return True
        except Exception as e:
            logger.error(f"Failed to encrypt config: {e}")
            return False

    def list_configured_keys(self) -> list:
        """List all configured API keys (names only, no values)"""
        keys = []
        providers = ["openai", "anthropic", "openrouter", "github", "shodan"]

        for provider in providers:
            if self.get_api_key(provider):
                keys.append(provider)

        return keys

    def remove_api_key(self, provider: str) -> bool:
        """Remove API key from all storage locations"""
        success = True

        # Remove from keyring
        if KEYRING_AVAILABLE:
            try:
                keyring.delete_password(KEYRING_SERVICE, f"{provider}_key")
            except:
                pass

        # Remove from encrypted config
        if self.encrypted_config.exists():
            config = self._load_encrypted_config() or {}
            if provider in config:
                del config[provider]
                success = self._save_encrypted_config(config) and success

        return success


@lru_cache()
def get_secure_config() -> SecureConfigManager:
    """Get singleton instance of SecureConfigManager"""
    return SecureConfigManager()


def migrate_plain_config(config_path: Path) -> bool:
    """Migrate plain text config to secure storage"""
    if not config_path.exists():
        return False

    try:
        with open(config_path) as f:
            old_config = json.load(f)

        manager = get_secure_config()

        # Migrate API keys
        key_mapping = {
            "openai_key": "openai",
            "anthropic_key": "anthropic",
            "openrouter_key": "openrouter",
            "github_token": "github",
            "shodan_key": "shodan",
        }

        for old_key, provider in key_mapping.items():
            if old_key in old_config and old_config[old_key]:
                manager.set_api_key(provider, old_config[old_key], storage="keyring")

        # Backup old config
        backup_path = config_path.with_suffix(".json.backup")
        config_path.rename(backup_path)

        logger.info(
            f"Config migrated to secure storage. Old config backed up to {backup_path}"
        )
        return True

    except Exception as e:
        logger.error(f"Migration failed: {e}")
        return False
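For orientation, a minimal usage sketch of the configuration API above (not part of the released file). It assumes the wheel's top-level core package is importable; the provider name and key value are placeholders.

    from pathlib import Path
    from core.secure_config import get_secure_config, migrate_plain_config

    config = get_secure_config()  # lru_cache-backed singleton of SecureConfigManager

    # Store a key in the system keyring (returns False if keyring is unavailable)
    config.set_api_key("openrouter", "sk-or-placeholder-key", storage="keyring")

    # Lookup order: ZEN_OPENROUTER_KEY env var -> keyring -> encrypted config file
    key = config.get_api_key("openrouter")

    print(config.list_configured_keys())  # provider names only, never values

    # One-off migration of a legacy plaintext config into secure storage
    migrate_plain_config(Path.home() / ".config" / "zen-ai-pentest" / "config.json")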
core/shield_integration.py
ADDED
@@ -0,0 +1,296 @@

"""
Integration of Zen Shield with ZenOrchestrator
Adds security layer to LLM interactions
"""

import logging
from typing import Any, Dict, Optional

import aiohttp

from zen_shield.sanitizer import ZenSanitizer
from zen_shield.schemas import SanitizerRequest, SanitizerResponse

logger = logging.getLogger(__name__)


class ShieldedOrchestrator:
    """
    Wrapper for ZenOrchestrator that adds data sanitization.

    Pipeline:
    1. Raw tool output -> Zen Shield (local sanitization)
    2. Clean data -> Big LLM (GPT-4/Claude) for analysis
    3. Response -> Optional: Normalize through shield

    This ensures:
    - No secrets leak to cloud LLMs
    - Reduced token costs (90% noise reduction)
    - Compliance with data protection policies
    - Fallback if sanitization fails
    """

    def __init__(
        self,
        bridge_url: str = "http://integration-bridge:8080",
        shield_url: str = "http://zen-shield:9000",
        big_llm_api_key: Optional[str] = None,
        big_llm_provider: str = "openai",
    ):
        """
        Initialize shielded orchestrator

        Args:
            bridge_url: URL of integration bridge for tools
            shield_url: URL of Zen Shield sanitizer service
            big_llm_api_key: API key for big LLM (GPT-4/Claude)
            big_llm_provider: Provider name
        """
        self.bridge_url = bridge_url
        self.shield_url = shield_url
        self.big_llm_api_key = big_llm_api_key
        self.big_llm_provider = big_llm_provider

        self.session: Optional[aiohttp.ClientSession] = None

        # Local sanitizer as fallback
        self.local_sanitizer: Optional[ZenSanitizer] = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()

        # Try to initialize local sanitizer as backup
        try:
            self.local_sanitizer = ZenSanitizer()
        except Exception as e:
            logger.warning(f"Could not initialize local sanitizer: {e}")

        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await self.session.close()

    async def analyze_tool_output(
        self, raw_output: str, source_tool: str, intent: str = "analyze"
    ) -> Dict[str, Any]:
        """
        Analyze tool output with security sanitization

        Args:
            raw_output: Raw output from pentesting tool
            source_tool: Name of tool (nmap, nuclei, etc.)
            intent: Purpose of analysis

        Returns:
            Analysis result with metadata
        """
        # Step 1: Sanitize through Zen Shield
        sanitized = await self._sanitize_through_shield(raw_output, source_tool, intent)

        if not sanitized:
            # Fallback: analyze locally without LLM
            logger.warning("Sanitization failed, using local analysis")
            return self._local_analysis(raw_output)

        if not sanitized.safe_to_send:
            # High risk data - analyze locally
            logger.warning(
                f"High risk data detected ({len(sanitized.redactions)} redactions), "
                "using local analysis"
            )
            return self._local_analysis(sanitized.cleaned_data)

        # Step 2: Send to big LLM with sanitized data
        analysis = await self._analyze_with_big_llm(
            sanitized.cleaned_data,
            metadata={
                "source_tool": source_tool,
                "redactions_count": len(sanitized.redactions),
                "compression_ratio": sanitized.compression_ratio,
                "tokens_saved": sanitized.tokens_saved,
                "fallback_used": sanitized.fallback_used,
            },
        )

        return {
            "analysis": analysis,
            "sanitization": {
                "redactions_count": len(sanitized.redactions),
                "risk_level": sanitized.risk_level.value,
                "compression_ratio": sanitized.compression_ratio,
                "tokens_saved": sanitized.tokens_saved,
                "fallback_used": sanitized.fallback_used,
            },
        }

    async def _sanitize_through_shield(
        self, raw_data: str, source_tool: str, intent: str
    ) -> Optional[SanitizerResponse]:
        """
        Send data to Zen Shield for sanitization
        Falls back to local sanitizer if service unavailable
        """
        request = SanitizerRequest(
            raw_data=raw_data, source_tool=source_tool, intent=intent
        )

        # Try remote shield service first
        if self.session:
            try:
                async with self.session.post(
                    f"{self.shield_url}/sanitize",
                    json=request.model_dump(),
                    timeout=aiohttp.ClientTimeout(total=30),
                ) as resp:
                    if resp.status == 200:
                        result = await resp.json()
                        return SanitizerResponse(**result)
            except Exception as e:
                logger.warning(f"Remote shield unavailable: {e}")

        # Fallback to local sanitizer
        if self.local_sanitizer:
            try:
                return await self.local_sanitizer.process(request)
            except Exception as e:
                logger.error(f"Local sanitization failed: {e}")

        return None

    async def _analyze_with_big_llm(
        self, cleaned_data: str, metadata: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Send sanitized data to big LLM for analysis
        """
        # This integrates with your existing LLM bridge
        # For now, returning placeholder

        tool = metadata.get("source_tool", "unknown")

        # Construct prompt for big LLM
        prompt = f"""Analyze the following {tool} scan output for security issues:

{cleaned_data}

Focus on:
1. Open ports and services
2. Potential vulnerabilities
3. Misconfigurations
4. Exploitation paths

Be concise and technical."""

        # Here you would call your existing LLM bridge
        # For now, return structure
        return {
            "prompt": prompt,
            "metadata": metadata,
            "provider": self.big_llm_provider,
            "note": "Integrate with your existing LLM bridge here",
        }

    def _local_analysis(self, data: str) -> Dict[str, Any]:
        """
        Local analysis without LLM
        Used when sanitization fails or risk is too high
        """
        # Simple keyword-based analysis
        findings = []

        # Check for common patterns
        if "open" in data.lower():
            findings.append("Open ports detected")
        if "vuln" in data.lower() or "vulnerable" in data.lower():
            findings.append("Potential vulnerabilities found")
        if "error" in data.lower():
            findings.append("Errors in scan output")
        if "unauthorized" in data.lower() or "401" in data:
            findings.append("Authentication may be required")

        return {
            "analysis": {
                "findings": findings,
                "note": "Local analysis (LLM bypassed due to security concerns)",
            },
            "sanitization": {"local_only": True, "reason": "security_fallback"},
        }

    async def comprehensive_scan_with_shield(
        self, target: str, tools: list[str], use_shield: bool = True
    ) -> Dict[str, Any]:
        """
        Run comprehensive scan with shield protection

        Args:
            target: Target to scan
            tools: List of tools to use
            use_shield: Whether to use sanitization
        """
        from modules.tool_orchestrator import ToolOrchestrator

        results = {
            "target": target,
            "tools_used": tools,
            "findings": [],
            "sanitization_applied": use_shield,
        }

        async with ToolOrchestrator(self.bridge_url) as orch:
            for tool in tools:
                try:
                    # Run tool scan
                    scan_result = await self._run_tool(orch, tool, target)
                    raw_output = scan_result.get("raw_output", "")

                    if use_shield and raw_output:
                        # Sanitize and analyze
                        analysis = await self.analyze_tool_output(
                            raw_output, tool, intent="analyze"
                        )
                        results["findings"].append({"tool": tool, "analysis": analysis})
                    else:
                        # Raw output without sanitization (not recommended)
                        results["findings"].append(
                            {"tool": tool, "raw": raw_output[:1000]}  # Truncate
                        )

                except Exception as e:
                    logger.error(f"Tool {tool} failed: {e}")
                    results["findings"].append({"tool": tool, "error": str(e)})

        return results

    async def _run_tool(self, orchestrator, tool: str, target: str) -> Dict[str, Any]:
        """Run specific tool through orchestrator"""
        method_map = {
            "nmap": orchestrator.scan_with_nmap,
            "nuclei": orchestrator.scan_with_nuclei,
            "gobuster": orchestrator.scan_with_gobuster,
            "amass": orchestrator.enumerate_subdomains,
        }

        method = method_map.get(tool)
        if not method:
            raise ValueError(f"Unknown tool: {tool}")

        # Trigger scan
        if tool == "amass":
            result = await method(target, active=False)
        else:
            result = await method(target)

        scan_id = result.get("scan_id")

        # Wait for completion
        status = await orchestrator.wait_for_scan(scan_id)

        # Get results
        scan_results = await orchestrator.get_scan_results(scan_id)

        return {
            "scan_id": scan_id,
            "status": status.get("status"),
            "raw_output": str(scan_results.get("results", "")),
        }
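A minimal sketch of how the wrapper above is meant to be driven (not part of the released file). It assumes the zen-shield and integration-bridge services at the constructor's default URLs are reachable; the nmap excerpt and target are illustrative placeholders.

    import asyncio
    from core.shield_integration import ShieldedOrchestrator

    async def main():
        async with ShieldedOrchestrator(big_llm_provider="openai") as shield:
            # Single tool output: sanitize first, then hand cleaned text to the LLM step
            report = await shield.analyze_tool_output(
                raw_output="22/tcp open ssh\n80/tcp open http",  # illustrative nmap excerpt
                source_tool="nmap",
            )
            print(report["sanitization"])

            # Or drive several tools end-to-end through the bridge
            results = await shield.comprehensive_scan_with_shield(
                target="example.com", tools=["nmap", "nuclei"], use_shield=True
            )
            print(len(results["findings"]))

    asyncio.run(main())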
modules/__init__.py
ADDED
@@ -0,0 +1,46 @@

"""
Penetration Testing Modules for Zen AI
"""

from .cve_database import CVEDatabase, CVEEntry, RansomwareEntry
from .exploit_assist import ExploitAssistModule
from .nuclei_integration import NucleiIntegration, NucleiTemplateManager
from .osint import (DomainInfo, EmailProfile, OSINTModule, OSINTResult,
                    check_email_breach, enumerate_subdomains, harvest_emails)
from .protonvpn import (ProtonVPNManager, VPNProtocol, VPNSecurityLevel,
                        VPNServer, VPNStatus, quick_connect, secure_connect)
from .recon import ReconModule
from .report_gen import ReportGenerator
from .sql_injection_db import (DBType, SQLInjectionDatabase, SQLITechnique,
                               SQLPayload)
from .vuln_scanner import VulnScannerModule

__all__ = [
    "ReconModule",
    "VulnScannerModule",
    "ExploitAssistModule",
    "ReportGenerator",
    "NucleiIntegration",
    "NucleiTemplateManager",
    "SQLInjectionDatabase",
    "SQLPayload",
    "SQLITechnique",
    "DBType",
    "CVEDatabase",
    "CVEEntry",
    "RansomwareEntry",
    "ProtonVPNManager",
    "VPNProtocol",
    "VPNSecurityLevel",
    "VPNStatus",
    "VPNServer",
    "quick_connect",
    "secure_connect",
    "OSINTModule",
    "DomainInfo",
    "EmailProfile",
    "OSINTResult",
    "harvest_emails",
    "enumerate_subdomains",
    "check_email_breach",
]
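Because modules/__init__.py re-exports the public classes and helpers, callers can import them from the package root rather than from individual submodules. A brief sketch (not part of the released file), assuming the wheel's top-level modules package is importable:

    # All of these resolve through the package root thanks to the re-exports above
    from modules import (CVEDatabase, OSINTModule, ReconModule,
                         SQLInjectionDatabase, enumerate_subdomains)

    print(ReconModule.__module__)            # -> modules.recon
    print(enumerate_subdomains.__module__)   # -> modules.osint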