zen-ai-pentest 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +28 -0
- agents/agent_base.py +239 -0
- agents/agent_orchestrator.py +346 -0
- agents/analysis_agent.py +225 -0
- agents/cli.py +258 -0
- agents/exploit_agent.py +224 -0
- agents/integration.py +211 -0
- agents/post_scan_agent.py +937 -0
- agents/react_agent.py +384 -0
- agents/react_agent_enhanced.py +616 -0
- agents/react_agent_vm.py +298 -0
- agents/research_agent.py +176 -0
- api/__init__.py +11 -0
- api/auth.py +123 -0
- api/main.py +1027 -0
- api/schemas.py +357 -0
- api/websocket.py +97 -0
- autonomous/__init__.py +122 -0
- autonomous/agent.py +253 -0
- autonomous/agent_loop.py +1370 -0
- autonomous/exploit_validator.py +1537 -0
- autonomous/memory.py +448 -0
- autonomous/react.py +339 -0
- autonomous/tool_executor.py +488 -0
- backends/__init__.py +16 -0
- backends/chatgpt_direct.py +133 -0
- backends/claude_direct.py +130 -0
- backends/duckduckgo.py +138 -0
- backends/openrouter.py +120 -0
- benchmarks/__init__.py +149 -0
- benchmarks/benchmark_engine.py +904 -0
- benchmarks/ci_benchmark.py +785 -0
- benchmarks/comparison.py +729 -0
- benchmarks/metrics.py +553 -0
- benchmarks/run_benchmarks.py +809 -0
- ci_cd/__init__.py +2 -0
- core/__init__.py +17 -0
- core/async_pool.py +282 -0
- core/asyncio_fix.py +222 -0
- core/cache.py +472 -0
- core/container.py +277 -0
- core/database.py +114 -0
- core/input_validator.py +353 -0
- core/models.py +288 -0
- core/orchestrator.py +611 -0
- core/plugin_manager.py +571 -0
- core/rate_limiter.py +405 -0
- core/secure_config.py +328 -0
- core/shield_integration.py +296 -0
- modules/__init__.py +46 -0
- modules/cve_database.py +362 -0
- modules/exploit_assist.py +330 -0
- modules/nuclei_integration.py +480 -0
- modules/osint.py +604 -0
- modules/protonvpn.py +554 -0
- modules/recon.py +165 -0
- modules/sql_injection_db.py +826 -0
- modules/tool_orchestrator.py +498 -0
- modules/vuln_scanner.py +292 -0
- modules/wordlist_generator.py +566 -0
- risk_engine/__init__.py +99 -0
- risk_engine/business_impact.py +267 -0
- risk_engine/business_impact_calculator.py +563 -0
- risk_engine/cvss.py +156 -0
- risk_engine/epss.py +190 -0
- risk_engine/example_usage.py +294 -0
- risk_engine/false_positive_engine.py +1073 -0
- risk_engine/scorer.py +304 -0
- web_ui/backend/main.py +471 -0
- zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
- zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
- zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
- zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
- zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
- zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
autonomous/agent_loop.py
ADDED
|
@@ -0,0 +1,1370 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Autonomous Agent Loop Engine for Zen AI Pentest Framework
|
|
3
|
+
|
|
4
|
+
Implementiert einen vollständigen ReAct (Reasoning + Acting) Loop mit:
|
|
5
|
+
- State Machine für kontrollierte Ausführung
|
|
6
|
+
- Multi-Layer Memory Management
|
|
7
|
+
- Integrierte Tool-Ausführung mit Retry-Logik
|
|
8
|
+
- Progress Tracking und Fehlerbehandlung
|
|
9
|
+
|
|
10
|
+
Based on: ReAct Pattern (https://arxiv.org/abs/2210.03629)
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from enum import Enum, auto
|
|
14
|
+
from typing import Dict, List, Any, Optional, Callable, Tuple
|
|
15
|
+
from dataclasses import dataclass, field
|
|
16
|
+
from datetime import datetime
|
|
17
|
+
from abc import ABC, abstractmethod
|
|
18
|
+
import asyncio
|
|
19
|
+
import json
|
|
20
|
+
import logging
|
|
21
|
+
import uuid
|
|
22
|
+
import time
|
|
23
|
+
import traceback
|
|
24
|
+
|
|
25
|
+
# Configure logging.
# NOTE(review): logging.basicConfig() at import time configures the root
# logger as a module side effect; for library code, handler configuration is
# usually left to the application — confirm this is intentional.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class AgentState(Enum):
    """
    State machine for the agent loop.

    States:
        IDLE: Waiting for start
        PLANNING: Building the action plan
        EXECUTING: Executing actions
        OBSERVING: Analyzing results
        REFLECTING: Evaluating progress
        COMPLETED: Goal reached
        ERROR: An error occurred
        PAUSED: Waiting for input (human-in-the-loop)
    """
    IDLE = auto()
    PLANNING = auto()
    EXECUTING = auto()
    OBSERVING = auto()
    REFLECTING = auto()
    COMPLETED = auto()
    ERROR = auto()
    PAUSED = auto()
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class ToolType(Enum):
    """Available tool types for the agent.

    The string values are used as serialized identifiers (e.g. by
    ``PlanStep.to_dict`` and ``ToolRegistry.list_tools``), so they should
    be treated as stable.
    """
    NMAP_SCANNER = "nmap_scanner"
    NUCLEI_SCANNER = "nuclei_scanner"
    EXPLOIT_VALIDATOR = "exploit_validator"
    REPORT_GENERATOR = "report_generator"
    SUBDOMAIN_ENUMERATOR = "subdomain_enumerator"
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
class AgentMemory:
    """
    Container holding every memory layer used by the agent.

    Attributes:
        session_id: Unique session identifier
        created_at: Creation timestamp
        goal: The agent's current goal
        target: Target system/IP/domain
        scope: Scope restrictions
    """
    session_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    created_at: datetime = field(default_factory=datetime.now)
    goal: str = ""
    target: str = ""
    scope: Dict[str, Any] = field(default_factory=dict)

    # Short-term memory (per session)
    short_term: List[Dict[str, Any]] = field(default_factory=list)
    max_short_term: int = 100

    # Long-term memory (persisted)
    long_term: Dict[str, Any] = field(default_factory=dict)

    # Context window fed to the LLM
    context_window: List[Dict[str, Any]] = field(default_factory=list)
    max_context_window: int = 20

    # Plan and current position within it
    current_plan: List[Dict[str, Any]] = field(default_factory=list)
    plan_step: int = 0

    # Findings and execution results
    findings: List[Dict[str, Any]] = field(default_factory=list)
    execution_history: List[Dict[str, Any]] = field(default_factory=list)

    def add_to_short_term(self, entry: Dict[str, Any]) -> None:
        """Append an entry to short-term memory, stamping it with a
        timestamp and unique id; oldest entries are evicted on overflow.
        Note: the caller's dict is mutated in place."""
        entry['timestamp'] = datetime.now().isoformat()
        entry['id'] = str(uuid.uuid4())
        self.short_term.append(entry)

        # Evict the oldest entries beyond the size cap.
        overflow = len(self.short_term) - self.max_short_term
        if overflow > 0:
            del self.short_term[:overflow]

    def add_to_context_window(self, entry: Dict[str, Any]) -> None:
        """Append an entry to the LLM context window, bounded in size.
        Note: the caller's dict is mutated in place (timestamp added)."""
        entry['timestamp'] = datetime.now().isoformat()
        self.context_window.append(entry)

        # Keep the window small enough for LLM prompting.
        excess = len(self.context_window) - self.max_context_window
        if excess > 0:
            del self.context_window[:excess]

    def get_context_for_llm(self) -> str:
        """Build a formatted context string for LLM prompts."""
        plan_length = len(self.current_plan) if self.current_plan else '?'
        lines = [
            f"Goal: {self.goal}",
            f"Target: {self.target}",
            f"Progress: Step {self.plan_step + 1}/{plan_length}",
            "\nRecent Actions:",
        ]

        # Only the five most recent context entries are summarized.
        for item in self.context_window[-5:]:
            kind = item.get('type', 'unknown')
            text = item.get('content', '')
            lines.append(f"  [{kind.upper()}] {text[:100]}...")

        if self.findings:
            lines.append(f"\nFindings: {len(self.findings)}")

        return "\n".join(lines)

    def add_finding(self, finding: Dict[str, Any]) -> None:
        """Record a security finding with a timestamp and unique id.
        Note: the caller's dict is mutated in place."""
        finding['timestamp'] = datetime.now().isoformat()
        finding['id'] = str(uuid.uuid4())
        self.findings.append(finding)

    def to_dict(self) -> Dict[str, Any]:
        """Summarize the memory state as a plain dictionary (counts only,
        not the full entry contents)."""
        return {
            'session_id': self.session_id,
            'created_at': self.created_at.isoformat(),
            'goal': self.goal,
            'target': self.target,
            'scope': self.scope,
            'short_term_count': len(self.short_term),
            'findings_count': len(self.findings),
            'plan_step': self.plan_step,
            'current_plan_length': len(self.current_plan),
        }
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
@dataclass
class ToolResult:
    """
    Outcome of a single tool execution.

    Attributes:
        tool_name: Name of the executed tool
        success: Whether the execution succeeded
        data: Structured data extracted from the output
        raw_output: Raw tool output
        error_message: Error message on failure
        execution_time: Duration of the run in seconds
        timestamp: When the tool ran
        metadata: Additional metadata
    """
    tool_name: str
    success: bool
    data: Dict[str, Any] = field(default_factory=dict)
    raw_output: str = ""
    error_message: Optional[str] = None
    execution_time: float = 0.0
    timestamp: datetime = field(default_factory=datetime.now)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this result to a dictionary, truncating long raw output."""
        # Cap raw_output at 500 characters to keep serialized results small.
        if len(self.raw_output) > 500:
            output_preview = self.raw_output[:500]
        else:
            output_preview = self.raw_output
        return {
            'tool_name': self.tool_name,
            'success': self.success,
            'data': self.data,
            'raw_output': output_preview,
            'error_message': self.error_message,
            'execution_time': self.execution_time,
            'timestamp': self.timestamp.isoformat(),
            'metadata': self.metadata,
        }
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
@dataclass
class PlanStep:
    """
    One step of the execution plan.

    Attributes:
        step_id: Unique id of the step
        tool_type: Which tool this step uses
        action: Human-readable description of the action
        parameters: Parameters passed to the tool
        depends_on: Ids of steps that must complete first
        completed: Whether this step has finished
        result: Result of the step, once executed
    """
    step_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    tool_type: ToolType = ToolType.NMAP_SCANNER
    action: str = ""
    parameters: Dict[str, Any] = field(default_factory=dict)
    depends_on: List[str] = field(default_factory=list)
    completed: bool = False
    result: Optional[ToolResult] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this step (and its result, if any) to a dictionary."""
        serialized_result = None
        if self.result is not None:
            serialized_result = self.result.to_dict()
        return {
            'step_id': self.step_id,
            'tool_type': self.tool_type.value,
            'action': self.action,
            'parameters': self.parameters,
            'depends_on': self.depends_on,
            'completed': self.completed,
            'result': serialized_result,
        }
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
# Abstract Tool Classes
|
|
234
|
+
class BaseTool(ABC):
    """Abstract base class shared by all agent tools."""

    def __init__(self, name: str, timeout: int = 300):
        self.name = name
        self.timeout = timeout
        # Per-tool child logger, namespaced under this module.
        self.logger = logging.getLogger(f"{__name__}.{name}")

    @abstractmethod
    async def execute(self, parameters: Dict[str, Any]) -> ToolResult:
        """Run the tool with the given parameters."""
        ...

    def validate_parameters(self, parameters: Dict[str, Any]) -> Tuple[bool, str]:
        """Validate the tool parameters; returns ``(ok, error_message)``.

        The default implementation accepts anything; subclasses override
        this for tool-specific checks.
        """
        return True, ""
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
class NmapScanner(BaseTool):
    """Nmap port scanner integration.

    NOTE(review): ``execute`` does not actually invoke nmap — it sleeps
    briefly and parses output produced by ``_simulate_scan_output``. The
    built command string is only logged.
    """

    def __init__(self, timeout: int = 600):
        super().__init__("NmapScanner", timeout)
        # Default scan options: service/version detection (-sV), default
        # scripts (-sC), OS detection (-O) and the "vuln" script category.
        self.default_options = "-sV -sC -O --script=vuln"

    def validate_parameters(self, parameters: Dict[str, Any]) -> Tuple[bool, str]:
        """Validate nmap-specific parameters; returns (ok, error_message)."""
        target = parameters.get('target')
        if not target:
            return False, "Target is required"

        # Validate IP or domain (only a minimal non-empty string check here).
        if not isinstance(target, str) or len(target) < 1:
            return False, "Invalid target format"

        return True, ""

    async def execute(self, parameters: Dict[str, Any]) -> ToolResult:
        """Run an nmap scan (simulated) and return parsed results.

        Parameters (in ``parameters``):
            target: Host/IP/domain to scan
            options: nmap CLI options (defaults to ``self.default_options``)
            ports: Port range string (defaults to '1-1000')
        """
        start_time = time.time()

        try:
            target = parameters.get('target')
            options = parameters.get('options', self.default_options)
            ports = parameters.get('ports', '1-1000')

            # Build the nmap command (logged only; never executed here).
            cmd = f"nmap {options} -p {ports} {target}"

            self.logger.info(f"Executing: {cmd}")

            # Execution is simulated for this code path.
            # A real implementation would use subprocess or asyncio subprocess.
            await asyncio.sleep(0.5)  # Simulate execution time

            # Simulated output
            output = self._simulate_scan_output(target, ports)
            parsed_data = self._parse_output(output)

            execution_time = time.time() - start_time

            return ToolResult(
                tool_name=self.name,
                success=True,
                data=parsed_data,
                raw_output=output,
                execution_time=execution_time,
                metadata={'ports_scanned': ports, 'options': options}
            )

        except Exception as e:
            execution_time = time.time() - start_time
            self.logger.error(f"Nmap execution failed: {str(e)}")
            return ToolResult(
                tool_name=self.name,
                success=False,
                error_message=str(e),
                execution_time=execution_time
            )

    def _simulate_scan_output(self, target: str, ports: str) -> str:
        """Return canned nmap-style output for demo purposes."""
        return f"""
Starting Nmap 7.94 ( https://nmap.org ) at {datetime.now().strftime('%Y-%m-%d %H:%M')}
Nmap scan report for {target}
Host is up (0.045s latency).
Not shown: 995 closed tcp ports (reset)
PORT STATE SERVICE VERSION
22/tcp open ssh OpenSSH 8.9p1 Ubuntu 3ubuntu0.1
80/tcp open http Apache httpd 2.4.52
443/tcp open ssl/http Apache httpd 2.4.52
3306/tcp open mysql MySQL 8.0.32
8080/tcp open http-proxy nginx 1.18.0

Service detection performed.
OS and Service detection performed.
"""

    def _parse_output(self, output: str) -> Dict[str, Any]:
        """Parse nmap output into open ports and distinct service names."""
        open_ports = []
        services = []

        for line in output.split('\n'):
            # Lines such as "22/tcp open ssh OpenSSH ..." describe open ports.
            if '/tcp' in line and 'open' in line:
                parts = line.split()
                if len(parts) >= 3:
                    port = parts[0].split('/')[0]
                    service = parts[2]
                    version = ' '.join(parts[3:]) if len(parts) > 3 else 'unknown'
                    open_ports.append({
                        'port': int(port),
                        'service': service,
                        'version': version
                    })
                    services.append(service)

        return {
            'open_ports': open_ports,
            'services': list(set(services)),
            'port_count': len(open_ports)
        }
|
|
356
|
+
|
|
357
|
+
|
|
358
|
+
class NucleiScanner(BaseTool):
    """Nuclei vulnerability scanner integration (execution is simulated)."""

    def __init__(self, timeout: int = 600):
        super().__init__("NucleiScanner", timeout)
        # Template categories scanned when the caller supplies none.
        self.default_templates = "cves,exposures,vulnerabilities"

    async def execute(self, parameters: Dict[str, Any]) -> ToolResult:
        """Run a (simulated) nuclei scan and return its findings.

        Parameters (in ``parameters``):
            target: Target URL or host
            templates: Template categories (defaults to ``self.default_templates``)
            severity: Severity filter (defaults to 'critical,high,medium')
        """
        started = time.time()

        try:
            target = parameters.get('target')
            templates = parameters.get('templates', self.default_templates)
            severity = parameters.get('severity', 'critical,high,medium')

            # The command is logged only; the scan itself is simulated.
            cmd = f"nuclei -u {target} -t {templates} -severity {severity} -json"
            self.logger.info(f"Executing: {cmd}")
            await asyncio.sleep(0.5)

            findings = self._simulate_findings(target)

            return ToolResult(
                tool_name=self.name,
                success=True,
                data={'findings': findings, 'count': len(findings)},
                raw_output=json.dumps(findings, indent=2),
                execution_time=time.time() - started,
                metadata={'templates': templates, 'severity_filter': severity}
            )

        except Exception as exc:
            return ToolResult(
                tool_name=self.name,
                success=False,
                error_message=str(exc),
                execution_time=time.time() - started
            )

    def _simulate_findings(self, target: str) -> List[Dict[str, Any]]:
        """Return canned nuclei findings for demo purposes."""
        return [
            {
                'template': 'CVE-2023-1234',
                'severity': 'critical',
                'host': target,
                'matched': f'{target}/vulnerable-endpoint',
                'description': 'Remote Code Execution vulnerability detected'
            },
            {
                'template': 'exposed-panel',
                'severity': 'medium',
                'host': target,
                'matched': f'{target}/admin',
                'description': 'Exposed admin panel'
            }
        ]
|
|
420
|
+
|
|
421
|
+
|
|
422
|
+
class ExploitValidator(BaseTool):
    """
    Validates potential exploits using the ExploitValidator system.

    Uses Docker sandboxing, evidence collection and safety controls for
    safe exploit validation.

    NOTE(review): this tool class shares its name with the wrapped
    ``.exploit_validator.ExploitValidator``; the wrapped class is imported
    locally inside ``_get_validator`` to avoid the clash and circular imports.
    """

    def __init__(
        self,
        timeout: int = 300,
        safety_level: str = "controlled",
        use_docker: bool = False  # Disabled by default for compatibility
    ):
        super().__init__("ExploitValidator", timeout)
        self.safety_level = safety_level
        # NOTE(review): use_docker is stored but not read anywhere in this
        # class — presumably consumed by the underlying validator; verify.
        self.use_docker = use_docker
        # Created lazily on first use; see _get_validator().
        self._validator = None

    async def _get_validator(self):
        """Lazily create and cache the underlying ExploitValidator instance."""
        if self._validator is None:
            # Import here to avoid circular imports
            from .exploit_validator import ExploitValidator, SafetyLevel, ScopeConfig

            # Map the configured string level onto the SafetyLevel enum.
            safety_map = {
                'read_only': SafetyLevel.READ_ONLY,
                'validate_only': SafetyLevel.VALIDATE_ONLY,
                'controlled': SafetyLevel.CONTROLLED,
                'full': SafetyLevel.FULL
            }

            # Unknown levels silently fall back to CONTROLLED.
            safety = safety_map.get(self.safety_level, SafetyLevel.CONTROLLED)

            self._validator = ExploitValidator(
                safety_level=safety,
                scope_config=ScopeConfig(),
                sandbox_config=None,  # Use defaults
                enable_playwright=False  # Disable for headless environments
            )
        return self._validator

    async def execute(self, parameters: Dict[str, Any]) -> ToolResult:
        """
        Validate a single exploit against a target.

        Parameters (in ``parameters``):
            target: Target URL or host
            vulnerability: Vulnerability type (sqli, xss, rce, etc.)
            exploit_code: Exploit code (optional; a basic test payload is
                generated when omitted)
            exploit_type: Exploit type name, mapped onto the ExploitType enum
            parameters: Extra parameters forwarded to the validator
        """
        start_time = time.time()

        try:
            target = parameters.get('target')
            vulnerability = parameters.get('vulnerability', 'unknown')
            exploit_code = parameters.get('exploit_code', '')
            exploit_type_str = parameters.get('exploit_type', 'web_rce')
            extra_params = parameters.get('parameters', {})

            self.logger.info(f"Validating {vulnerability} on {target}")

            # If no exploit code provided, generate a basic test
            if not exploit_code:
                exploit_code = self._generate_test_payload(vulnerability)

            # Get validator and execute
            validator = await self._get_validator()

            # Import ExploitType here (same circular-import avoidance as above)
            from .exploit_validator import ExploitType

            # Map vulnerability to ExploitType
            type_map = {
                'sqli': ExploitType.WEB_SQLI,
                'sql_injection': ExploitType.WEB_SQLI,
                'xss': ExploitType.WEB_XSS,
                'rce': ExploitType.WEB_RCE,
                'lfi': ExploitType.WEB_LFI,
                'rfi': ExploitType.WEB_RFI,
                'command_injection': ExploitType.WEB_CMD_INJECTION,
                'csrf': ExploitType.WEB_CSRF,
                'ssrf': ExploitType.WEB_SSRF,
                'xxe': ExploitType.WEB_XXE,
                'path_traversal': ExploitType.WEB_PATH_TRAVERSAL,
                'service': ExploitType.SERVICE,
                'privesc': ExploitType.PRIVESC,
            }

            # Normalize e.g. 'sql-injection' -> 'sql_injection'; unknown
            # names fall back to WEB_RCE.
            exploit_type = type_map.get(
                exploit_type_str.lower().replace('-', '_'),
                ExploitType.WEB_RCE
            )

            # Run validation
            result = await validator.validate(
                exploit_code=exploit_code,
                target=target,
                exploit_type=exploit_type,
                parameters=extra_params,
                timeout=self.timeout
            )

            execution_time = time.time() - start_time

            # Convert to ToolResult format
            validation_result = {
                'vulnerability': vulnerability,
                'target': target,
                'exploitable': result.success,
                # NOTE(review): fixed confidence heuristic (0.9 on success,
                # 0.1 on failure) rather than a score from the validator.
                'confidence': 0.9 if result.success else 0.1,
                'evidence': result.evidence.to_dict() if result.evidence else {},
                'output': result.output,
                'error': result.error,
                'risk_level': result.severity or 'unknown',
                'remediation': result.remediation,
                'validator_id': result.validator_id,
                'execution_time': result.execution_time
            }

            return ToolResult(
                tool_name=self.name,
                success=result.success,
                data=validation_result,
                raw_output=result.to_json(),
                execution_time=execution_time
            )

        except Exception as e:
            execution_time = time.time() - start_time
            self.logger.error(f"Exploit validation failed: {e}")
            return ToolResult(
                tool_name=self.name,
                success=False,
                error_message=str(e),
                execution_time=execution_time
            )

    def _generate_test_payload(self, vulnerability: str) -> str:
        """Generate a basic test payload based on vulnerability type."""
        payloads = {
            'sqli': "' OR '1'='1",
            'sql_injection': "' OR '1'='1",
            'xss': "<script>alert('XSS')</script>",
            'rce': "; echo 'RCE_TEST';",
            'lfi': "../../../etc/passwd",
            'command_injection': "; id;",
        }
        return payloads.get(vulnerability.lower(), "# No payload generated")
|
|
573
|
+
|
|
574
|
+
|
|
575
|
+
class ReportGenerator(BaseTool):
    """Generates penetration testing reports from collected findings."""

    def __init__(self, timeout: int = 120):
        super().__init__("ReportGenerator", timeout)

    async def execute(self, parameters: Dict[str, Any]) -> ToolResult:
        """Build a structured penetration test report.

        Parameters (in ``parameters``):
            findings: List of finding dicts; each may carry a 'severity' key
            target: Assessed target name (default 'unknown')
            format: Report format tag, recorded in metadata (default 'json')

        Returns:
            ToolResult whose ``data`` is the report dict (title, per-severity
            summary, findings, recommendations).
        """
        start_time = time.time()

        try:
            findings = parameters.get('findings', [])
            target = parameters.get('target', 'unknown')
            format_type = parameters.get('format', 'json')

            self.logger.info(f"Generating report for {target}")

            # Count severities in a single pass over the findings instead of
            # one list comprehension per severity level (was 4 passes).
            severity_counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
            for finding in findings:
                severity = finding.get('severity')
                if severity in severity_counts:
                    severity_counts[severity] += 1

            report = {
                'title': f'Penetration Test Report - {target}',
                'generated_at': datetime.now().isoformat(),
                'target': target,
                'summary': {
                    'total_findings': len(findings),
                    'critical': severity_counts['critical'],
                    'high': severity_counts['high'],
                    'medium': severity_counts['medium'],
                    'low': severity_counts['low']
                },
                'findings': findings,
                'recommendations': self._generate_recommendations(findings)
            }

            execution_time = time.time() - start_time

            return ToolResult(
                tool_name=self.name,
                success=True,
                data=report,
                raw_output=json.dumps(report, indent=2),
                execution_time=execution_time,
                metadata={'format': format_type}
            )

        except Exception as e:
            execution_time = time.time() - start_time
            return ToolResult(
                tool_name=self.name,
                success=False,
                error_message=str(e),
                execution_time=execution_time
            )

    def _generate_recommendations(self, findings: List[Dict]) -> List[str]:
        """Derive remediation recommendations from the finding severities."""
        recommendations = []

        # Distinct severities present across all findings.
        severities = {f.get('severity') for f in findings}

        if 'critical' in severities:
            recommendations.append("Address critical vulnerabilities immediately")
        if 'high' in severities:
            recommendations.append("Prioritize high severity findings")

        # Baseline recommendations are always included.
        recommendations.append("Implement regular security scanning")
        recommendations.append("Review and update security policies")

        return recommendations
|
|
642
|
+
|
|
643
|
+
|
|
644
|
+
class SubdomainEnumerator(BaseTool):
    """Subdomain enumeration tool (results are simulated)."""

    def __init__(self, timeout: int = 300):
        super().__init__("SubdomainEnumerator", timeout)

    async def execute(self, parameters: Dict[str, Any]) -> ToolResult:
        """Enumerate subdomains of the target domain (simulated).

        Parameters (in ``parameters``):
            target: Base domain
            wordlist: Wordlist name (recorded only; default 'default')
            recursive: Recursion flag (recorded only; default False)
        """
        started = time.time()

        try:
            domain = parameters.get('target')
            wordlist = parameters.get('wordlist', 'default')
            recursive = parameters.get('recursive', False)

            self.logger.info(f"Enumerating subdomains for {domain}")
            # Simulate the enumeration instead of performing DNS queries.
            await asyncio.sleep(0.4)

            # Canned subdomain list for demo purposes.
            prefixes = ['www', 'mail', 'ftp', 'admin', 'blog', 'api', 'staging', 'dev']
            subdomains = [f'{prefix}.{domain}' for prefix in prefixes]

            result_data = {
                'domain': domain,
                'subdomains': subdomains,
                'count': len(subdomains),
                'wordlist': wordlist,
                'recursive': recursive
            }

            return ToolResult(
                tool_name=self.name,
                success=True,
                data=result_data,
                raw_output='\n'.join(subdomains),
                execution_time=time.time() - started,
                metadata={'enumeration_method': 'brute_force'}
            )

        except Exception as exc:
            return ToolResult(
                tool_name=self.name,
                success=False,
                error_message=str(exc),
                execution_time=time.time() - started
            )
|
|
701
|
+
|
|
702
|
+
|
|
703
|
+
class ToolRegistry:
    """Registry mapping each ToolType to its tool instance."""

    def __init__(self):
        # One shared instance per tool type, created eagerly.
        self.tools: Dict[ToolType, BaseTool] = {
            ToolType.NMAP_SCANNER: NmapScanner(),
            ToolType.NUCLEI_SCANNER: NucleiScanner(),
            ToolType.EXPLOIT_VALIDATOR: ExploitValidator(),
            ToolType.REPORT_GENERATOR: ReportGenerator(),
            ToolType.SUBDOMAIN_ENUMERATOR: SubdomainEnumerator()
        }

    def get_tool(self, tool_type: ToolType) -> Optional[BaseTool]:
        """Return the tool registered for *tool_type*, or None."""
        return self.tools.get(tool_type)

    def list_tools(self) -> List[str]:
        """Return the string identifiers of all registered tools."""
        return [tool_type.value for tool_type in self.tools]
|
|
722
|
+
|
|
723
|
+
|
|
724
|
+
class AutonomousAgentLoop:
|
|
725
|
+
"""
|
|
726
|
+
Autonomous Agent Loop Engine mit ReAct Pattern.
|
|
727
|
+
|
|
728
|
+
Diese Klasse implementiert einen vollständigen ReAct (Reasoning + Acting)
|
|
729
|
+
Loop mit State Machine, Memory Management und Tool Integration.
|
|
730
|
+
|
|
731
|
+
Example:
|
|
732
|
+
agent = AutonomousAgentLoop(llm_client=my_llm)
|
|
733
|
+
result = await agent.run(
|
|
734
|
+
goal="Find vulnerabilities on target",
|
|
735
|
+
target="192.168.1.1",
|
|
736
|
+
scope={"depth": "comprehensive"}
|
|
737
|
+
)
|
|
738
|
+
"""
|
|
739
|
+
|
|
740
|
+
def __init__(
    self,
    llm_client: Optional[Any] = None,
    max_iterations: int = 50,
    retry_attempts: int = 3,
    retry_delay: float = 2.0,
    enable_progress_tracking: bool = True
):
    """
    Initialize the autonomous agent loop.

    Args:
        llm_client: Optional LLM client used for reasoning.
        max_iterations: Upper bound on main-loop iterations.
        retry_attempts: Number of retry attempts when a tool fails.
        retry_delay: Base delay between retries in seconds (scaled linearly
            by the attempt number in _execute_with_retry).
        enable_progress_tracking: Whether the progress callback is invoked.
    """
    self.llm = llm_client
    self.max_iterations = max_iterations
    self.retry_attempts = retry_attempts
    self.retry_delay = retry_delay
    self.enable_progress_tracking = enable_progress_tracking

    # State machine: current state plus the previous one (used by resume()).
    self.state = AgentState.IDLE
    self.previous_state: Optional[AgentState] = None

    # Memory is created per run() call; None until a run starts.
    self.memory: Optional[AgentMemory] = None

    # Tools
    self.tool_registry = ToolRegistry()

    # Progress snapshot handed (as a copy) to the progress callback.
    self.progress: Dict[str, Any] = {
        'current_iteration': 0,
        'total_iterations': max_iterations,
        'completed_steps': 0,
        'total_steps': 0,
        'findings_count': 0,
        'errors': []
    }

    # Observer callbacks: a list per state, plus one progress observer.
    self.state_callbacks: Dict[AgentState, List[Callable]] = {
        state: [] for state in AgentState
    }
    self.progress_callback: Optional[Callable[[Dict], None]] = None

    # Wall-clock execution tracking, set by run().
    self.start_time: Optional[float] = None
    self.end_time: Optional[float] = None

    self.logger = logging.getLogger(__name__)
|
795
|
+
|
|
796
|
+
def register_state_callback(self, state: AgentState, callback: Callable) -> None:
    """
    Register a callback fired whenever the agent enters *state*.

    Args:
        state: The state to observe.
        callback: Callable invoked with the new state on each transition.
    """
    observers = self.state_callbacks[state]
    observers.append(callback)
|
805
|
+
|
|
806
|
+
def set_progress_callback(self, callback: Callable[[Dict], None]) -> None:
    """
    Install the progress observer.

    Args:
        callback: Callable invoked with a copy of the progress dict
            whenever progress is updated.
    """
    self.progress_callback = callback
|
814
|
+
|
|
815
|
+
def _transition_to(self, new_state: AgentState) -> None:
    """
    Move the state machine to *new_state* and notify registered callbacks.

    Args:
        new_state: The state to enter.
    """
    # Remember where we came from so resume() can restore it.
    self.previous_state, self.state = self.state, new_state

    self.logger.info(f"State transition: {self.previous_state.name} -> {new_state.name}")

    # Notify observers; a misbehaving callback must not break the loop.
    for observer in self.state_callbacks.get(new_state, []):
        try:
            observer(new_state)
        except Exception as e:
            self.logger.error(f"State callback error: {e}")
|
833
|
+
|
|
834
|
+
def _update_progress(self, updates: Dict[str, Any]) -> None:
    """Merge *updates* into the progress dict and notify the observer."""
    self.progress.update(updates)

    # Only notify when tracking is on and an observer is installed.
    if not (self.enable_progress_tracking and self.progress_callback):
        return
    try:
        # Hand out a copy so observers cannot mutate internal state.
        self.progress_callback(self.progress.copy())
    except Exception as e:
        self.logger.error(f"Progress callback error: {e}")
|
843
|
+
|
|
844
|
+
async def run(
    self,
    goal: str,
    target: str,
    scope: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Main entry point for the autonomous agent loop.

    Executes the full ReAct cycle:
    1. PLANNING: build a plan from the goal
    2. EXECUTING: run tools
    3. OBSERVING: analyze results
    4. REFLECTING: evaluate progress and decide whether to continue

    Args:
        goal: The objective to reach (e.g. "Find all open ports").
        target: The target system (IP, domain, URL).
        scope: Optional scope restrictions.

    Returns:
        Dict with execution result, findings, statistics and metadata.
    """
    self.start_time = time.time()
    self._transition_to(AgentState.PLANNING)

    # Fresh memory for each run.
    self.memory = AgentMemory(
        goal=goal,
        target=target,
        scope=scope or {}
    )

    self.logger.info(f"Starting autonomous execution: {goal} on {target}")

    try:
        # PLANNING phase
        plan = await self.plan()
        self.memory.current_plan = [step.to_dict() for step in plan]
        self.progress['total_steps'] = len(plan)

        # Main loop
        iteration = 0
        while iteration < self.max_iterations:
            iteration += 1
            self.progress['current_iteration'] = iteration

            # Done once every plan step has been executed.
            if self.memory.plan_step >= len(plan):
                self.logger.info("All plan steps completed")
                break

            current_step = plan[self.memory.plan_step]

            # EXECUTING phase
            self._transition_to(AgentState.EXECUTING)
            result = await self._execute_with_retry(current_step)

            # OBSERVING phase
            self._transition_to(AgentState.OBSERVING)
            await self.observe(result)  # observation stored in memory

            # Record the outcome and advance the plan pointer.
            current_step.result = result
            current_step.completed = True
            self.memory.plan_step += 1
            self.progress['completed_steps'] = self.memory.plan_step

            # Extract findings from successful results.
            if result.success and result.data:
                await self._extract_findings(result)

            # REFLECTING phase
            self._transition_to(AgentState.REFLECTING)
            should_continue = await self.reflect()

            if not should_continue:
                self.logger.info("Reflection indicated completion")
                break

            # Update the rolling context window.
            self.memory.add_to_context_window({
                'type': 'execution',
                'step': current_step.action,
                'result': result.success,
                'findings': len(self.memory.findings)
            })

        self._transition_to(AgentState.COMPLETED)
        # BUG FIX: record the end time BEFORE compiling the result. The
        # `finally` clause below only runs after the return value has been
        # built, so _compile_final_result previously saw end_time=None and
        # always reported a duration of 0 seconds.
        self.end_time = time.time()
        return await self._compile_final_result()

    except Exception as e:
        self.logger.error(f"Execution failed: {str(e)}")
        self._transition_to(AgentState.ERROR)
        self.progress['errors'].append({
            'timestamp': datetime.now().isoformat(),
            'error': str(e),
            'traceback': traceback.format_exc()
        })
        return self._compile_error_result(e)

    finally:
        self.end_time = time.time()
|
947
|
+
|
|
948
|
+
async def plan(self) -> List[PlanStep]:
    """
    Build an action plan from the goal.

    Scans the goal text for keywords and selects the matching tools; a
    report-generation step is always included at the end. When no keyword
    matches, a standard reconnaissance plan is used instead.

    Returns:
        List of PlanSteps to reach the goal.
    """
    self.logger.info("Planning phase started")

    goal_lower = self.memory.goal.lower() if self.memory else ""
    target = self.memory.target if self.memory else ""

    plan: List[PlanStep] = []

    # Keyword-driven tool selection.
    if 'port' in goal_lower or 'service' in goal_lower:
        plan.append(PlanStep(
            tool_type=ToolType.NMAP_SCANNER,
            action=f"Scan open ports on {target}",
            parameters={'target': target, 'ports': '1-1000'}
        ))

    if 'subdomain' in goal_lower or 'enumerate' in goal_lower:
        plan.append(PlanStep(
            tool_type=ToolType.SUBDOMAIN_ENUMERATOR,
            action=f"Enumerate subdomains of {target}",
            parameters={'target': target}
        ))

    if 'vulnerability' in goal_lower or 'scan' in goal_lower:
        plan.append(PlanStep(
            tool_type=ToolType.NUCLEI_SCANNER,
            action=f"Scan {target} for vulnerabilities",
            parameters={'target': target}
        ))

    if 'exploit' in goal_lower:
        plan.append(PlanStep(
            tool_type=ToolType.EXPLOIT_VALIDATOR,
            action=f"Validate exploits on {target}",
            parameters={'target': target}
        ))

    # BUG FIX: the fallback must be checked BEFORE the report step is
    # appended. Previously the report step was added unconditionally first,
    # so `if not plan:` was dead code and goals matching no keyword
    # produced a report-only plan instead of the documented default plan.
    if not plan:
        # No keyword matched: standard reconnaissance plan.
        plan = [
            PlanStep(
                tool_type=ToolType.NMAP_SCANNER,
                action=f"Initial reconnaissance of {target}",
                parameters={'target': target}
            ),
            PlanStep(
                tool_type=ToolType.NUCLEI_SCANNER,
                action=f"Vulnerability scan of {target}",
                parameters={'target': target}
            ),
            PlanStep(
                tool_type=ToolType.REPORT_GENERATOR,
                action="Generate findings report",
                parameters={'target': target}
            )
        ]
    else:
        # Always finish with a report of the collected findings.
        plan.append(PlanStep(
            tool_type=ToolType.REPORT_GENERATOR,
            action="Generate final report",
            parameters={'target': target}
        ))

    self.logger.info(f"Plan created with {len(plan)} steps")

    # Record the plan in short-term memory.
    for step in plan:
        self.memory.add_to_short_term({
            'type': 'plan_step',
            'content': step.action,
            'tool': step.tool_type.value
        })

    return plan
|
1032
|
+
|
|
1033
|
+
async def execute_action(self, action: Dict[str, Any]) -> ToolResult:
    """
    Execute a single action described as a dict.

    Args:
        action: Mapping with 'tool_type' (string) and 'parameters' (dict).

    Returns:
        ToolResult with the execution outcome.
    """
    tool_type_str = action.get('tool_type', 'nmap_scanner')
    parameters = action.get('parameters', {})

    def failure(name: str, message: str) -> ToolResult:
        # Uniform error envelope for every early-exit path.
        return ToolResult(tool_name=name, success=False, error_message=message)

    # Resolve the enum member from its string value.
    try:
        tool_type = ToolType(tool_type_str)
    except ValueError:
        return failure(tool_type_str, f"Unknown tool type: {tool_type_str}")

    tool = self.tool_registry.get_tool(tool_type)
    if tool is None:
        return failure(tool_type_str, f"Tool not found: {tool_type_str}")

    # Reject invalid parameters before dispatching to the tool.
    valid, error = tool.validate_parameters(parameters)
    if not valid:
        return failure(tool.name, f"Parameter validation failed: {error}")

    self.logger.info(f"Executing {tool.name} with params: {parameters}")
    return await tool.execute(parameters)
|
1076
|
+
|
|
1077
|
+
async def _execute_with_retry(self, step: PlanStep) -> ToolResult:
    """
    Run one plan step, retrying on failure with linear backoff.

    Args:
        step: The PlanStep to execute.

    Returns:
        The first successful ToolResult, or a failure result once every
        attempt has been exhausted.
    """
    last_error = None

    attempt = 0
    while attempt < self.retry_attempts:
        attempt += 1
        try:
            self.logger.info(f"Executing step '{step.action}' (attempt {attempt}/{self.retry_attempts})")

            tool = self.tool_registry.get_tool(step.tool_type)
            if tool is None:
                raise ValueError(f"Tool {step.tool_type.value} not found")

            outcome = await tool.execute(step.parameters)
            if outcome.success:
                return outcome

            # Tool reported failure: remember the reason and maybe retry.
            last_error = outcome.error_message
            self.logger.warning(f"Attempt {attempt} failed: {last_error}")

        except Exception as e:
            last_error = str(e)
            self.logger.error(f"Exception on attempt {attempt}: {e}")

        # Linear backoff before the next attempt (none after the last one).
        if attempt < self.retry_attempts:
            await asyncio.sleep(self.retry_delay * attempt)

    # Every attempt failed.
    return ToolResult(
        tool_name=step.tool_type.value,
        success=False,
        error_message=f"All {self.retry_attempts} attempts failed. Last error: {last_error}"
    )
|
1122
|
+
|
|
1123
|
+
async def observe(self, result: ToolResult) -> Dict[str, Any]:
    """
    Analyze a tool result and record an observation in short-term memory.

    Args:
        result: The ToolResult to analyze.

    Returns:
        Dict summarizing the observation.
    """
    self.logger.info(f"Observing result from {result.tool_name}")

    observation = {
        'tool': result.tool_name,
        'success': result.success,
        'execution_time': result.execution_time,
        'timestamp': result.timestamp.isoformat(),
        'findings_extracted': 0
    }

    if not result.success:
        observation['error'] = result.error_message

        self.memory.add_to_short_term({
            'type': 'observation',
            'content': f"{result.tool_name} failed: {result.error_message}",
            'error': True
        })
        return observation

    data = result.data or {}

    # Summarize the well-known payload keys as counts.
    for payload_key, summary_key in (('open_ports', 'open_ports'),
                                     ('findings', 'vulnerabilities'),
                                     ('subdomains', 'subdomains')):
        if payload_key in data:
            observation[summary_key] = len(data[payload_key])

    self.memory.add_to_short_term({
        'type': 'observation',
        'content': f"{result.tool_name} completed successfully",
        'data_keys': list(data.keys())
    })

    return observation
|
1170
|
+
|
|
1171
|
+
async def reflect(self) -> bool:
    """
    Evaluate whether the goal is reached or further actions are needed.

    Returns:
        True if the loop should continue, False to stop.
    """
    self.logger.info("Reflection phase")

    # Abort when errors are piling up. BUG FIX: use total_seconds() —
    # timedelta.seconds is only the sub-day component, so errors older
    # than a day (whose seconds field wrapped back below 60) were
    # wrongly counted as recent.
    recent_errors = [
        e for e in self.progress['errors']
        if (datetime.now() - datetime.fromisoformat(e['timestamp'])).total_seconds() < 60
    ]

    if len(recent_errors) > 5:
        self.logger.error("Too many recent errors, stopping")
        return False

    # Check whether the goal is reached (based on finding severity).
    critical_findings = [f for f in self.memory.findings if f.get('severity') == 'critical']

    # NOTE(review): this branch only logs — it deliberately keeps the loop
    # running so the remaining steps (e.g. the report) still execute;
    # confirm whether early termination is ever wanted here.
    if critical_findings and self.memory.plan_step >= len(self.memory.current_plan) - 1:
        self.logger.info("Critical findings detected and report ready")

    # Stop once the iteration budget is exhausted.
    if self.progress['current_iteration'] >= self.max_iterations:
        self.logger.info("Max iterations reached")
        return False

    # Record the reflection in the rolling context window.
    self.memory.add_to_context_window({
        'type': 'reflection',
        'content': f"Iteration {self.progress['current_iteration']} completed",
        'findings_count': len(self.memory.findings),
        'progress': f"{self.memory.plan_step}/{len(self.memory.current_plan)}"
    })

    return True
|
1212
|
+
|
|
1213
|
+
async def _extract_findings(self, result: ToolResult) -> None:
    """Convert raw tool output into normalized findings stored in memory."""
    data = result.data or {}

    def record(kind: str, severity: str, details: Any) -> None:
        # Every finding shares one envelope; only type/severity/details vary.
        self.memory.add_finding({
            'type': kind,
            'severity': severity,
            'source': result.tool_name,
            'details': details
        })

    # Nmap output: one info-level finding per open port.
    for port_info in data.get('open_ports', []):
        record('open_port', 'info', port_info)

    # Nuclei output: severity is taken from the vulnerability itself.
    for vuln in data.get('findings', []):
        record('vulnerability', vuln.get('severity', 'unknown'), vuln)

    # Subdomain enumeration output: one info-level finding per subdomain.
    for subdomain in data.get('subdomains', []):
        record('subdomain', 'info', subdomain)

    self.progress['findings_count'] = len(self.memory.findings)
|
1248
|
+
|
|
1249
|
+
async def _compile_final_result(self) -> Dict[str, Any]:
    """
    Assemble the final result: execution stats, findings, report and memory.

    Returns:
        Dict with success flag, execution metadata, findings, generated
        report, memory snapshot and progress.
    """
    # BUG FIX: run() sets self.end_time in a `finally` clause, which only
    # executes AFTER this coroutine has produced the return value — so
    # end_time was still None here and duration_seconds was always 0.
    # Fall back to the current time when end_time is not yet recorded.
    end_time = self.end_time if self.end_time is not None else time.time()
    execution_time = end_time - self.start_time if self.start_time else 0

    # Generate the final report from the accumulated findings.
    report_tool = self.tool_registry.get_tool(ToolType.REPORT_GENERATOR)
    report_result = await report_tool.execute({
        'target': self.memory.target,
        'findings': self.memory.findings,
        'format': 'json'
    })

    return {
        'success': True,
        'state': self.state.name,
        'execution': {
            'goal': self.memory.goal if self.memory else "",
            'target': self.memory.target if self.memory else "",
            'duration_seconds': round(execution_time, 2),
            'iterations': self.progress['current_iteration'],
            'steps_completed': self.progress['completed_steps'],
            'total_steps': self.progress['total_steps']
        },
        'findings': {
            'count': len(self.memory.findings) if self.memory else 0,
            'items': self.memory.findings if self.memory else []
        },
        'report': report_result.data if report_result.success else None,
        'memory': self.memory.to_dict() if self.memory else {},
        'progress': self.progress,
        'timestamp': datetime.now().isoformat()
    }
|
1281
|
+
|
|
1282
|
+
def _compile_error_result(self, error: Exception) -> Dict[str, Any]:
    """Build the failure result dict for an aborted run."""
    error_info = {
        'message': str(error),
        'type': type(error).__name__,
        'traceback': traceback.format_exc()
    }
    return {
        'success': False,
        'state': self.state.name,
        'error': error_info,
        'progress': self.progress,
        'timestamp': datetime.now().isoformat()
    }
|
1295
|
+
|
|
1296
|
+
def get_state(self) -> AgentState:
    """Return the current state-machine state."""
    return self.state
|
1299
|
+
|
|
1300
|
+
def get_progress(self) -> Dict[str, Any]:
    """Return a defensive (shallow) copy of the progress snapshot."""
    return dict(self.progress)
|
1303
|
+
|
|
1304
|
+
def is_running(self) -> bool:
    """Return True while the agent is in an active loop phase."""
    active_phases = (
        AgentState.PLANNING,
        AgentState.EXECUTING,
        AgentState.OBSERVING,
        AgentState.REFLECTING,
    )
    return self.state in active_phases
|
1312
|
+
|
|
1313
|
+
def pause(self) -> None:
    """Pause execution (human-in-the-loop); no-op unless running."""
    if not self.is_running():
        return
    self._transition_to(AgentState.PAUSED)
|
1317
|
+
|
|
1318
|
+
def resume(self) -> None:
    """Resume execution from a pause, restoring the prior state."""
    # Only meaningful when paused and a prior state was recorded.
    if self.state != AgentState.PAUSED or not self.previous_state:
        return
    self._transition_to(self.previous_state)
|
1322
|
+
|
|
1323
|
+
|
|
1324
|
+
# Factory Function
|
|
1325
|
+
def create_agent_loop(
    llm_client: Optional[Any] = None,
    max_iterations: int = 50,
    retry_attempts: int = 3
) -> AutonomousAgentLoop:
    """
    Factory for a configured AutonomousAgentLoop.

    Args:
        llm_client: Optional LLM client.
        max_iterations: Iteration budget for the main loop.
        retry_attempts: Retry attempts per failing tool step.

    Returns:
        A configured AutonomousAgentLoop instance.
    """
    agent = AutonomousAgentLoop(
        llm_client=llm_client,
        max_iterations=max_iterations,
        retry_attempts=retry_attempts
    )
    return agent
|
1346
|
+
|
|
1347
|
+
|
|
1348
|
+
# Example usage
|
|
1349
|
+
if __name__ == "__main__":
    async def _demo():
        # Build an agent with a small iteration budget for the demo.
        agent = create_agent_loop(max_iterations=10)

        # Print a progress line after each update.
        def show_progress(progress):
            print(f"Progress: {progress['completed_steps']}/{progress['total_steps']} steps")

        agent.set_progress_callback(show_progress)

        # Run a combined port/vulnerability scan against the example target.
        result = await agent.run(
            goal="Find vulnerabilities and open ports",
            target="example.com",
            scope={"depth": "standard"}
        )

        print("\n=== Execution Result ===")
        print(json.dumps(result, indent=2, default=str))

    asyncio.run(_demo())
|