zen-ai-pentest 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. agents/__init__.py +28 -0
  2. agents/agent_base.py +239 -0
  3. agents/agent_orchestrator.py +346 -0
  4. agents/analysis_agent.py +225 -0
  5. agents/cli.py +258 -0
  6. agents/exploit_agent.py +224 -0
  7. agents/integration.py +211 -0
  8. agents/post_scan_agent.py +937 -0
  9. agents/react_agent.py +384 -0
  10. agents/react_agent_enhanced.py +616 -0
  11. agents/react_agent_vm.py +298 -0
  12. agents/research_agent.py +176 -0
  13. api/__init__.py +11 -0
  14. api/auth.py +123 -0
  15. api/main.py +1027 -0
  16. api/schemas.py +357 -0
  17. api/websocket.py +97 -0
  18. autonomous/__init__.py +122 -0
  19. autonomous/agent.py +253 -0
  20. autonomous/agent_loop.py +1370 -0
  21. autonomous/exploit_validator.py +1537 -0
  22. autonomous/memory.py +448 -0
  23. autonomous/react.py +339 -0
  24. autonomous/tool_executor.py +488 -0
  25. backends/__init__.py +16 -0
  26. backends/chatgpt_direct.py +133 -0
  27. backends/claude_direct.py +130 -0
  28. backends/duckduckgo.py +138 -0
  29. backends/openrouter.py +120 -0
  30. benchmarks/__init__.py +149 -0
  31. benchmarks/benchmark_engine.py +904 -0
  32. benchmarks/ci_benchmark.py +785 -0
  33. benchmarks/comparison.py +729 -0
  34. benchmarks/metrics.py +553 -0
  35. benchmarks/run_benchmarks.py +809 -0
  36. ci_cd/__init__.py +2 -0
  37. core/__init__.py +17 -0
  38. core/async_pool.py +282 -0
  39. core/asyncio_fix.py +222 -0
  40. core/cache.py +472 -0
  41. core/container.py +277 -0
  42. core/database.py +114 -0
  43. core/input_validator.py +353 -0
  44. core/models.py +288 -0
  45. core/orchestrator.py +611 -0
  46. core/plugin_manager.py +571 -0
  47. core/rate_limiter.py +405 -0
  48. core/secure_config.py +328 -0
  49. core/shield_integration.py +296 -0
  50. modules/__init__.py +46 -0
  51. modules/cve_database.py +362 -0
  52. modules/exploit_assist.py +330 -0
  53. modules/nuclei_integration.py +480 -0
  54. modules/osint.py +604 -0
  55. modules/protonvpn.py +554 -0
  56. modules/recon.py +165 -0
  57. modules/sql_injection_db.py +826 -0
  58. modules/tool_orchestrator.py +498 -0
  59. modules/vuln_scanner.py +292 -0
  60. modules/wordlist_generator.py +566 -0
  61. risk_engine/__init__.py +99 -0
  62. risk_engine/business_impact.py +267 -0
  63. risk_engine/business_impact_calculator.py +563 -0
  64. risk_engine/cvss.py +156 -0
  65. risk_engine/epss.py +190 -0
  66. risk_engine/example_usage.py +294 -0
  67. risk_engine/false_positive_engine.py +1073 -0
  68. risk_engine/scorer.py +304 -0
  69. web_ui/backend/main.py +471 -0
  70. zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
  71. zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
  72. zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
  73. zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
  74. zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
  75. zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
@@ -0,0 +1,616 @@
1
+ """
2
+ Enhanced ReAct Agent Loop mit Plan-and-Execute + Reflection
3
+ Issue #18: [2026-Q1] ReAct / Plan-and-Execute Reasoning Loop
4
+
5
+ Verbessert den bestehenden ReAct Agent um:
6
+ 1. Plan Phase - Explizite Planung vor Ausführung
7
+ 2. Reflection Phase - Analyse der Ergebnisse
8
+ 3. Memory Integration - Langfristiges Lernen
9
+ 4. Better Error Recovery - Robuste Fehlerbehandlung
10
+ """
11
+
12
+ from typing import List, TypedDict, Annotated, Literal, Dict, Optional, Any
13
+ from dataclasses import dataclass, field
14
+ from datetime import datetime
15
+ import json
16
+ import logging
17
+
18
+ from langchain_core.tools import tool, BaseTool
19
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, ToolMessage, SystemMessage
20
+ from langchain_core.prompts import ChatPromptTemplate
21
+ from langchain_core.runnables import RunnableConfig
22
+ from langgraph.graph import StateGraph, START, END, add_messages
23
+ from langgraph.checkpoint.memory import MemorySaver
24
+
25
+ from ..core.llm_backend import LLMBackend
26
+ from ..tools.nmap_integration import NmapTool
27
+ from ..tools.nuclei_integration import NucleiTool
28
+ from ..tools.ffuf_integration import FfufTool
29
+ from ..database.cve_database import CVEDatabase
30
+ from ..tools.tool_registry import ToolRegistry, ToolCategory, ToolSafetyLevel
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+
35
class PlanStep(TypedDict):
    """A single step within the execution plan."""
    step_number: int        # 1-based position of the step in the plan
    action: str             # human-readable description of what to do
    tool: Optional[str]     # registry tool name to invoke, or None for a tool-less step
    expected_outcome: str   # what the planner expects this step to yield
    completed: bool         # set True once the step has been executed
    result: Optional[str]   # raw tool output captured after execution (None until run)
43
+
44
+
45
class AgentStateEnhanced(TypedDict):
    """Extended LangGraph state for the enhanced ReAct loop."""
    messages: Annotated[List[BaseMessage], add_messages]  # conversation history; LangGraph merges via add_messages
    findings: List[dict]    # accumulated tool findings across all steps
    target: str             # host/URL under test
    objective: str          # high-level goal supplied by the caller
    iteration: int          # current plan/execute cycle (0-based)
    max_iterations: int     # hard cap on plan/execute cycles
    status: Literal["planning", "executing", "observing", "reflecting", "completed", "error"]

    # New fields for plan-and-execute
    plan: List[PlanStep]            # current structured plan
    current_step_index: int         # index of the step being executed
    reflections: List[dict]         # one entry per reflection phase
    memory_context: Dict[str, Any]  # long-term memory scratchpad

    # Error handling
    error_count: int            # number of tool/parse errors so far
    max_errors: int             # abort threshold for error_count
    last_error: Optional[str]   # most recent error message, if any
65
+
66
+
67
@dataclass
class ReActAgentConfigEnhanced:
    """Extended configuration for the enhanced ReAct agent."""
    max_iterations: int = 10                # cap on plan/execute cycles
    max_plan_steps: int = 5                 # advisory limit for plan length
    enable_sandbox: bool = True             # NOTE(review): not read anywhere in this module — confirm intent
    auto_approve_dangerous: bool = False    # when True, dangerous tools run without approval
    use_human_in_the_loop: bool = True      # require approval before dangerous tools run
    llm_model: str = "gpt-4o"               # model name passed to LLMBackend
    enable_reflection: bool = True          # run the LLM reflection phase
    enable_planning: bool = True            # run the explicit planning phase
    memory_enabled: bool = True             # NOTE(review): not read anywhere in this module — confirm intent
79
+
80
+
81
class ReActAgentEnhanced:
    """
    Enhanced ReAct agent with plan-and-execute + reflection.

    Phase 1: PLAN    - the LLM produces a structured plan
    Phase 2: EXECUTE - plan steps are carried out (tools)
    Phase 3: OBSERVE - results are collected and analysed
    Phase 4: REFLECT - progress is evaluated and the plan adjusted
    Phase 5: LOOP or END
    """
91
+
92
+ def __init__(self, config: ReActAgentConfigEnhanced = None):
93
+ self.config = config or ReActAgentConfigEnhanced()
94
+ self.llm = LLMBackend(model=self.config.llm_model)
95
+ self.cve_db = CVEDatabase()
96
+ self.registry = ToolRegistry()
97
+
98
+ # Tools in Registry laden
99
+ self._initialize_tools()
100
+
101
+ # Hole Tools aus Registry
102
+ self.tools = self.registry.get_all_tools()
103
+ self.tools_by_name = {t.name: t for t in self.tools}
104
+
105
+ # LangGraph Workflow
106
+ self.graph = self._build_graph()
107
+
108
+ # Memory für langfristiges Lernen
109
+ self.session_memory: Dict[str, Any] = {}
110
+
111
+ logger.info(f"Enhanced ReActAgent initialisiert mit {len(self.tools)} Tools aus Registry")
112
+
113
    def _initialize_tools(self):
        """Register the pentest tools in the registry.

        NOTE: the docstring of each @tool function doubles as the tool
        description presented to the LLM, so those strings are left untouched.
        """

        @tool
        def scan_ports(target: str, ports: str = "top-1000") -> str:
            """Scannt Ports auf dem Target mit Nmap"""
            nmap = NmapTool()
            result = nmap.scan(target, ports)
            return json.dumps(result, indent=2)

        # Port scanning is read-only reconnaissance -> SAFE, no approval needed.
        self.registry.register(
            tool=scan_ports,
            category=ToolCategory.RECONNAISSANCE,
            safety_level=ToolSafetyLevel.SAFE,
            tags=["port", "scan", "nmap"]
        )

        @tool
        def scan_vulnerabilities(target: str, templates: str = "critical,high") -> str:
            """Scannt nach CVEs mit Nuclei"""
            nuclei = NucleiTool()
            result = nuclei.scan(target, severity=templates)
            return json.dumps(result, indent=2)

        self.registry.register(
            tool=scan_vulnerabilities,
            category=ToolCategory.SCANNING,
            safety_level=ToolSafetyLevel.SAFE,
            tags=["vulnerability", "scan", "nuclei", "cve"]
        )

        @tool
        def enumerate_directories(target: str, wordlist: str = "common.txt") -> str:
            """Enumerate directories mit ffuf"""
            ffuf = FfufTool()
            result = ffuf.directory_bruteforce(target, wordlist)
            return json.dumps(result, indent=2)

        self.registry.register(
            tool=enumerate_directories,
            category=ToolCategory.RECONNAISSANCE,
            safety_level=ToolSafetyLevel.SAFE,
            tags=["directory", "enumeration", "ffuf", "fuzzing"]
        )

        @tool
        def lookup_cve(cve_id: str) -> str:
            """Sucht CVE-Details in der Datenbank"""
            # Closure over self: reads the shared CVE database instance.
            cve = self.cve_db.get_cve(cve_id)
            if cve:
                return json.dumps({
                    "id": cve.id,
                    "severity": cve.severity,
                    "cvss": cve.cvss_score,
                    "description": cve.description,
                    "epss": cve.epss_score
                }, indent=2)
            return f"CVE {cve_id} nicht gefunden"

        self.registry.register(
            tool=lookup_cve,
            category=ToolCategory.UTILITY,
            safety_level=ToolSafetyLevel.SAFE,
            tags=["cve", "lookup", "database"]
        )

        @tool
        def validate_exploit(cve_id: str, target: str) -> str:
            """Validiert ob ein Exploit auf dem Target funktioniert (read-only)"""
            # Placeholder implementation: returns a fixed not-implemented message.
            return f"Exploit-Validierung für {cve_id} auf {target}: Noch nicht implementiert"

        # Exploitation is potentially destructive -> DANGEROUS + human approval required.
        self.registry.register(
            tool=validate_exploit,
            category=ToolCategory.EXPLOITATION,
            safety_level=ToolSafetyLevel.DANGEROUS,
            requires_approval=True,
            tags=["exploit", "validation", "dangerous"]
        )
191
+
192
+ def _build_graph(self) -> StateGraph:
193
+ """Baut den erweiterten LangGraph Workflow"""
194
+
195
+ llm_with_tools = self.llm.bind_tools(self.tools)
196
+
197
+ # === PHASE 1: PLAN ===
198
+ def plan_node(state: AgentStateEnhanced) -> AgentStateEnhanced:
199
+ """Plan Node: Erstellt einen strukturierten Plan"""
200
+ logger.info(f"[PLAN] Iteration {state['iteration']}")
201
+
202
+ if not self.config.enable_planning:
203
+ # Skip planning, direkt zu execution
204
+ return {**state, "status": "executing"}
205
+
206
+ system_prompt = """Du bist ein strategischer Pentest-Planer.
207
+
208
+ Analysiere das Ziel und erstelle einen strukturierten Plan mit maximal 5 Schritten.
209
+
210
+ Für jeden Schritt definiere:
211
+ 1. action: Was soll gemacht werden?
212
+ 2. tool: Welches Tool wird genutzt (scan_ports, scan_vulnerabilities, enumerate_directories, lookup_cve, validate_exploit)?
213
+ 3. expected_outcome: Was erwarten wir als Ergebnis?
214
+
215
+ Beispiel-Plan:
216
+ {
217
+ "plan": [
218
+ {"step": 1, "action": "Port Scan", "tool": "scan_ports", "expected_outcome": "Offene Ports identifizieren"},
219
+ {"step": 2, "action": "Vulnerability Scan", "tool": "scan_vulnerabilities", "expected_outcome": "CVEs finden"}
220
+ ]
221
+ }
222
+
223
+ WICHTIG:
224
+ - Sei spezifisch bei den Tools
225
+ - Berücksichtige vorherige Ergebnisse
226
+ - Passe den Plan an wenn nötig"""
227
+
228
+ messages = [
229
+ SystemMessage(content=system_prompt),
230
+ HumanMessage(content=f"Ziel: {state['target']}\nAufgabe: {state['objective']}\nVorherige Ergebnisse: {len(state['findings'])} findings")
231
+ ]
232
+
233
+ # LLM generiert Plan
234
+ response = self.llm.invoke(messages)
235
+
236
+ # Parse Plan aus Response
237
+ try:
238
+ plan_data = self._parse_plan_from_response(response.content)
239
+ plan = [
240
+ PlanStep(
241
+ step_number=i+1,
242
+ action=step["action"],
243
+ tool=step.get("tool"),
244
+ expected_outcome=step["expected_outcome"],
245
+ completed=False,
246
+ result=None
247
+ )
248
+ for i, step in enumerate(plan_data.get("plan", []))
249
+ ]
250
+ except Exception as e:
251
+ logger.error(f"Fehler beim Parsen des Plans: {e}")
252
+ plan = []
253
+
254
+ return {
255
+ **state,
256
+ "plan": plan,
257
+ "current_step_index": 0,
258
+ "status": "executing" if plan else "completed",
259
+ "messages": state["messages"] + [AIMessage(content=f"Plan erstellt: {len(plan)} Schritte")]
260
+ }
261
+
262
+ # === PHASE 2: EXECUTE ===
263
+ def execute_node(state: AgentStateEnhanced) -> AgentStateEnhanced:
264
+ """Execute Node: Führt aktuellen Plan-Schritt aus"""
265
+ logger.info(f"[EXECUTE] Schritt {state['current_step_index'] + 1}/{len(state['plan'])}")
266
+
267
+ # Aktuellen Schritt holen
268
+ if not state["plan"] or state["current_step_index"] >= len(state["plan"]):
269
+ return {**state, "status": "completed"}
270
+
271
+ current_step = state["plan"][state["current_step_index"]]
272
+
273
+ # Wenn kein Tool nötig, überspringen
274
+ if not current_step.get("tool"):
275
+ return {
276
+ **state,
277
+ "current_step_index": state["current_step_index"] + 1,
278
+ "status": "observing"
279
+ }
280
+
281
+ tool_name = current_step["tool"]
282
+
283
+ # Safety Check
284
+ if self._is_dangerous_tool(tool_name) and self.config.use_human_in_the_loop:
285
+ if not self.config.auto_approve_dangerous:
286
+ result = f"[PENDING APPROVAL] Tool {tool_name} erfordert manuelle Freigabe"
287
+ return {
288
+ **state,
289
+ "status": "reflecting",
290
+ "messages": state["messages"] + [ToolMessage(content=result, tool_call_id="pending")]
291
+ }
292
+
293
+ # Tool ausführen
294
+ try:
295
+ tool_func = self.tools_by_name.get(tool_name)
296
+ if tool_func:
297
+ # Args aus dem Kontext bauen
298
+ args = {"target": state["target"]}
299
+ result = tool_func.invoke(args)
300
+
301
+ # Schritt aktualisieren
302
+ state["plan"][state["current_step_index"]]["result"] = result
303
+ state["plan"][state["current_step_index"]]["completed"] = True
304
+
305
+ # Finding speichern
306
+ finding = {
307
+ "tool": tool_name,
308
+ "step": state["current_step_index"] + 1,
309
+ "result": result,
310
+ "timestamp": datetime.now().isoformat()
311
+ }
312
+
313
+ return {
314
+ **state,
315
+ "findings": state["findings"] + [finding],
316
+ "status": "observing",
317
+ "messages": state["messages"] + [
318
+ ToolMessage(content=result[:500], tool_call_id=f"step_{state['current_step_index']}")
319
+ ]
320
+ }
321
+ else:
322
+ error = f"Tool {tool_name} nicht gefunden"
323
+ return {
324
+ **state,
325
+ "status": "reflecting",
326
+ "last_error": error,
327
+ "error_count": state.get("error_count", 0) + 1
328
+ }
329
+
330
+ except Exception as e:
331
+ error = f"Fehler bei {tool_name}: {str(e)}"
332
+ logger.error(error)
333
+ return {
334
+ **state,
335
+ "status": "reflecting",
336
+ "last_error": error,
337
+ "error_count": state.get("error_count", 0) + 1
338
+ }
339
+
340
+ # === PHASE 3: OBSERVE ===
341
+ def observe_node(state: AgentStateEnhanced) -> AgentStateEnhanced:
342
+ """Observe Node: Analysiert das Ergebnis"""
343
+ logger.info(f"[OBSERVE] Analysiere Ergebnisse")
344
+
345
+ current_step = state["plan"][state["current_step_index"]]
346
+ result = current_step.get("result", "")
347
+
348
+ # LLM analysiert das Ergebnis
349
+ system_prompt = """Du bist ein Pentest-Analyst.
350
+
351
+ Analysiere das Ergebnis des letzten Schritts:
352
+ - War es erfolgreich?
353
+ - Gibt es kritische Findings?
354
+ - Soll der Plan angepasst werden?
355
+
356
+ Antworte in 2-3 Sätzen."""
357
+
358
+ messages = [
359
+ SystemMessage(content=system_prompt),
360
+ HumanMessage(content=f"Schritt: {current_step['action']}\nErgebnis: {result[:1000]}")
361
+ ]
362
+
363
+ response = self.llm.invoke(messages)
364
+
365
+ return {
366
+ **state,
367
+ "status": "reflecting",
368
+ "messages": state["messages"] + [AIMessage(content=f"Observation: {response.content}")]
369
+ }
370
+
371
+ # === PHASE 4: REFLECT ===
372
+ def reflect_node(state: AgentStateEnhanced) -> AgentStateEnhanced:
373
+ """Reflect Node: Bewertet Fortschritt und entscheidet weiteres Vorgehen"""
374
+ logger.info(f"[REFLECT] Bewerte Fortschritt")
375
+
376
+ if not self.config.enable_reflection:
377
+ return self._advance_or_complete(state)
378
+
379
+ # Prüfe Fehler
380
+ if state.get("error_count", 0) >= state.get("max_errors", 3):
381
+ return {
382
+ **state,
383
+ "status": "error",
384
+ "messages": state["messages"] + [AIMessage(content="Zu viele Fehler - breche ab.")]
385
+ }
386
+
387
+ # Prüfe Iterations-Limit
388
+ if state["iteration"] >= state["max_iterations"]:
389
+ return {
390
+ **state,
391
+ "status": "completed",
392
+ "messages": state["messages"] + [AIMessage(content="Maximale Iterationen erreicht.")]
393
+ }
394
+
395
+ # Reflection mit LLM
396
+ system_prompt = """Du bist ein strategischer Berater für Pentests.
397
+
398
+ Bewerte den aktuellen Stand:
399
+ 1. Sind wir auf dem richtigen Weg?
400
+ 2. Soll der Plan angepasst werden?
401
+ 3. Brauchen wir weitere Iterationen?
402
+
403
+ Antworte mit einer klaren Empfehlung."""
404
+
405
+ messages = [
406
+ SystemMessage(content=system_prompt),
407
+ HumanMessage(content=f"Plan-Fortschritt: {state['current_step_index'] + 1}/{len(state['plan'])}\nFindings: {len(state['findings'])}")
408
+ ]
409
+
410
+ response = self.llm.invoke(messages)
411
+
412
+ # Speichere Reflection
413
+ reflection = {
414
+ "iteration": state["iteration"],
415
+ "timestamp": datetime.now().isoformat(),
416
+ "analysis": response.content,
417
+ "findings_count": len(state["findings"])
418
+ }
419
+
420
+ return self._advance_or_complete({
421
+ **state,
422
+ "reflections": state.get("reflections", []) + [reflection],
423
+ "messages": state["messages"] + [AIMessage(content=f"Reflection: {response.content}")]
424
+ })
425
+
426
+ # Hilfsfunktion für State Transition
427
+ def _advance_or_complete(state: AgentStateEnhanced) -> AgentStateEnhanced:
428
+ """Entscheidet ob weiter oder fertig"""
429
+ next_index = state["current_step_index"] + 1
430
+
431
+ if next_index >= len(state["plan"]):
432
+ # Plan ist komplett - neue Iteration oder fertig?
433
+ if state["iteration"] < state["max_iterations"] - 1:
434
+ # Neue Iteration mit neuem Plan
435
+ return {
436
+ **state,
437
+ "iteration": state["iteration"] + 1,
438
+ "current_step_index": 0,
439
+ "status": "planning"
440
+ }
441
+ else:
442
+ return {**state, "status": "completed"}
443
+ else:
444
+ # Nächster Schritt
445
+ return {
446
+ **state,
447
+ "current_step_index": next_index,
448
+ "status": "executing"
449
+ }
450
+
451
+ # === GRAPH BAUEN ===
452
+ workflow = StateGraph(AgentStateEnhanced)
453
+
454
+ workflow.add_node("plan", plan_node)
455
+ workflow.add_node("execute", execute_node)
456
+ workflow.add_node("observe", observe_node)
457
+ workflow.add_node("reflect", reflect_node)
458
+
459
+ # Entry Point
460
+ workflow.add_edge(START, "plan")
461
+
462
+ # Transitions
463
+ workflow.add_edge("plan", "execute")
464
+ workflow.add_edge("execute", "observe")
465
+ workflow.add_edge("observe", "reflect")
466
+
467
+ # Conditional from reflect
468
+ workflow.add_conditional_edges(
469
+ "reflect",
470
+ lambda state: state["status"],
471
+ {
472
+ "planning": "plan",
473
+ "executing": "execute",
474
+ "completed": END,
475
+ "error": END
476
+ }
477
+ )
478
+
479
+ checkpointer = MemorySaver()
480
+ return workflow.compile(checkpointer=checkpointer)
481
+
482
+ def _parse_plan_from_response(self, content: str) -> dict:
483
+ """Parst den Plan aus der LLM Response"""
484
+ # Versuche JSON zu extrahieren
485
+ try:
486
+ # Suche nach JSON-Block
487
+ if "```json" in content:
488
+ json_str = content.split("```json")[1].split("```")[0]
489
+ elif "```" in content:
490
+ json_str = content.split("```")[1].split("```")[0]
491
+ else:
492
+ json_str = content
493
+
494
+ return json.loads(json_str.strip())
495
+ except:
496
+ # Fallback: manuelles Parsen
497
+ logger.warning("Konnte Plan nicht als JSON parsen, nutze Fallback")
498
+ return {
499
+ "plan": [
500
+ {"action": "Port Scan", "tool": "scan_ports", "expected_outcome": "Offene Ports finden"},
501
+ {"action": "Vulnerability Scan", "tool": "scan_vulnerabilities", "expected_outcome": "CVEs identifizieren"}
502
+ ]
503
+ }
504
+
505
+ def _is_dangerous_tool(self, tool_name: str) -> bool:
506
+ """Prüft ob ein Tool als gefährlich eingestuft wird"""
507
+ dangerous = ["validate_exploit", "exploit", "sqlmap_exploit"]
508
+ return any(d in tool_name.lower() for d in dangerous)
509
+
510
+ def run(self, target: str, objective: str = "comprehensive scan") -> dict:
511
+ """Führt den Enhanced ReAct-Agent aus"""
512
+ initial_state: AgentStateEnhanced = {
513
+ "messages": [HumanMessage(content=f"{objective} on {target}")],
514
+ "findings": [],
515
+ "target": target,
516
+ "objective": objective,
517
+ "iteration": 0,
518
+ "max_iterations": self.config.max_iterations,
519
+ "status": "planning",
520
+ "plan": [],
521
+ "current_step_index": 0,
522
+ "reflections": [],
523
+ "memory_context": {},
524
+ "error_count": 0,
525
+ "max_errors": 3,
526
+ "last_error": None
527
+ }
528
+
529
+ logger.info(f"Starte Enhanced ReAct-Agent für {target}")
530
+
531
+ result = self.graph.invoke(
532
+ initial_state,
533
+ config={"configurable": {"thread_id": f"pentest_{target}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"}}
534
+ )
535
+
536
+ logger.info(f"Agent beendet nach {result['iteration']} Iterationen, {len(result['findings'])} findings")
537
+
538
+ return {
539
+ "findings": result["findings"],
540
+ "plan": result["plan"],
541
+ "reflections": result["reflections"],
542
+ "iterations": result["iteration"],
543
+ "status": result["status"],
544
+ "target": target,
545
+ "objective": objective
546
+ }
547
+
548
+ def generate_report(self, result: dict) -> str:
549
+ """Generiert einen detaillierten Report"""
550
+ report = []
551
+ report.append("=" * 70)
552
+ report.append("ZEN-AI-PENTEST REPORT (Enhanced ReAct)")
553
+ report.append("=" * 70)
554
+ report.append(f"\nTarget: {result['target']}")
555
+ report.append(f"Objective: {result['objective']}")
556
+ report.append(f"Status: {result['status']}")
557
+ report.append(f"Iterations: {result['iterations']}")
558
+ report.append("")
559
+
560
+ # Plan
561
+ if result.get('plan'):
562
+ report.append("EXECUTION PLAN:")
563
+ report.append("-" * 70)
564
+ for step in result['plan']:
565
+ status = "✓" if step.get('completed') else "○"
566
+ report.append(f"{status} Step {step['step_number']}: {step['action']}")
567
+ if step.get('result'):
568
+ report.append(f" Result: {step['result'][:100]}...")
569
+
570
+ # Findings
571
+ report.append("")
572
+ report.append("FINDINGS:")
573
+ report.append("-" * 70)
574
+ for i, finding in enumerate(result['findings'], 1):
575
+ report.append(f"\n{i}. {finding['tool']} (Step {finding['step']})")
576
+ report.append(f" {finding['result'][:200]}...")
577
+
578
+ # Reflections
579
+ if result.get('reflections'):
580
+ report.append("")
581
+ report.append("REFLECTIONS:")
582
+ report.append("-" * 70)
583
+ for ref in result['reflections']:
584
+ report.append(f"\nIteration {ref['iteration']}:")
585
+ report.append(f" {ref['analysis'][:200]}...")
586
+
587
+ report.append("")
588
+ report.append("=" * 70)
589
+
590
+ return "\n".join(report)
591
+
592
+
593
# Module-level singleton holder.
_default_enhanced_agent = None


def get_enhanced_agent(config: ReActAgentConfigEnhanced = None) -> ReActAgentEnhanced:
    """Return the shared enhanced-agent instance.

    A fresh instance is created on first use, or whenever an explicit
    config is supplied (the new instance then replaces the cached one).
    """
    global _default_enhanced_agent
    if config is not None or _default_enhanced_agent is None:
        _default_enhanced_agent = ReActAgentEnhanced(config)
    return _default_enhanced_agent
602
+
603
+
604
if __name__ == "__main__":
    # Demo entry point: run a short, fully-featured scan against the public
    # nmap test host and print the resulting report.
    logging.basicConfig(level=logging.INFO)

    config = ReActAgentConfigEnhanced(
        max_iterations=3,
        enable_planning=True,
        enable_reflection=True
    )

    agent = ReActAgentEnhanced(config)
    result = agent.run("scanme.nmap.org", objective="Port scan and vulnerability assessment")

    print(agent.generate_report(result))