zen-ai-pentest 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +28 -0
- agents/agent_base.py +239 -0
- agents/agent_orchestrator.py +346 -0
- agents/analysis_agent.py +225 -0
- agents/cli.py +258 -0
- agents/exploit_agent.py +224 -0
- agents/integration.py +211 -0
- agents/post_scan_agent.py +937 -0
- agents/react_agent.py +384 -0
- agents/react_agent_enhanced.py +616 -0
- agents/react_agent_vm.py +298 -0
- agents/research_agent.py +176 -0
- api/__init__.py +11 -0
- api/auth.py +123 -0
- api/main.py +1027 -0
- api/schemas.py +357 -0
- api/websocket.py +97 -0
- autonomous/__init__.py +122 -0
- autonomous/agent.py +253 -0
- autonomous/agent_loop.py +1370 -0
- autonomous/exploit_validator.py +1537 -0
- autonomous/memory.py +448 -0
- autonomous/react.py +339 -0
- autonomous/tool_executor.py +488 -0
- backends/__init__.py +16 -0
- backends/chatgpt_direct.py +133 -0
- backends/claude_direct.py +130 -0
- backends/duckduckgo.py +138 -0
- backends/openrouter.py +120 -0
- benchmarks/__init__.py +149 -0
- benchmarks/benchmark_engine.py +904 -0
- benchmarks/ci_benchmark.py +785 -0
- benchmarks/comparison.py +729 -0
- benchmarks/metrics.py +553 -0
- benchmarks/run_benchmarks.py +809 -0
- ci_cd/__init__.py +2 -0
- core/__init__.py +17 -0
- core/async_pool.py +282 -0
- core/asyncio_fix.py +222 -0
- core/cache.py +472 -0
- core/container.py +277 -0
- core/database.py +114 -0
- core/input_validator.py +353 -0
- core/models.py +288 -0
- core/orchestrator.py +611 -0
- core/plugin_manager.py +571 -0
- core/rate_limiter.py +405 -0
- core/secure_config.py +328 -0
- core/shield_integration.py +296 -0
- modules/__init__.py +46 -0
- modules/cve_database.py +362 -0
- modules/exploit_assist.py +330 -0
- modules/nuclei_integration.py +480 -0
- modules/osint.py +604 -0
- modules/protonvpn.py +554 -0
- modules/recon.py +165 -0
- modules/sql_injection_db.py +826 -0
- modules/tool_orchestrator.py +498 -0
- modules/vuln_scanner.py +292 -0
- modules/wordlist_generator.py +566 -0
- risk_engine/__init__.py +99 -0
- risk_engine/business_impact.py +267 -0
- risk_engine/business_impact_calculator.py +563 -0
- risk_engine/cvss.py +156 -0
- risk_engine/epss.py +190 -0
- risk_engine/example_usage.py +294 -0
- risk_engine/false_positive_engine.py +1073 -0
- risk_engine/scorer.py +304 -0
- web_ui/backend/main.py +471 -0
- zen_ai_pentest-2.0.0.dist-info/METADATA +795 -0
- zen_ai_pentest-2.0.0.dist-info/RECORD +75 -0
- zen_ai_pentest-2.0.0.dist-info/WHEEL +5 -0
- zen_ai_pentest-2.0.0.dist-info/entry_points.txt +2 -0
- zen_ai_pentest-2.0.0.dist-info/licenses/LICENSE +21 -0
- zen_ai_pentest-2.0.0.dist-info/top_level.txt +10 -0
autonomous/memory.py
ADDED
|
@@ -0,0 +1,448 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Memory System for Autonomous Agents
|
|
3
|
+
|
|
4
|
+
Implements multi-layer memory:
|
|
5
|
+
- Working Memory: Current session context
|
|
6
|
+
- Short-term: Recent N actions
|
|
7
|
+
- Long-term: Vector store for semantic search
|
|
8
|
+
- Episodic: Full attack chains
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import uuid
|
|
13
|
+
from abc import ABC, abstractmethod
|
|
14
|
+
from dataclasses import dataclass, field, asdict
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
17
|
+
import hashlib
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class MemoryEntry:
    """One record in agent memory: a thought, action, observation, finding, or goal."""
    id: str
    content: str
    memory_type: str  # 'thought', 'action', 'observation', 'finding', 'goal'
    metadata: Dict[str, Any] = field(default_factory=dict)
    timestamp: datetime = field(default_factory=datetime.now)
    embedding: Optional[List[float]] = None
    session_id: str = ""

    def to_dict(self) -> Dict:
        """Serialize to a JSON-friendly dict; note `memory_type` is emitted under the key 'type'."""
        serialized: Dict[str, Any] = dict(
            id=self.id,
            content=self.content,
            type=self.memory_type,
            metadata=self.metadata,
            timestamp=self.timestamp.isoformat(),
            session_id=self.session_id,
        )
        return serialized
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class BaseMemory(ABC):
    """Interface every memory layer (working, long-term, ...) must implement."""

    @abstractmethod
    async def add(self, entry: MemoryEntry) -> None:
        """Store a single memory entry."""
        ...

    @abstractmethod
    async def search(self, query: str, limit: int = 5) -> List[MemoryEntry]:
        """Return up to *limit* entries relevant to *query*."""
        ...

    @abstractmethod
    async def get_recent(self, limit: int = 10) -> List[MemoryEntry]:
        """Return the *limit* most recently stored entries."""
        ...
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class WorkingMemory(BaseMemory):
    """
    Session-scoped memory holding the immediate context:
    the active goal, free-form key/value context, and a bounded
    list of the most recent entries.
    """

    def __init__(self, max_size: int = 100):
        self.max_size = max_size  # hard cap on retained entries
        self.entries: List[MemoryEntry] = []
        self.session_id = str(uuid.uuid4())
        self.current_goal: Optional[str] = None
        self.context: Dict[str, Any] = {}

    async def add(self, entry: MemoryEntry) -> None:
        """Append *entry*, stamped with this session's id; oldest entries drop past max_size."""
        entry.session_id = self.session_id
        self.entries.append(entry)

        overflow = len(self.entries) - self.max_size
        if overflow > 0:
            del self.entries[:overflow]

    async def search(self, query: str, limit: int = 5) -> List[MemoryEntry]:
        """Case-insensitive substring search; returns the last *limit* matches."""
        needle = query.lower()
        hits = [entry for entry in self.entries if needle in entry.content.lower()]
        return hits[-limit:]

    async def get_recent(self, limit: int = 10) -> List[MemoryEntry]:
        """Return the *limit* most recently added entries (oldest first)."""
        return self.entries[-limit:]

    async def get_context_window(self, n: int = 5) -> Dict[str, Any]:
        """Assemble a compact session snapshot suitable for prompting an LLM."""
        window = await self.get_recent(n)
        return {
            'session_id': self.session_id,
            'goal': self.current_goal,
            'context': self.context,
            'recent_actions': [entry.to_dict() for entry in window],
            'memory_count': len(self.entries),
        }

    def set_goal(self, goal: str, context: Optional[Dict] = None):
        """Make *goal* the active goal, merging any extra *context*."""
        self.current_goal = goal
        if context:
            self.context.update(context)

    def update_context(self, key: str, value: Any):
        """Store one context key/value pair."""
        self.context[key] = value

    def clear(self):
        """Forget all entries, the goal, and the context."""
        self.entries = []
        self.current_goal = None
        self.context = {}
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class LongTermMemory(BaseMemory):
    """
    Long-term memory with vector storage for semantic search.

    Uses a simple hash-based bag-of-words embedding and an in-memory dict;
    in production, replace with a real vector DB (ChromaDB, Pinecone, ...).
    Optionally persists the store as JSON at *storage_path*.
    """

    def __init__(self, storage_path: Optional[str] = None):
        # Where to persist the store as JSON; None disables persistence.
        self.storage_path = storage_path
        self.entries: Dict[str, MemoryEntry] = {}

    def _simple_hash_embedding(self, text: str) -> List[float]:
        """
        Build a 128-dim normalized bag-of-words embedding by hashing each
        word into a bucket. Placeholder for real embeddings (OpenAI, HF, ...).
        """
        embedding = [0.0] * 128
        for word in text.lower().split():
            # md5 is used only as a stable bucket hash, not for security
            hash_val = int(hashlib.md5(word.encode()).hexdigest(), 16)
            embedding[hash_val % 128] += 1.0

        # Normalize to unit length so cosine similarity is meaningful
        magnitude = sum(x ** 2 for x in embedding) ** 0.5
        if magnitude > 0:
            embedding = [x / magnitude for x in embedding]

        return embedding

    def _cosine_similarity(self, a: List[float], b: List[float]) -> float:
        """Cosine similarity of two vectors; 0.0 if either is a zero vector."""
        dot_product = sum(x * y for x, y in zip(a, b))
        magnitude_a = sum(x ** 2 for x in a) ** 0.5
        magnitude_b = sum(x ** 2 for x in b) ** 0.5

        if magnitude_a == 0 or magnitude_b == 0:
            return 0.0

        return dot_product / (magnitude_a * magnitude_b)

    async def add(self, entry: MemoryEntry) -> None:
        """Store *entry*, computing an embedding if it has none, then persist."""
        if entry.embedding is None:
            entry.embedding = self._simple_hash_embedding(entry.content)

        self.entries[entry.id] = entry

        if self.storage_path:
            await self._persist()

    async def search(self, query: str, limit: int = 5) -> List[MemoryEntry]:
        """Return up to *limit* entries ranked by cosine similarity to *query*."""
        query_embedding = self._simple_hash_embedding(query)

        scored = [
            (self._cosine_similarity(query_embedding, entry.embedding), entry)
            for entry in self.entries.values()
            if entry.embedding
        ]
        scored.sort(key=lambda pair: pair[0], reverse=True)

        return [entry for _, entry in scored[:limit]]

    async def get_recent(self, limit: int = 10) -> List[MemoryEntry]:
        """Return the *limit* entries with the newest timestamps, newest first."""
        sorted_entries = sorted(
            self.entries.values(),
            key=lambda e: e.timestamp,
            reverse=True
        )
        return sorted_entries[:limit]

    async def _persist(self):
        """Write all entries (including embeddings) to *storage_path* as JSON."""
        if not self.storage_path:
            return

        data = {
            k: {**v.to_dict(), 'embedding': v.embedding}
            for k, v in self.entries.items()
        }

        with open(self.storage_path, 'w') as f:
            json.dump(data, f)

    async def load(self):
        """
        Load entries from *storage_path*.

        Best-effort: a missing or corrupt file leaves the store empty
        rather than crashing (the original only tolerated a missing file;
        a truncated/invalid JSON file would raise JSONDecodeError).
        """
        if not self.storage_path:
            return

        try:
            with open(self.storage_path, 'r') as f:
                data = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            return

        for entry_id, entry_data in data.items():
            self.entries[entry_id] = MemoryEntry(
                id=entry_data['id'],
                content=entry_data['content'],
                memory_type=entry_data['type'],
                metadata=entry_data.get('metadata', {}),
                timestamp=datetime.fromisoformat(entry_data['timestamp']),
                embedding=entry_data.get('embedding'),
                session_id=entry_data.get('session_id', '')
            )
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
class EpisodicMemory:
    """
    Episodic memory for storing complete attack chains.
    Used for learning from past operations.
    """

    def __init__(self):
        # Chronological list of recorded episodes (plain JSON-friendly dicts).
        self.episodes: List[Dict] = []

    def record_episode(
        self,
        goal: str,
        steps: List[Dict],
        outcome: str,
        success: bool,
        lessons_learned: List[str]
    ):
        """Append a completed attack episode (goal, steps taken, outcome, lessons)."""
        episode = {
            'id': str(uuid.uuid4()),
            'goal': goal,
            'steps': steps,
            'outcome': outcome,
            'success': success,
            'lessons_learned': lessons_learned,
            'timestamp': datetime.now().isoformat()
        }

        self.episodes.append(episode)

    def get_similar_episodes(self, goal: str) -> List[Dict]:
        """
        Return up to 3 episodes whose goals are most similar to *goal*,
        scored by Jaccard similarity over whitespace-split words.
        """
        goal_words = set(goal.lower().split())

        scored = []
        for episode in self.episodes:
            episode_words = set(episode['goal'].lower().split())
            union = goal_words | episode_words
            # Guard: both goals empty -> no basis for similarity.
            # (The original divided unconditionally and raised ZeroDivisionError.)
            similarity = len(goal_words & episode_words) / len(union) if union else 0.0
            scored.append((similarity, episode))

        scored.sort(key=lambda x: x[0], reverse=True)
        return [ep for _, ep in scored[:3]]
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
class MemoryManager:
    """
    Unified interface for all memory systems.
    Coordinates working, long-term, and episodic memory.
    """

    def __init__(
        self,
        long_term_path: Optional[str] = None,
        enable_embeddings: bool = False
    ):
        self.working = WorkingMemory()
        self.long_term = LongTermMemory(long_term_path)
        self.episodic = EpisodicMemory()
        # Reserved flag for switching to real embeddings; the current
        # LongTermMemory hash embedding does not consult it.
        self.enable_embeddings = enable_embeddings

    async def add_goal(self, goal: str, context: Optional[Dict] = None):
        """Record a new goal in both working and long-term memory."""
        self.working.set_goal(goal, context)

        entry = MemoryEntry(
            id=str(uuid.uuid4()),
            content=goal,
            memory_type='goal',
            metadata={'context': context or {}}
        )

        await self.working.add(entry)
        await self.long_term.add(entry)

    async def add_experience(
        self,
        thought: Any,
        action: Any,
        observation: Any
    ):
        """
        Record one complete ReAct cycle (thought -> action -> observation)
        in working memory and promote any findings to long-term memory.

        Arguments are duck-typed via getattr/hasattr, except *action*,
        which must expose ``type.name`` (the ReAct action enum).
        """
        thought_entry = MemoryEntry(
            id=str(uuid.uuid4()),
            content=str(thought.content) if hasattr(thought, 'content') else str(thought),
            memory_type='thought',
            metadata={'step': getattr(thought, 'step_number', 0)}
        )
        await self.working.add(thought_entry)

        action_entry = MemoryEntry(
            id=str(uuid.uuid4()),
            content=f"{action.type.name}: {getattr(action, 'tool_name', '')}",
            memory_type='action',
            metadata={
                'tool': getattr(action, 'tool_name', None),
                'parameters': getattr(action, 'parameters', {}),
                'step': getattr(action, 'step_number', 0)
            }
        )
        await self.working.add(action_entry)

        obs_content = str(observation.result) if hasattr(observation, 'result') else str(observation)
        obs_entry = MemoryEntry(
            id=str(uuid.uuid4()),
            content=obs_content[:1000],  # truncate long tool outputs
            memory_type='observation',
            metadata={
                'success': getattr(observation, 'success', True),
                'step': getattr(observation, 'step_number', 0)
            }
        )
        await self.working.add(obs_entry)

        # Promote important findings to long-term memory
        if hasattr(observation, 'result') and observation.result:
            for finding in self._extract_findings(observation.result):
                finding_entry = MemoryEntry(
                    id=str(uuid.uuid4()),
                    content=str(finding),
                    memory_type='finding',
                    metadata={'source': 'tool_execution'}
                )
                await self.long_term.add(finding_entry)

    async def get_relevant_context(self, query: str) -> Dict[str, Any]:
        """Gather context for *query* from all three memory layers."""
        working_context = await self.working.get_context_window()
        long_term_memories = await self.long_term.search(query, limit=3)
        similar_episodes = self.episodic.get_similar_episodes(query)

        return {
            'current_session': working_context,
            'relevant_past': [m.to_dict() for m in long_term_memories],
            'similar_episodes': similar_episodes
        }

    async def search(self, query: str) -> List[MemoryEntry]:
        """Search working and long-term memory, deduplicating by entry id."""
        working_results = await self.working.search(query)
        long_term_results = await self.long_term.search(query)

        seen = set()
        unique = []
        for r in working_results + long_term_results:
            if r.id not in seen:
                seen.add(r.id)
                unique.append(r)

        return unique

    async def get_findings(self) -> List[Dict]:
        """
        Get all security findings from memory.

        Scans every long-term entry directly: the previous implementation
        went through a semantic search whose default limit of 5 silently
        capped the number of findings returned.
        """
        return [
            entry.to_dict()
            for entry in self.long_term.entries.values()
            if entry.memory_type == 'finding'
        ]

    def _extract_findings(self, result: Any) -> List[Any]:
        """Pull finding-like items out of a tool result dict."""
        findings: List[Any] = []

        if isinstance(result, dict):
            # Common keys that carry security-relevant results
            for key in ('findings', 'open_ports', 'vulnerabilities'):
                if key in result:
                    findings.extend(result[key])

        return findings

    async def record_episode(self, outcome: str, success: bool):
        """Snapshot the current working-memory session as a completed episode."""
        recent = await self.working.get_recent(100)

        self.episodic.record_episode(
            goal=self.working.current_goal or "Unknown",
            steps=[e.to_dict() for e in recent],
            outcome=outcome,
            success=success,
            lessons_learned=[]  # could be extracted from post-hoc analysis
        )

    async def load(self):
        """Load long-term memory from disk."""
        await self.long_term.load()
|