iflow-mcp_developermode-korea_reversecore-mcp 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/METADATA +543 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/RECORD +79 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/top_level.txt +1 -0
- reversecore_mcp/__init__.py +9 -0
- reversecore_mcp/core/__init__.py +78 -0
- reversecore_mcp/core/audit.py +101 -0
- reversecore_mcp/core/binary_cache.py +138 -0
- reversecore_mcp/core/command_spec.py +357 -0
- reversecore_mcp/core/config.py +432 -0
- reversecore_mcp/core/container.py +288 -0
- reversecore_mcp/core/decorators.py +152 -0
- reversecore_mcp/core/error_formatting.py +93 -0
- reversecore_mcp/core/error_handling.py +142 -0
- reversecore_mcp/core/evidence.py +229 -0
- reversecore_mcp/core/exceptions.py +296 -0
- reversecore_mcp/core/execution.py +240 -0
- reversecore_mcp/core/ghidra.py +642 -0
- reversecore_mcp/core/ghidra_helper.py +481 -0
- reversecore_mcp/core/ghidra_manager.py +234 -0
- reversecore_mcp/core/json_utils.py +131 -0
- reversecore_mcp/core/loader.py +73 -0
- reversecore_mcp/core/logging_config.py +206 -0
- reversecore_mcp/core/memory.py +721 -0
- reversecore_mcp/core/metrics.py +198 -0
- reversecore_mcp/core/mitre_mapper.py +365 -0
- reversecore_mcp/core/plugin.py +45 -0
- reversecore_mcp/core/r2_helpers.py +404 -0
- reversecore_mcp/core/r2_pool.py +403 -0
- reversecore_mcp/core/report_generator.py +268 -0
- reversecore_mcp/core/resilience.py +252 -0
- reversecore_mcp/core/resource_manager.py +169 -0
- reversecore_mcp/core/result.py +132 -0
- reversecore_mcp/core/security.py +213 -0
- reversecore_mcp/core/validators.py +238 -0
- reversecore_mcp/dashboard/__init__.py +221 -0
- reversecore_mcp/prompts/__init__.py +56 -0
- reversecore_mcp/prompts/common.py +24 -0
- reversecore_mcp/prompts/game.py +280 -0
- reversecore_mcp/prompts/malware.py +1219 -0
- reversecore_mcp/prompts/report.py +150 -0
- reversecore_mcp/prompts/security.py +136 -0
- reversecore_mcp/resources.py +329 -0
- reversecore_mcp/server.py +727 -0
- reversecore_mcp/tools/__init__.py +49 -0
- reversecore_mcp/tools/analysis/__init__.py +74 -0
- reversecore_mcp/tools/analysis/capa_tools.py +215 -0
- reversecore_mcp/tools/analysis/die_tools.py +180 -0
- reversecore_mcp/tools/analysis/diff_tools.py +643 -0
- reversecore_mcp/tools/analysis/lief_tools.py +272 -0
- reversecore_mcp/tools/analysis/signature_tools.py +591 -0
- reversecore_mcp/tools/analysis/static_analysis.py +479 -0
- reversecore_mcp/tools/common/__init__.py +58 -0
- reversecore_mcp/tools/common/file_operations.py +352 -0
- reversecore_mcp/tools/common/memory_tools.py +516 -0
- reversecore_mcp/tools/common/patch_explainer.py +230 -0
- reversecore_mcp/tools/common/server_tools.py +115 -0
- reversecore_mcp/tools/ghidra/__init__.py +19 -0
- reversecore_mcp/tools/ghidra/decompilation.py +975 -0
- reversecore_mcp/tools/ghidra/ghidra_tools.py +1052 -0
- reversecore_mcp/tools/malware/__init__.py +61 -0
- reversecore_mcp/tools/malware/adaptive_vaccine.py +579 -0
- reversecore_mcp/tools/malware/dormant_detector.py +756 -0
- reversecore_mcp/tools/malware/ioc_tools.py +228 -0
- reversecore_mcp/tools/malware/vulnerability_hunter.py +519 -0
- reversecore_mcp/tools/malware/yara_tools.py +214 -0
- reversecore_mcp/tools/patch_explainer.py +19 -0
- reversecore_mcp/tools/radare2/__init__.py +13 -0
- reversecore_mcp/tools/radare2/r2_analysis.py +972 -0
- reversecore_mcp/tools/radare2/r2_session.py +376 -0
- reversecore_mcp/tools/radare2/radare2_mcp_tools.py +1183 -0
- reversecore_mcp/tools/report/__init__.py +4 -0
- reversecore_mcp/tools/report/email.py +82 -0
- reversecore_mcp/tools/report/report_mcp_tools.py +344 -0
- reversecore_mcp/tools/report/report_tools.py +1076 -0
- reversecore_mcp/tools/report/session.py +194 -0
- reversecore_mcp/tools/report_tools.py +11 -0
|
@@ -0,0 +1,403 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Radare2 Connection Pool
|
|
3
|
+
|
|
4
|
+
This module provides a connection pool for managing persistent r2pipe instances.
|
|
5
|
+
It helps reduce the overhead of spawning new radare2 processes for every command.
|
|
6
|
+
|
|
7
|
+
Features:
|
|
8
|
+
- Configurable pool size via REVERSECORE_R2_POOL_SIZE
|
|
9
|
+
- Configurable timeout via REVERSECORE_R2_POOL_TIMEOUT
|
|
10
|
+
- LRU eviction policy to limit memory usage
|
|
11
|
+
- Thread-safe and async-safe execution
|
|
12
|
+
- Automatic reconnection on failure
|
|
13
|
+
- Health checking for stale connections
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import threading
|
|
18
|
+
import time
|
|
19
|
+
from collections import OrderedDict
|
|
20
|
+
from collections.abc import AsyncGenerator, Generator
|
|
21
|
+
from contextlib import asynccontextmanager, contextmanager
|
|
22
|
+
from typing import Any
|
|
23
|
+
|
|
24
|
+
try:
|
|
25
|
+
import r2pipe
|
|
26
|
+
except ImportError:
|
|
27
|
+
r2pipe = None
|
|
28
|
+
|
|
29
|
+
from reversecore_mcp.core.logging_config import get_logger
|
|
30
|
+
|
|
31
|
+
logger = get_logger(__name__)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class R2PoolTimeout(Exception):
    """Raised when acquiring a pooled r2 connection exceeds the configured timeout."""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class R2ConnectionPool:
|
|
41
|
+
"""
|
|
42
|
+
Manages a pool of persistent r2pipe connections.
|
|
43
|
+
|
|
44
|
+
Features:
|
|
45
|
+
- Configurable pool size (default from config or 10)
|
|
46
|
+
- Configurable acquisition timeout
|
|
47
|
+
- LRU eviction policy to limit memory usage
|
|
48
|
+
- Thread-safe and async-safe execution
|
|
49
|
+
- Automatic reconnection on failure
|
|
50
|
+
- Health checking for stale connections
|
|
51
|
+
|
|
52
|
+
Configuration:
|
|
53
|
+
Pool size and timeout can be configured via environment variables:
|
|
54
|
+
- REVERSECORE_R2_POOL_SIZE: Maximum number of connections (default: 3)
|
|
55
|
+
- REVERSECORE_R2_POOL_TIMEOUT: Acquisition timeout in seconds (default: 30)
|
|
56
|
+
"""
|
|
57
|
+
|
|
58
|
+
def __init__(
|
|
59
|
+
self,
|
|
60
|
+
max_connections: int | None = None,
|
|
61
|
+
acquisition_timeout: int | None = None,
|
|
62
|
+
health_check_interval: int = 60,
|
|
63
|
+
):
|
|
64
|
+
"""Initialize the connection pool.
|
|
65
|
+
|
|
66
|
+
Args:
|
|
67
|
+
max_connections: Maximum number of connections to maintain.
|
|
68
|
+
If None, uses config value or default of 10.
|
|
69
|
+
acquisition_timeout: Timeout in seconds for acquiring a connection.
|
|
70
|
+
If None, uses config value or default of 30.
|
|
71
|
+
health_check_interval: Interval in seconds for health checks.
|
|
72
|
+
"""
|
|
73
|
+
# Lazy-load config to avoid circular imports
|
|
74
|
+
self._max_connections = max_connections
|
|
75
|
+
self._acquisition_timeout = acquisition_timeout
|
|
76
|
+
self._health_check_interval = health_check_interval
|
|
77
|
+
self._config_loaded = False
|
|
78
|
+
|
|
79
|
+
self._pool: OrderedDict[str, Any] = OrderedDict()
|
|
80
|
+
self._lock = threading.RLock()
|
|
81
|
+
self._async_lock: asyncio.Lock | None = None
|
|
82
|
+
self._async_lock_init_lock = threading.Lock()
|
|
83
|
+
self._last_access: dict[str, float] = {}
|
|
84
|
+
self._analyzed_files: set[str] = set()
|
|
85
|
+
self._last_health_check: dict[str, float] = {}
|
|
86
|
+
|
|
87
|
+
# Semaphore for limiting concurrent connections
|
|
88
|
+
self._connection_semaphore: threading.Semaphore | None = None
|
|
89
|
+
self._async_semaphore: asyncio.Semaphore | None = None
|
|
90
|
+
|
|
91
|
+
# Statistics
|
|
92
|
+
self._stats = {
|
|
93
|
+
"connections_created": 0,
|
|
94
|
+
"connections_evicted": 0,
|
|
95
|
+
"cache_hits": 0,
|
|
96
|
+
"cache_misses": 0,
|
|
97
|
+
"reconnections": 0,
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
def _load_config(self) -> None:
|
|
101
|
+
"""Load configuration values lazily."""
|
|
102
|
+
if self._config_loaded:
|
|
103
|
+
return
|
|
104
|
+
|
|
105
|
+
try:
|
|
106
|
+
from reversecore_mcp.core.config import get_config
|
|
107
|
+
|
|
108
|
+
config = get_config()
|
|
109
|
+
if self._max_connections is None:
|
|
110
|
+
self._max_connections = config.r2_pool_size
|
|
111
|
+
if self._acquisition_timeout is None:
|
|
112
|
+
self._acquisition_timeout = config.r2_pool_timeout
|
|
113
|
+
except Exception:
|
|
114
|
+
# Fallback to defaults if config is not available
|
|
115
|
+
if self._max_connections is None:
|
|
116
|
+
self._max_connections = 10
|
|
117
|
+
if self._acquisition_timeout is None:
|
|
118
|
+
self._acquisition_timeout = 30
|
|
119
|
+
|
|
120
|
+
self._config_loaded = True
|
|
121
|
+
|
|
122
|
+
@property
|
|
123
|
+
def max_connections(self) -> int:
|
|
124
|
+
"""Get maximum connections, loading from config if needed."""
|
|
125
|
+
self._load_config()
|
|
126
|
+
return self._max_connections or 10
|
|
127
|
+
|
|
128
|
+
@property
|
|
129
|
+
def acquisition_timeout(self) -> int:
|
|
130
|
+
"""Get acquisition timeout, loading from config if needed."""
|
|
131
|
+
self._load_config()
|
|
132
|
+
return self._acquisition_timeout or 30
|
|
133
|
+
|
|
134
|
+
def _get_connection_semaphore(self) -> threading.Semaphore:
|
|
135
|
+
"""Get or create a connection semaphore for rate limiting."""
|
|
136
|
+
if self._connection_semaphore is None:
|
|
137
|
+
self._connection_semaphore = threading.Semaphore(self.max_connections)
|
|
138
|
+
return self._connection_semaphore
|
|
139
|
+
|
|
140
|
+
def _get_async_semaphore(self) -> asyncio.Semaphore:
|
|
141
|
+
"""Get or create an async semaphore for rate limiting."""
|
|
142
|
+
if self._async_semaphore is None:
|
|
143
|
+
self._async_semaphore = asyncio.Semaphore(self.max_connections)
|
|
144
|
+
return self._async_semaphore
|
|
145
|
+
|
|
146
|
+
def _get_async_lock(self) -> asyncio.Lock:
|
|
147
|
+
"""Get or create an async lock for thread-safe async operations.
|
|
148
|
+
|
|
149
|
+
The lock is lazily initialized to ensure it's created in the correct
|
|
150
|
+
event loop context. Uses double-checked locking pattern for thread-safety.
|
|
151
|
+
"""
|
|
152
|
+
if self._async_lock is not None:
|
|
153
|
+
return self._async_lock
|
|
154
|
+
|
|
155
|
+
with self._async_lock_init_lock:
|
|
156
|
+
if self._async_lock is None:
|
|
157
|
+
self._async_lock = asyncio.Lock()
|
|
158
|
+
return self._async_lock
|
|
159
|
+
|
|
160
|
+
def _is_connection_healthy(self, file_path: str, r2: Any) -> bool:
|
|
161
|
+
"""Check if a connection is still healthy."""
|
|
162
|
+
try:
|
|
163
|
+
# Quick health check: try to get current seek position
|
|
164
|
+
result = r2.cmd("s")
|
|
165
|
+
return result is not None
|
|
166
|
+
except Exception:
|
|
167
|
+
return False
|
|
168
|
+
|
|
169
|
+
def _maybe_health_check(self, file_path: str, r2: Any) -> bool:
|
|
170
|
+
"""Perform health check if enough time has passed."""
|
|
171
|
+
now = time.time()
|
|
172
|
+
last_check = self._last_health_check.get(file_path, 0)
|
|
173
|
+
|
|
174
|
+
if now - last_check > self._health_check_interval:
|
|
175
|
+
self._last_health_check[file_path] = now
|
|
176
|
+
return self._is_connection_healthy(file_path, r2)
|
|
177
|
+
return True # Assume healthy if recently checked
|
|
178
|
+
|
|
179
|
+
def get_connection(self, file_path: str) -> Any:
|
|
180
|
+
"""Get or create an r2pipe connection for the given file."""
|
|
181
|
+
if r2pipe is None:
|
|
182
|
+
raise ImportError("r2pipe is not installed")
|
|
183
|
+
|
|
184
|
+
with self._lock:
|
|
185
|
+
self._last_access[file_path] = time.time()
|
|
186
|
+
|
|
187
|
+
if file_path in self._pool:
|
|
188
|
+
r2 = self._pool[file_path]
|
|
189
|
+
|
|
190
|
+
# Health check
|
|
191
|
+
if not self._maybe_health_check(file_path, r2):
|
|
192
|
+
logger.warning(f"Stale connection for {file_path}, reconnecting")
|
|
193
|
+
self._remove_connection_unsafe(file_path)
|
|
194
|
+
else:
|
|
195
|
+
self._pool.move_to_end(file_path)
|
|
196
|
+
self._stats["cache_hits"] += 1
|
|
197
|
+
return r2
|
|
198
|
+
|
|
199
|
+
self._stats["cache_misses"] += 1
|
|
200
|
+
|
|
201
|
+
# Evict if full
|
|
202
|
+
while len(self._pool) >= self.max_connections:
|
|
203
|
+
oldest_file, oldest_r2 = self._pool.popitem(last=False)
|
|
204
|
+
logger.debug(f"Evicting r2 connection for {oldest_file}")
|
|
205
|
+
self._stats["connections_evicted"] += 1
|
|
206
|
+
try:
|
|
207
|
+
oldest_r2.quit()
|
|
208
|
+
except Exception as e:
|
|
209
|
+
logger.warning(f"Error closing r2 connection: {e}")
|
|
210
|
+
self._last_access.pop(oldest_file, None)
|
|
211
|
+
self._last_health_check.pop(oldest_file, None)
|
|
212
|
+
self._analyzed_files.discard(oldest_file)
|
|
213
|
+
|
|
214
|
+
# Create new connection
|
|
215
|
+
logger.info(f"Opening new r2 connection for {file_path}")
|
|
216
|
+
try:
|
|
217
|
+
r2 = r2pipe.open(file_path, flags=["-2"])
|
|
218
|
+
self._pool[file_path] = r2
|
|
219
|
+
self._last_health_check[file_path] = time.time()
|
|
220
|
+
self._stats["connections_created"] += 1
|
|
221
|
+
return r2
|
|
222
|
+
except Exception as e:
|
|
223
|
+
logger.error(f"Failed to open r2 connection for {file_path}: {e}")
|
|
224
|
+
raise
|
|
225
|
+
|
|
226
|
+
def _remove_connection_unsafe(self, file_path: str) -> None:
|
|
227
|
+
"""Remove a connection without locking (caller must hold lock)."""
|
|
228
|
+
if file_path in self._pool:
|
|
229
|
+
try:
|
|
230
|
+
self._pool[file_path].quit()
|
|
231
|
+
except Exception:
|
|
232
|
+
pass
|
|
233
|
+
del self._pool[file_path]
|
|
234
|
+
self._last_access.pop(file_path, None)
|
|
235
|
+
self._last_health_check.pop(file_path, None)
|
|
236
|
+
self._analyzed_files.discard(file_path)
|
|
237
|
+
|
|
238
|
+
def execute(self, file_path: str, command: str) -> str:
|
|
239
|
+
"""Execute a command on the r2 connection for the given file."""
|
|
240
|
+
with self._lock:
|
|
241
|
+
try:
|
|
242
|
+
r2 = self.get_connection(file_path)
|
|
243
|
+
return r2.cmd(command)
|
|
244
|
+
except Exception as e:
|
|
245
|
+
logger.warning(f"r2 command failed, retrying connection: {e}")
|
|
246
|
+
self._remove_connection_unsafe(file_path)
|
|
247
|
+
self._stats["reconnections"] += 1
|
|
248
|
+
|
|
249
|
+
try:
|
|
250
|
+
r2 = self.get_connection(file_path)
|
|
251
|
+
return r2.cmd(command)
|
|
252
|
+
except Exception as retry_error:
|
|
253
|
+
logger.error(f"Retry failed: {retry_error}")
|
|
254
|
+
raise
|
|
255
|
+
|
|
256
|
+
async def execute_async(self, file_path: str, command: str) -> str:
|
|
257
|
+
"""Execute a command asynchronously with proper async lock."""
|
|
258
|
+
async with self._get_async_lock():
|
|
259
|
+
return await asyncio.to_thread(self._execute_unsafe, file_path, command)
|
|
260
|
+
|
|
261
|
+
def _execute_unsafe(self, file_path: str, command: str) -> str:
|
|
262
|
+
"""Execute with thread lock for safe asyncio.to_thread usage.
|
|
263
|
+
|
|
264
|
+
Note: Despite the name 'unsafe', this method now acquires self._lock
|
|
265
|
+
to ensure thread-safety when called from asyncio.to_thread().
|
|
266
|
+
The async lock in execute_async() serializes async callers,
|
|
267
|
+
while this thread lock protects against concurrent sync callers.
|
|
268
|
+
"""
|
|
269
|
+
with self._lock: # Thread lock for safe pool access
|
|
270
|
+
try:
|
|
271
|
+
r2 = self._get_connection_unsafe(file_path)
|
|
272
|
+
return r2.cmd(command)
|
|
273
|
+
except Exception as e:
|
|
274
|
+
logger.warning(f"r2 command failed, retrying connection: {e}")
|
|
275
|
+
self._remove_connection_unsafe(file_path)
|
|
276
|
+
self._stats["reconnections"] += 1
|
|
277
|
+
|
|
278
|
+
try:
|
|
279
|
+
r2 = self._get_connection_unsafe(file_path)
|
|
280
|
+
return r2.cmd(command)
|
|
281
|
+
except Exception as retry_error:
|
|
282
|
+
logger.error(f"Retry failed: {retry_error}")
|
|
283
|
+
raise
|
|
284
|
+
|
|
285
|
+
def _get_connection_unsafe(self, file_path: str) -> Any:
|
|
286
|
+
"""Get or create connection without locking (caller must hold lock)."""
|
|
287
|
+
if r2pipe is None:
|
|
288
|
+
raise ImportError("r2pipe is not installed")
|
|
289
|
+
|
|
290
|
+
self._last_access[file_path] = time.time()
|
|
291
|
+
|
|
292
|
+
if file_path in self._pool:
|
|
293
|
+
r2 = self._pool[file_path]
|
|
294
|
+
|
|
295
|
+
if not self._maybe_health_check(file_path, r2):
|
|
296
|
+
logger.warning(f"Stale connection for {file_path}, reconnecting")
|
|
297
|
+
self._remove_connection_unsafe(file_path)
|
|
298
|
+
else:
|
|
299
|
+
self._pool.move_to_end(file_path)
|
|
300
|
+
self._stats["cache_hits"] += 1
|
|
301
|
+
return r2
|
|
302
|
+
|
|
303
|
+
self._stats["cache_misses"] += 1
|
|
304
|
+
|
|
305
|
+
# Evict if full
|
|
306
|
+
while len(self._pool) >= self.max_connections:
|
|
307
|
+
oldest_file, oldest_r2 = self._pool.popitem(last=False)
|
|
308
|
+
logger.debug(f"Evicting r2 connection for {oldest_file}")
|
|
309
|
+
self._stats["connections_evicted"] += 1
|
|
310
|
+
try:
|
|
311
|
+
oldest_r2.quit()
|
|
312
|
+
except Exception as e:
|
|
313
|
+
logger.warning(f"Error closing r2 connection: {e}")
|
|
314
|
+
self._last_access.pop(oldest_file, None)
|
|
315
|
+
self._last_health_check.pop(oldest_file, None)
|
|
316
|
+
self._analyzed_files.discard(oldest_file)
|
|
317
|
+
|
|
318
|
+
# Create new connection
|
|
319
|
+
logger.info(f"Opening new r2 connection for {file_path}")
|
|
320
|
+
try:
|
|
321
|
+
r2 = r2pipe.open(file_path, flags=["-2"])
|
|
322
|
+
self._pool[file_path] = r2
|
|
323
|
+
self._last_health_check[file_path] = time.time()
|
|
324
|
+
self._stats["connections_created"] += 1
|
|
325
|
+
return r2
|
|
326
|
+
except Exception as e:
|
|
327
|
+
logger.error(f"Failed to open r2 connection for {file_path}: {e}")
|
|
328
|
+
raise
|
|
329
|
+
|
|
330
|
+
@asynccontextmanager
|
|
331
|
+
async def async_session(self, file_path: str) -> AsyncGenerator[Any, None]:
|
|
332
|
+
"""Async context manager for r2 connection.
|
|
333
|
+
|
|
334
|
+
Usage:
|
|
335
|
+
async with r2_pool.async_session(path) as r2:
|
|
336
|
+
result = r2.cmd('aaa')
|
|
337
|
+
"""
|
|
338
|
+
async with self._get_async_lock():
|
|
339
|
+
r2 = await asyncio.to_thread(self._get_connection_unsafe, file_path)
|
|
340
|
+
try:
|
|
341
|
+
yield r2
|
|
342
|
+
except Exception as e:
|
|
343
|
+
logger.warning(f"Error in async session: {e}")
|
|
344
|
+
# Invalidate connection on error
|
|
345
|
+
if file_path in self._pool:
|
|
346
|
+
del self._pool[file_path]
|
|
347
|
+
raise
|
|
348
|
+
|
|
349
|
+
@contextmanager
|
|
350
|
+
def sync_session(self, file_path: str) -> Generator[Any, None, None]:
|
|
351
|
+
"""Sync context manager for r2 connection.
|
|
352
|
+
|
|
353
|
+
Usage:
|
|
354
|
+
with r2_pool.sync_session(path) as r2:
|
|
355
|
+
result = r2.cmd('aaa')
|
|
356
|
+
"""
|
|
357
|
+
with self._lock:
|
|
358
|
+
r2 = self._get_connection_unsafe(file_path)
|
|
359
|
+
try:
|
|
360
|
+
yield r2
|
|
361
|
+
except Exception as e:
|
|
362
|
+
logger.warning(f"Error in sync session: {e}")
|
|
363
|
+
self._remove_connection_unsafe(file_path)
|
|
364
|
+
raise
|
|
365
|
+
|
|
366
|
+
def close_all(self):
|
|
367
|
+
"""Close all connections in the pool."""
|
|
368
|
+
with self._lock:
|
|
369
|
+
for _file_path, r2 in self._pool.items():
|
|
370
|
+
try:
|
|
371
|
+
r2.quit()
|
|
372
|
+
except Exception:
|
|
373
|
+
pass
|
|
374
|
+
self._pool.clear()
|
|
375
|
+
self._last_access.clear()
|
|
376
|
+
self._last_health_check.clear()
|
|
377
|
+
self._analyzed_files.clear()
|
|
378
|
+
|
|
379
|
+
def is_analyzed(self, file_path: str) -> bool:
|
|
380
|
+
"""Check if the file has been analyzed."""
|
|
381
|
+
with self._lock:
|
|
382
|
+
return file_path in self._analyzed_files
|
|
383
|
+
|
|
384
|
+
def mark_analyzed(self, file_path: str):
|
|
385
|
+
"""Mark the file as analyzed."""
|
|
386
|
+
with self._lock:
|
|
387
|
+
if file_path in self._pool:
|
|
388
|
+
self._analyzed_files.add(file_path)
|
|
389
|
+
|
|
390
|
+
def get_stats(self) -> dict[str, Any]:
|
|
391
|
+
"""Get pool statistics."""
|
|
392
|
+
with self._lock:
|
|
393
|
+
return {
|
|
394
|
+
**self._stats,
|
|
395
|
+
"current_connections": len(self._pool),
|
|
396
|
+
"max_connections": self.max_connections,
|
|
397
|
+
"analyzed_files": len(self._analyzed_files),
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
|
|
401
|
+
# Global instance (for backward compatibility)
# New code should use: from reversecore_mcp.core.container import get_r2_pool
r2_pool: R2ConnectionPool = R2ConnectionPool()
|
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Evidence-based Report Generator for Malware Analysis.
|
|
3
|
+
|
|
4
|
+
This module generates professional SOC/IR reports with evidence tracking,
|
|
5
|
+
confidence levels, and clear differentiation between observed facts and inferences.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
|
|
12
|
+
from reversecore_mcp.core.evidence import (
|
|
13
|
+
EvidenceLevel,
|
|
14
|
+
MITREConfidence,
|
|
15
|
+
Finding,
|
|
16
|
+
Evidence,
|
|
17
|
+
MITRETechnique,
|
|
18
|
+
AnalysisMetadata,
|
|
19
|
+
)
|
|
20
|
+
from reversecore_mcp.core import json_utils as json
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class EvidenceBasedReport:
    """Evidence-based malware analysis report.

    Accumulates findings, MITRE ATT&CK mappings, IOCs and recommendations,
    then renders them as markdown (generate_markdown) or JSON (to_json).
    Findings carry an evidence level so observed facts stay visually
    separated from inferences in the rendered report.
    """

    # Session/sample metadata; end_time is stamped by finalize()
    metadata: AnalysisMetadata
    executive_summary: str = ""
    malware_family: str = "Unknown"
    family_confidence: float = 0.0  # 0.0-1.0; drives the verdict label
    family_evidence: list[str] = field(default_factory=list)

    findings: list[Finding] = field(default_factory=list)
    mitre_techniques: list[MITRETechnique] = field(default_factory=list)
    iocs: dict[str, list[str]] = field(default_factory=dict)  # ioc_type -> values

    recommendations: list[str] = field(default_factory=list)
    yara_rule: str | None = None

    def add_finding(self, finding: Finding) -> None:
        """Add a finding to the report."""
        self.findings.append(finding)

    def add_mitre(self, technique: MITRETechnique) -> None:
        """Add a MITRE technique mapping."""
        self.mitre_techniques.append(technique)

    def add_ioc(self, ioc_type: str, value: str) -> None:
        """Add an IOC, deduplicating values within each type."""
        values = self.iocs.setdefault(ioc_type, [])
        if value not in values:
            values.append(value)

    def set_family(self, family: str, confidence: float, evidence: list[str]) -> None:
        """Set malware family with confidence and evidence."""
        self.malware_family = family
        self.family_confidence = confidence
        self.family_evidence = evidence

    def finalize(self) -> None:
        """Finalize the report (set end time)."""
        self.metadata.end_time = datetime.now()

    @property
    def observed_count(self) -> int:
        """Number of findings at the OBSERVED evidence level."""
        return sum(1 for f in self.findings if f.level == EvidenceLevel.OBSERVED)

    @property
    def inferred_count(self) -> int:
        """Number of findings at the INFERRED evidence level."""
        return sum(1 for f in self.findings if f.level == EvidenceLevel.INFERRED)

    @property
    def possible_count(self) -> int:
        """Number of findings at the POSSIBLE evidence level."""
        return sum(1 for f in self.findings if f.level == EvidenceLevel.POSSIBLE)

    @property
    def overall_confidence(self) -> float:
        """Calculate overall report confidence (mean of finding confidences)."""
        if not self.findings:
            return 0.0
        return sum(f.confidence for f in self.findings) / len(self.findings)

    def generate_markdown(self) -> str:
        """Generate professional markdown report."""
        lines = []

        # Header
        lines.append("# 🔬 Malware Analysis Report")
        lines.append("")
        lines.append(f"**Sample**: {self.metadata.sample_name}")
        lines.append(f"**SHA256**: `{self.metadata.sample_hash}`")
        lines.append(f"**Analysis Date**: {self.metadata.start_time.strftime('%Y-%m-%d %H:%M:%S')}")
        lines.append(f"**Duration**: {self.metadata.duration_formatted}")
        lines.append(f"**Analyst**: {self.metadata.analyst}")
        lines.append("")

        # Confidence Summary Box
        lines.append("---")
        lines.append("")
        lines.append("## 📊 Confidence Summary")
        lines.append("")
        lines.append("| Metric | Value |")
        lines.append("|--------|-------|")
        lines.append(f"| **Overall Confidence** | {self.overall_confidence:.0%} |")
        lines.append(f"| 🔍 Observed Findings | {self.observed_count} |")
        lines.append(f"| 🔎 Inferred Findings | {self.inferred_count} |")
        lines.append(f"| ❓ Possible Findings | {self.possible_count} |")
        lines.append("")

        # Malware Family Identification
        lines.append("---")
        lines.append("")
        lines.append("## 🦠 Malware Identification")
        lines.append("")

        # Confidence thresholds map to verdict labels
        if self.family_confidence >= 0.8:
            verdict = "✅ **CONFIRMED**"
        elif self.family_confidence >= 0.6:
            verdict = "🟡 **LIKELY**"
        elif self.family_confidence >= 0.4:
            verdict = "🟠 **POSSIBLE**"
        else:
            verdict = "❓ **UNCERTAIN**"

        lines.append(f"**Family**: {self.malware_family}")
        lines.append(f"**Confidence**: {self.family_confidence:.0%} {verdict}")
        lines.append("")

        if self.family_evidence:
            lines.append("**Identification Evidence:**")
            for ev in self.family_evidence:
                lines.append(f" - {ev}")
            lines.append("")

        # Executive Summary
        if self.executive_summary:
            lines.append("---")
            lines.append("")
            lines.append("## 📋 Executive Summary")
            lines.append("")
            lines.append(self.executive_summary)
            lines.append("")

        # Findings by Evidence Level
        lines.append("---")
        lines.append("")
        lines.append("## 🔍 Analysis Findings")
        lines.append("")
        lines.append("> **Legend**: 🔍 Observed (verified) | 🔎 Inferred (high confidence) | ❓ Possible (needs verification)")
        lines.append("")

        # Group findings by level
        for level in [EvidenceLevel.OBSERVED, EvidenceLevel.INFERRED, EvidenceLevel.POSSIBLE]:
            level_findings = [f for f in self.findings if f.level == level]
            if level_findings:
                lines.append(f"### {level.symbol} {level.value.upper()} Findings ({len(level_findings)})")
                lines.append("")
                for finding in level_findings:
                    lines.append(finding.format_markdown())
                    lines.append("")

        # MITRE ATT&CK Mapping
        if self.mitre_techniques:
            lines.append("---")
            lines.append("")
            lines.append("## ⚔️ MITRE ATT&CK Mapping")
            lines.append("")
            lines.append("> **Confidence Levels**: ✅ Confirmed | 🟢 High | 🟡 Medium | 🔴 Low")
            lines.append("")
            lines.append("| Technique ID | Name | Tactic | Confidence |")
            lines.append("|-------------|------|--------|------------|")
            for tech in self.mitre_techniques:
                lines.append(tech.format_markdown_row())
            lines.append("")

        # IOCs
        if self.iocs:
            lines.append("---")
            lines.append("")
            lines.append("## 🎯 Indicators of Compromise (IOCs)")
            lines.append("")
            for ioc_type, values in self.iocs.items():
                lines.append(f"### {ioc_type.title()} ({len(values)})")
                lines.append("")
                for val in values:
                    lines.append(f"- `{val}`")
                lines.append("")

        # YARA Rule
        if self.yara_rule:
            lines.append("---")
            lines.append("")
            lines.append("## 📝 Detection Rule (YARA)")
            lines.append("")
            lines.append("```yara")
            lines.append(self.yara_rule)
            lines.append("```")
            lines.append("")

        # Recommendations
        if self.recommendations:
            lines.append("---")
            lines.append("")
            lines.append("## 🛡️ Recommendations")
            lines.append("")
            for i, rec in enumerate(self.recommendations, 1):
                lines.append(f"{i}. {rec}")
            lines.append("")

        # Footer
        lines.append("---")
        lines.append("")
        lines.append("## 📎 Report Metadata")
        lines.append("")
        lines.append(f"- **Session ID**: {self.metadata.session_id}")
        lines.append(f"- **Start Time**: {self.metadata.start_time.isoformat()}")
        if self.metadata.end_time:
            lines.append(f"- **End Time**: {self.metadata.end_time.isoformat()}")
        lines.append(f"- **Duration**: {self.metadata.duration_formatted}")
        lines.append(f"- **Tools Used**: {', '.join(self.metadata.tools_used) if self.metadata.tools_used else 'N/A'}")
        lines.append("")
        lines.append("---")
        lines.append("*Generated by Reversecore MCP Server*")

        return "\n".join(lines)

    def to_dict(self) -> dict[str, Any]:
        """Convert report to dictionary for JSON export."""
        return {
            "metadata": self.metadata.to_dict(),
            "executive_summary": self.executive_summary,
            "malware_family": self.malware_family,
            "family_confidence": self.family_confidence,
            "family_evidence": self.family_evidence,
            "confidence_summary": {
                "overall": round(self.overall_confidence, 2),
                "observed_count": self.observed_count,
                "inferred_count": self.inferred_count,
                "possible_count": self.possible_count,
            },
            "findings": [f.to_dict() for f in self.findings],
            "mitre_techniques": [t.to_dict() for t in self.mitre_techniques],
            "iocs": self.iocs,
            "recommendations": self.recommendations,
            "yara_rule": self.yara_rule,
        }

    def to_json(self, indent: int = 2) -> str:
        """Export report as JSON."""
        return json.dumps(self.to_dict(), indent=indent)
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def create_report(
    session_id: str,
    sample_name: str,
    sample_hash: str,
    analyst: str = "Reversecore MCP",
) -> EvidenceBasedReport:
    """Create a new evidence-based report.

    Builds fresh AnalysisMetadata stamped with the current time and wraps
    it in an otherwise empty EvidenceBasedReport.
    """
    return EvidenceBasedReport(
        metadata=AnalysisMetadata(
            session_id=session_id,
            sample_name=sample_name,
            sample_hash=sample_hash,
            start_time=datetime.now(),
            analyst=analyst,
        )
    )
|