iflow-mcp_developermode-korea_reversecore-mcp 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/METADATA +543 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/RECORD +79 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/top_level.txt +1 -0
- reversecore_mcp/__init__.py +9 -0
- reversecore_mcp/core/__init__.py +78 -0
- reversecore_mcp/core/audit.py +101 -0
- reversecore_mcp/core/binary_cache.py +138 -0
- reversecore_mcp/core/command_spec.py +357 -0
- reversecore_mcp/core/config.py +432 -0
- reversecore_mcp/core/container.py +288 -0
- reversecore_mcp/core/decorators.py +152 -0
- reversecore_mcp/core/error_formatting.py +93 -0
- reversecore_mcp/core/error_handling.py +142 -0
- reversecore_mcp/core/evidence.py +229 -0
- reversecore_mcp/core/exceptions.py +296 -0
- reversecore_mcp/core/execution.py +240 -0
- reversecore_mcp/core/ghidra.py +642 -0
- reversecore_mcp/core/ghidra_helper.py +481 -0
- reversecore_mcp/core/ghidra_manager.py +234 -0
- reversecore_mcp/core/json_utils.py +131 -0
- reversecore_mcp/core/loader.py +73 -0
- reversecore_mcp/core/logging_config.py +206 -0
- reversecore_mcp/core/memory.py +721 -0
- reversecore_mcp/core/metrics.py +198 -0
- reversecore_mcp/core/mitre_mapper.py +365 -0
- reversecore_mcp/core/plugin.py +45 -0
- reversecore_mcp/core/r2_helpers.py +404 -0
- reversecore_mcp/core/r2_pool.py +403 -0
- reversecore_mcp/core/report_generator.py +268 -0
- reversecore_mcp/core/resilience.py +252 -0
- reversecore_mcp/core/resource_manager.py +169 -0
- reversecore_mcp/core/result.py +132 -0
- reversecore_mcp/core/security.py +213 -0
- reversecore_mcp/core/validators.py +238 -0
- reversecore_mcp/dashboard/__init__.py +221 -0
- reversecore_mcp/prompts/__init__.py +56 -0
- reversecore_mcp/prompts/common.py +24 -0
- reversecore_mcp/prompts/game.py +280 -0
- reversecore_mcp/prompts/malware.py +1219 -0
- reversecore_mcp/prompts/report.py +150 -0
- reversecore_mcp/prompts/security.py +136 -0
- reversecore_mcp/resources.py +329 -0
- reversecore_mcp/server.py +727 -0
- reversecore_mcp/tools/__init__.py +49 -0
- reversecore_mcp/tools/analysis/__init__.py +74 -0
- reversecore_mcp/tools/analysis/capa_tools.py +215 -0
- reversecore_mcp/tools/analysis/die_tools.py +180 -0
- reversecore_mcp/tools/analysis/diff_tools.py +643 -0
- reversecore_mcp/tools/analysis/lief_tools.py +272 -0
- reversecore_mcp/tools/analysis/signature_tools.py +591 -0
- reversecore_mcp/tools/analysis/static_analysis.py +479 -0
- reversecore_mcp/tools/common/__init__.py +58 -0
- reversecore_mcp/tools/common/file_operations.py +352 -0
- reversecore_mcp/tools/common/memory_tools.py +516 -0
- reversecore_mcp/tools/common/patch_explainer.py +230 -0
- reversecore_mcp/tools/common/server_tools.py +115 -0
- reversecore_mcp/tools/ghidra/__init__.py +19 -0
- reversecore_mcp/tools/ghidra/decompilation.py +975 -0
- reversecore_mcp/tools/ghidra/ghidra_tools.py +1052 -0
- reversecore_mcp/tools/malware/__init__.py +61 -0
- reversecore_mcp/tools/malware/adaptive_vaccine.py +579 -0
- reversecore_mcp/tools/malware/dormant_detector.py +756 -0
- reversecore_mcp/tools/malware/ioc_tools.py +228 -0
- reversecore_mcp/tools/malware/vulnerability_hunter.py +519 -0
- reversecore_mcp/tools/malware/yara_tools.py +214 -0
- reversecore_mcp/tools/patch_explainer.py +19 -0
- reversecore_mcp/tools/radare2/__init__.py +13 -0
- reversecore_mcp/tools/radare2/r2_analysis.py +972 -0
- reversecore_mcp/tools/radare2/r2_session.py +376 -0
- reversecore_mcp/tools/radare2/radare2_mcp_tools.py +1183 -0
- reversecore_mcp/tools/report/__init__.py +4 -0
- reversecore_mcp/tools/report/email.py +82 -0
- reversecore_mcp/tools/report/report_mcp_tools.py +344 -0
- reversecore_mcp/tools/report/report_tools.py +1076 -0
- reversecore_mcp/tools/report/session.py +194 -0
- reversecore_mcp/tools/report_tools.py +11 -0
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Resilience patterns for Reversecore_MCP.
|
|
3
|
+
|
|
4
|
+
This module implements the Circuit Breaker pattern to prevent cascading failures
|
|
5
|
+
when external tools (like Radare2 or Ghidra) become unstable.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import functools
|
|
9
|
+
import inspect
|
|
10
|
+
import threading
|
|
11
|
+
import time
|
|
12
|
+
from collections.abc import Callable
|
|
13
|
+
from enum import Enum
|
|
14
|
+
from typing import TypeVar
|
|
15
|
+
|
|
16
|
+
from reversecore_mcp.core.exceptions import ToolExecutionError
|
|
17
|
+
from reversecore_mcp.core.logging_config import get_logger
|
|
18
|
+
|
|
19
|
+
logger = get_logger(__name__)
|
|
20
|
+
|
|
21
|
+
F = TypeVar("F", bound=Callable)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class CircuitState(Enum):
    """The three lifecycle states of a circuit breaker."""

    CLOSED = "CLOSED"  # Normal operation
    OPEN = "OPEN"  # Failing, requests blocked
    HALF_OPEN = "HALF_OPEN"  # Testing recovery
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class CircuitBreaker:
    """
    Circuit Breaker implementation with thread-safe state transitions.

    If a tool fails 'failure_threshold' times within a window, the circuit opens
    and blocks requests for 'recovery_timeout' seconds. While HALF_OPEN, exactly
    one probe request is allowed through; its recorded outcome decides whether
    the circuit closes again or reopens.
    """

    def __init__(self, name: str, failure_threshold: int = 5, recovery_timeout: int = 60):
        self.name = name
        self.failure_threshold = failure_threshold
        self.recovery_timeout = recovery_timeout

        self.state = CircuitState.CLOSED
        self.failures = 0
        self.last_failure_time = 0.0
        self.next_attempt_time = 0.0

        # True while the single HALF_OPEN probe request is in flight.
        self._half_open_probe = False

        # Thread lock for atomic state transitions
        self._lock = threading.Lock()

    def allow_request(self) -> bool:
        """Check if a request is allowed (thread-safe).

        Returns:
            True if the caller may proceed; False if the circuit is OPEN and
            the recovery timeout has not elapsed, or a HALF_OPEN probe is
            already in flight.
        """
        with self._lock:
            if self.state == CircuitState.CLOSED:
                return True

            if self.state == CircuitState.OPEN:
                if time.time() >= self.next_attempt_time:
                    logger.info(f"Circuit {self.name} entering HALF_OPEN state")
                    self.state = CircuitState.HALF_OPEN
                    # This caller becomes the single recovery probe.
                    self._half_open_probe = True
                    return True
                return False

            # HALF_OPEN: allow exactly one probe at a time. (Previously this
            # branch returned True unconditionally — the lock only made the
            # *check* atomic, so any number of requests could pass before the
            # probe's outcome was recorded.)
            if self._half_open_probe:
                return False
            self._half_open_probe = True
            return True

    def record_success(self):
        """Record a successful execution (thread-safe)."""
        with self._lock:
            if self.state == CircuitState.HALF_OPEN:
                logger.info(f"Circuit {self.name} recovered (CLOSED)")
                self.state = CircuitState.CLOSED
                self.failures = 0
                self._half_open_probe = False
            elif self.state == CircuitState.CLOSED:
                self.failures = 0

    def record_failure(self):
        """Record a failed execution (thread-safe)."""
        with self._lock:
            self.failures += 1
            self.last_failure_time = time.time()

            if self.state == CircuitState.CLOSED:
                if self.failures >= self.failure_threshold:
                    logger.warning(f"Circuit {self.name} opened due to {self.failures} failures")
                    self.state = CircuitState.OPEN
                    self.next_attempt_time = time.time() + self.recovery_timeout

            elif self.state == CircuitState.HALF_OPEN:
                logger.warning(f"Circuit {self.name} failed recovery, reopening")
                self.state = CircuitState.OPEN
                self.next_attempt_time = time.time() + self.recovery_timeout
                self._half_open_probe = False
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
# Global registry of circuit breakers. Guarded by _registry_lock so that two
# threads asking for the same name never create two distinct breakers (the
# original check-then-insert was racy).
_breakers: dict[str, CircuitBreaker] = {}
_registry_lock = threading.Lock()


def get_circuit_breaker(name: str, **kwargs) -> CircuitBreaker:
    """Get or create a circuit breaker for the given name (thread-safe).

    Args:
        name: Registry key identifying the breaker.
        **kwargs: Forwarded to ``CircuitBreaker`` only on first creation;
            ignored when a breaker with this name already exists.

    Returns:
        The shared CircuitBreaker instance for ``name``.
    """
    with _registry_lock:
        breaker = _breakers.get(name)
        if breaker is None:
            breaker = _breakers[name] = CircuitBreaker(name, **kwargs)
        return breaker
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def circuit_breaker(
    tool_name: str, failure_threshold: int = 5, recovery_timeout: int = 60
) -> Callable[[F], F]:
    """
    Decorator applying the circuit breaker pattern to a function.

    Works for both sync and async callables: the coroutine check is done once
    at decoration time and the matching wrapper is installed.

    Args:
        tool_name: Name of the tool for circuit breaker tracking
        failure_threshold: Number of failures before opening circuit
        recovery_timeout: Seconds to wait before attempting recovery

    Returns:
        Decorated function with circuit breaker protection
    """

    def decorator(func: F) -> F:
        breaker = get_circuit_breaker(
            tool_name,
            failure_threshold=failure_threshold,
            recovery_timeout=recovery_timeout,
        )

        def _unavailable() -> ToolExecutionError:
            """Build the error raised while the circuit is open."""
            remaining = int(breaker.next_attempt_time - time.time())
            return ToolExecutionError(
                f"Tool '{tool_name}' is temporarily unavailable due to repeated failures. "
                f"Please try again in {max(0, remaining)} seconds."
            )

        is_async = inspect.iscoroutinefunction(func)

        if is_async:

            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                if not breaker.allow_request():
                    raise _unavailable()
                try:
                    result = await func(*args, **kwargs)
                    breaker.record_success()
                    return result
                except Exception:
                    breaker.record_failure()
                    raise

            return async_wrapper  # type: ignore[return-value]

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            if not breaker.allow_request():
                raise _unavailable()
            try:
                result = func(*args, **kwargs)
                breaker.record_success()
                return result
            except Exception:
                breaker.record_failure()
                raise

        return sync_wrapper  # type: ignore[return-value]

    return decorator
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
def circuit_breaker_sync(
    tool_name: str, failure_threshold: int = 5, recovery_timeout: int = 60
) -> Callable[[F], F]:
    """
    Explicit sync-only circuit breaker decorator.

    Use this when you want to explicitly mark a function as sync,
    or when the auto-detection in circuit_breaker doesn't work correctly.

    Args:
        tool_name: Name of the tool for circuit breaker tracking
        failure_threshold: Number of failures before opening circuit
        recovery_timeout: Seconds to wait before attempting recovery
    """

    def decorator(func: F) -> F:
        breaker = get_circuit_breaker(
            tool_name,
            failure_threshold=failure_threshold,
            recovery_timeout=recovery_timeout,
        )

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not breaker.allow_request():
                # Clamp to zero so the message never reports negative seconds
                # (consistent with circuit_breaker's message).
                remaining = int(breaker.next_attempt_time - time.time())
                raise ToolExecutionError(
                    f"Tool '{tool_name}' is temporarily unavailable due to repeated failures. "
                    f"Please try again in {max(0, remaining)} seconds."
                )

            try:
                result = func(*args, **kwargs)
                breaker.record_success()
                return result
            except Exception:
                breaker.record_failure()
                raise

        return wrapper  # type: ignore[return-value]

    return decorator
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def circuit_breaker_async(
    tool_name: str, failure_threshold: int = 5, recovery_timeout: int = 60
) -> Callable[[F], F]:
    """
    Explicit async-only circuit breaker decorator.

    Use this when you want to explicitly mark a function as async,
    or when the auto-detection in circuit_breaker doesn't work correctly.

    Args:
        tool_name: Name of the tool for circuit breaker tracking
        failure_threshold: Number of failures before opening circuit
        recovery_timeout: Seconds to wait before attempting recovery
    """

    def decorator(func: F) -> F:
        breaker = get_circuit_breaker(
            tool_name,
            failure_threshold=failure_threshold,
            recovery_timeout=recovery_timeout,
        )

        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            if not breaker.allow_request():
                # Clamp to zero so the message never reports negative seconds
                # (consistent with circuit_breaker's message).
                remaining = int(breaker.next_attempt_time - time.time())
                raise ToolExecutionError(
                    f"Tool '{tool_name}' is temporarily unavailable due to repeated failures. "
                    f"Please try again in {max(0, remaining)} seconds."
                )

            try:
                result = await func(*args, **kwargs)
                breaker.record_success()
                return result
            except Exception:
                breaker.record_failure()
                raise

        return wrapper  # type: ignore[return-value]

    return decorator
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Resource Manager for Reversecore_MCP.
|
|
3
|
+
|
|
4
|
+
This module handles periodic cleanup of temporary files and stale cache entries
|
|
5
|
+
to prevent resource exhaustion over time.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import time
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
from contextlib import suppress
|
|
13
|
+
|
|
14
|
+
from reversecore_mcp.core import config
|
|
15
|
+
from reversecore_mcp.core.logging_config import get_logger
|
|
16
|
+
|
|
17
|
+
logger = get_logger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ResourceManager:
    """
    Manages background cleanup tasks.

    Two independent asyncio loops run while the manager is active:
    1. a slow loop that removes stale temporary files from the workspace, and
    2. a fast loop that reaps zombie child processes.
    """

    def __init__(self, cleanup_interval: int = 3600, pid_check_interval: int = 60):
        # Defaults: files swept hourly, PIDs checked every minute.
        self.cleanup_interval = cleanup_interval
        self.pid_check_interval = pid_check_interval
        self._running = False
        self._task: asyncio.Task | None = None
        self._pid_task: asyncio.Task | None = None
        self._tracked_pids: set[int] = set()

    def track_pid(self, pid: int) -> None:
        """Register a subprocess PID so the zombie reaper can collect it."""
        self._tracked_pids.add(pid)

    async def start(self):
        """Start the background cleanup tasks (no-op if already running)."""
        if self._running:
            return

        self._running = True
        self._task = asyncio.create_task(self._cleanup_loop())
        self._pid_task = asyncio.create_task(self._pid_check_loop())
        logger.info(
            f"Resource Manager started (cleanup: {self.cleanup_interval}s, pid_check: {self.pid_check_interval}s)"
        )

    async def stop(self):
        """Cancel and await both background tasks."""
        self._running = False
        for task in (self._task, self._pid_task):
            if not task:
                continue
            task.cancel()
            with suppress(asyncio.CancelledError):
                await task
        logger.info("Resource Manager stopped")

    async def _cleanup_loop(self):
        """Slow loop: sweep stale files/logs every cleanup_interval seconds."""
        while self._running:
            try:
                await asyncio.sleep(self.cleanup_interval)
                await self.cleanup()
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error in cleanup loop: {e}")

    async def _pid_check_loop(self):
        """Fast loop: reap zombie children every pid_check_interval seconds."""
        while self._running:
            try:
                await asyncio.sleep(self.pid_check_interval)
                self._reap_zombies()
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error in PID check loop: {e}")

    def _reap_zombies(self):
        """Reap tracked zombie processes."""
        if not self._tracked_pids:
            return

        finished: set[int] = set()
        reaped = 0

        for pid in list(self._tracked_pids):
            try:
                # os.waitpid with WNOHANG yields (pid, status) for an exited
                # child and (0, 0) while it is still running.
                wpid, _status = os.waitpid(pid, os.WNOHANG)
                if wpid > 0:
                    # The zombie has now been collected by the kernel.
                    finished.add(pid)
                    reaped += 1
            except ChildProcessError:
                # Already collected elsewhere, or not our child — drop it.
                finished.add(pid)
            except Exception as e:
                logger.debug(f"Failed to check PID {pid}: {e}")

        if finished:
            self._tracked_pids -= finished
            if reaped > 0:
                logger.info(f"Reaped {reaped} zombie processes")

    async def cleanup(self):
        """Perform cleanup operations."""
        logger.info("Starting periodic resource cleanup...")

        # Binary-metadata cache entries are validated against mtime on read,
        # so no explicit eviction is performed here.

        cfg = config.get_config()
        workspace = cfg.workspace

        try:
            from itertools import chain

            now = time.time()
            max_age = 24 * 3600  # remove temp files older than 24 hours
            removed = 0

            # Single lazy pass over every temp-file pattern in the workspace.
            candidates = chain(
                workspace.glob("*.tmp"), workspace.glob(".r2_*"), workspace.glob("*.r2")
            )

            for temp_file in candidates:
                try:
                    if temp_file.is_file() and now - temp_file.stat().st_mtime > max_age:
                        temp_file.unlink()
                        removed += 1
                except Exception as e:
                    logger.warning(f"Failed to delete temp file {temp_file}: {e}")

            if removed > 0:
                logger.info(f"Cleaned up {removed} stale temporary files")

        except Exception as e:
            logger.error(f"Failed to clean temp files: {e}")

        # The r2 session pool evicts its own entries via LRU; nothing to do.

        logger.info("Resource cleanup complete")
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
# Global singleton kept for backward compatibility. Its background loops do
# not run until some caller awaits resource_manager.start().
# New code should use: from reversecore_mcp.core.container import get_resource_manager
resource_manager = ResourceManager()
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
"""Pydantic models for structured tool results."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Literal, Union
|
|
6
|
+
|
|
7
|
+
from pydantic import BaseModel
|
|
8
|
+
|
|
9
|
+
try:
|
|
10
|
+
from typing import NotRequired, TypedDict
|
|
11
|
+
except ImportError:
|
|
12
|
+
from typing_extensions import NotRequired, TypedDict
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# TypedDict definitions for common tool result structures
class FunctionInfo(TypedDict):
    """Information about a function in a binary.

    Only ``name`` and ``address`` are guaranteed; every other key is
    ``NotRequired`` and may be absent depending on the producing tool.
    """

    name: str  # function symbol name
    address: str  # address rendered as a string — presumably hex; confirm with producers
    size: NotRequired[int]  # function size — presumably bytes; TODO confirm
    signature: NotRequired[str]  # function prototype/signature text
    callees: NotRequired[list[str]]  # names of functions this one calls
    callers: NotRequired[list[str]]  # names of functions that call this one
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class DisassemblyResult(TypedDict):
    """Result of disassembly operation.

    The field shape (address/mnemonic/operands) describes a single
    instruction; producers presumably emit one entry per instruction.
    """

    address: str  # instruction address as a string
    mnemonic: str  # instruction mnemonic
    operands: str  # operand text as rendered by the disassembler
    bytes: NotRequired[str]  # raw instruction bytes — presumably hex-encoded; verify
    comment: NotRequired[str]  # optional annotation attached to the instruction
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class DecompilationResult(TypedDict):
    """Result of decompilation operation."""

    function_name: str  # name of the decompiled function
    source_code: str  # decompiled pseudo-source text
    decompiler: NotRequired[str]  # which backend produced the output (e.g. Ghidra/r2 — verify)
    address: NotRequired[str]  # function address as a string
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class BinaryMetadata(TypedDict):
    """Metadata about a binary file.

    Path, size and type are always present; architecture details are
    optional and depend on what the analyzer could determine.
    """

    file_path: str  # path to the analyzed file
    file_size: int  # size in bytes (per the field name — TODO confirm unit)
    file_type: str  # detected file format/type string
    architecture: NotRequired[str]  # CPU architecture, if detected
    endianness: NotRequired[str]  # byte order, if detected
    entry_point: NotRequired[str]  # entry point address as a string
    sections: NotRequired[list[dict[str, Any]]]  # per-section metadata; schema set by producer
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class YaraRuleResult(TypedDict):
    """Result of YARA rule generation."""

    rule_name: str  # identifier of the generated rule
    rule_content: str  # full rule text in YARA syntax
    patterns_count: NotRequired[int]  # number of patterns included in the rule
    meta: NotRequired[dict[str, str]]  # rule meta key/value pairs
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class ScanResult(TypedDict):
    """Result of a security scan."""

    findings: list[dict[str, Any]]  # one dict per finding; schema set by the scanner
    severity: NotRequired[str]  # overall severity label — value set defined by producer; verify
    recommendations: NotRequired[list[str]]  # human-readable remediation suggestions
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class EmulationResult(TypedDict):
    """Result of code emulation."""

    final_registers: dict[str, Any]  # register name -> value after emulation
    steps_executed: int  # number of instructions/steps emulated
    status: str  # terminal status of the run — value set defined by producer; verify
    memory_writes: NotRequired[list[dict[str, Any]]]  # recorded memory writes, if captured
    syscalls: NotRequired[list[str]]  # syscalls observed during emulation, if captured
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class ErrorDetails(TypedDict, total=False):
    """Details for error responses.

    ``total=False`` makes every key optional; which keys are populated
    presumably depends on the error category (size, exception, timeout).
    """

    max_size: int  # configured size limit that was exceeded
    actual_size: int  # observed size that violated the limit
    exception_type: str  # class name of the underlying exception
    timeout_seconds: int  # timeout limit that was exceeded
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
class ToolSuccess(BaseModel):
    """Represents a successful tool invocation.

    ``status`` is a fixed literal ("success") acting as a discriminator so
    this model can be told apart from ToolError after serialization.
    """

    status: Literal["success"] = "success"  # discriminator field
    data: str | dict[str, Any]  # payload: free text or a structured result
    metadata: dict[str, Any] | None = None  # optional extra context; None when absent
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class ToolError(BaseModel):
    """Represents a failed tool invocation.

    ``status`` is a fixed literal ("error") acting as a discriminator so
    this model can be told apart from ToolSuccess after serialization.
    """

    status: Literal["error"] = "error"  # discriminator field
    error_code: str  # machine-readable error identifier
    message: str  # human-readable description of the failure
    hint: str | None = None  # optional suggestion for resolving the error
    details: dict[str, Any] | None = None  # optional structured context (cf. ErrorDetails)
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
# Discriminated union of the two result models; the literal `status` field
# ("success" vs "error") distinguishes the variants.
ToolResult = Union[ToolSuccess, ToolError]
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def success(data: str | dict[str, Any], **metadata: Any) -> ToolSuccess:
    """Wrap *data* in a ToolSuccess, attaching keyword metadata when given."""
    if metadata:
        return ToolSuccess(data=data, metadata=metadata)
    # Empty kwargs collapse to None, matching the model's default.
    return ToolSuccess(data=data, metadata=None)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def failure(
    error_code: str,
    message: str,
    hint: str | None = None,
    **details: Any,
) -> ToolError:
    """Build a ToolError, attaching an optional hint and extra detail fields."""
    # Empty kwargs collapse to None, matching the model's default.
    extra = details if details else None
    return ToolError(
        error_code=error_code,
        message=message,
        hint=hint,
        details=extra,
    )
|