iflow-mcp_developermode-korea_reversecore-mcp 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/METADATA +543 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/RECORD +79 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/top_level.txt +1 -0
- reversecore_mcp/__init__.py +9 -0
- reversecore_mcp/core/__init__.py +78 -0
- reversecore_mcp/core/audit.py +101 -0
- reversecore_mcp/core/binary_cache.py +138 -0
- reversecore_mcp/core/command_spec.py +357 -0
- reversecore_mcp/core/config.py +432 -0
- reversecore_mcp/core/container.py +288 -0
- reversecore_mcp/core/decorators.py +152 -0
- reversecore_mcp/core/error_formatting.py +93 -0
- reversecore_mcp/core/error_handling.py +142 -0
- reversecore_mcp/core/evidence.py +229 -0
- reversecore_mcp/core/exceptions.py +296 -0
- reversecore_mcp/core/execution.py +240 -0
- reversecore_mcp/core/ghidra.py +642 -0
- reversecore_mcp/core/ghidra_helper.py +481 -0
- reversecore_mcp/core/ghidra_manager.py +234 -0
- reversecore_mcp/core/json_utils.py +131 -0
- reversecore_mcp/core/loader.py +73 -0
- reversecore_mcp/core/logging_config.py +206 -0
- reversecore_mcp/core/memory.py +721 -0
- reversecore_mcp/core/metrics.py +198 -0
- reversecore_mcp/core/mitre_mapper.py +365 -0
- reversecore_mcp/core/plugin.py +45 -0
- reversecore_mcp/core/r2_helpers.py +404 -0
- reversecore_mcp/core/r2_pool.py +403 -0
- reversecore_mcp/core/report_generator.py +268 -0
- reversecore_mcp/core/resilience.py +252 -0
- reversecore_mcp/core/resource_manager.py +169 -0
- reversecore_mcp/core/result.py +132 -0
- reversecore_mcp/core/security.py +213 -0
- reversecore_mcp/core/validators.py +238 -0
- reversecore_mcp/dashboard/__init__.py +221 -0
- reversecore_mcp/prompts/__init__.py +56 -0
- reversecore_mcp/prompts/common.py +24 -0
- reversecore_mcp/prompts/game.py +280 -0
- reversecore_mcp/prompts/malware.py +1219 -0
- reversecore_mcp/prompts/report.py +150 -0
- reversecore_mcp/prompts/security.py +136 -0
- reversecore_mcp/resources.py +329 -0
- reversecore_mcp/server.py +727 -0
- reversecore_mcp/tools/__init__.py +49 -0
- reversecore_mcp/tools/analysis/__init__.py +74 -0
- reversecore_mcp/tools/analysis/capa_tools.py +215 -0
- reversecore_mcp/tools/analysis/die_tools.py +180 -0
- reversecore_mcp/tools/analysis/diff_tools.py +643 -0
- reversecore_mcp/tools/analysis/lief_tools.py +272 -0
- reversecore_mcp/tools/analysis/signature_tools.py +591 -0
- reversecore_mcp/tools/analysis/static_analysis.py +479 -0
- reversecore_mcp/tools/common/__init__.py +58 -0
- reversecore_mcp/tools/common/file_operations.py +352 -0
- reversecore_mcp/tools/common/memory_tools.py +516 -0
- reversecore_mcp/tools/common/patch_explainer.py +230 -0
- reversecore_mcp/tools/common/server_tools.py +115 -0
- reversecore_mcp/tools/ghidra/__init__.py +19 -0
- reversecore_mcp/tools/ghidra/decompilation.py +975 -0
- reversecore_mcp/tools/ghidra/ghidra_tools.py +1052 -0
- reversecore_mcp/tools/malware/__init__.py +61 -0
- reversecore_mcp/tools/malware/adaptive_vaccine.py +579 -0
- reversecore_mcp/tools/malware/dormant_detector.py +756 -0
- reversecore_mcp/tools/malware/ioc_tools.py +228 -0
- reversecore_mcp/tools/malware/vulnerability_hunter.py +519 -0
- reversecore_mcp/tools/malware/yara_tools.py +214 -0
- reversecore_mcp/tools/patch_explainer.py +19 -0
- reversecore_mcp/tools/radare2/__init__.py +13 -0
- reversecore_mcp/tools/radare2/r2_analysis.py +972 -0
- reversecore_mcp/tools/radare2/r2_session.py +376 -0
- reversecore_mcp/tools/radare2/radare2_mcp_tools.py +1183 -0
- reversecore_mcp/tools/report/__init__.py +4 -0
- reversecore_mcp/tools/report/email.py +82 -0
- reversecore_mcp/tools/report/report_mcp_tools.py +344 -0
- reversecore_mcp/tools/report/report_tools.py +1076 -0
- reversecore_mcp/tools/report/session.py +194 -0
- reversecore_mcp/tools/report_tools.py +11 -0
|
@@ -0,0 +1,288 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Dependency Injection Container for Reversecore_MCP
|
|
3
|
+
|
|
4
|
+
This module provides a lightweight dependency injection container that:
|
|
5
|
+
- Centralizes service registration and lifecycle management
|
|
6
|
+
- Enables easy testing through service replacement
|
|
7
|
+
- Supports singleton and factory patterns
|
|
8
|
+
- Allows async initialization of services
|
|
9
|
+
|
|
10
|
+
Usage:
|
|
11
|
+
from reversecore_mcp.core.container import container, ServiceContainer
|
|
12
|
+
|
|
13
|
+
# Register services
|
|
14
|
+
container.register_singleton('r2_pool', R2ConnectionPool)
|
|
15
|
+
container.register_factory('config', get_config)
|
|
16
|
+
|
|
17
|
+
# Get services
|
|
18
|
+
pool = container.get('r2_pool')
|
|
19
|
+
|
|
20
|
+
# Override for testing
|
|
21
|
+
container.override('r2_pool', mock_pool)
|
|
22
|
+
|
|
23
|
+
# Reset overrides
|
|
24
|
+
container.reset_overrides()
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
import asyncio
|
|
28
|
+
import threading
|
|
29
|
+
from collections.abc import Callable
|
|
30
|
+
from typing import Any, TypeVar
|
|
31
|
+
|
|
32
|
+
from reversecore_mcp.core.logging_config import get_logger
|
|
33
|
+
|
|
34
|
+
logger = get_logger(__name__)
|
|
35
|
+
|
|
36
|
+
T = TypeVar("T")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class ServiceContainer:
|
|
40
|
+
"""
|
|
41
|
+
A lightweight dependency injection container.
|
|
42
|
+
|
|
43
|
+
Features:
|
|
44
|
+
- Singleton management (create once, reuse)
|
|
45
|
+
- Factory support (create new instance each time)
|
|
46
|
+
- Service overrides for testing
|
|
47
|
+
- Async initialization support
|
|
48
|
+
- Thread-safe operations
|
|
49
|
+
"""
|
|
50
|
+
|
|
51
|
+
def __init__(self) -> None:
|
|
52
|
+
self._singletons: dict[str, Any] = {}
|
|
53
|
+
self._singleton_factories: dict[str, Callable[[], Any]] = {}
|
|
54
|
+
self._factories: dict[str, Callable[[], Any]] = {}
|
|
55
|
+
self._overrides: dict[str, Any] = {}
|
|
56
|
+
self._lock = threading.RLock()
|
|
57
|
+
self._initialized = False
|
|
58
|
+
|
|
59
|
+
def register_singleton(
|
|
60
|
+
self,
|
|
61
|
+
name: str,
|
|
62
|
+
factory: Callable[[], T] | type[T],
|
|
63
|
+
instance: T | None = None,
|
|
64
|
+
) -> None:
|
|
65
|
+
"""
|
|
66
|
+
Register a singleton service.
|
|
67
|
+
|
|
68
|
+
Args:
|
|
69
|
+
name: Service name for lookup
|
|
70
|
+
factory: Callable that creates the service instance
|
|
71
|
+
instance: Optional pre-created instance (skips factory)
|
|
72
|
+
"""
|
|
73
|
+
with self._lock:
|
|
74
|
+
if instance is not None:
|
|
75
|
+
self._singletons[name] = instance
|
|
76
|
+
else:
|
|
77
|
+
self._singleton_factories[name] = factory
|
|
78
|
+
|
|
79
|
+
def register_factory(self, name: str, factory: Callable[[], T]) -> None:
|
|
80
|
+
"""
|
|
81
|
+
Register a factory service (new instance each call).
|
|
82
|
+
|
|
83
|
+
Args:
|
|
84
|
+
name: Service name for lookup
|
|
85
|
+
factory: Callable that creates a new service instance
|
|
86
|
+
"""
|
|
87
|
+
with self._lock:
|
|
88
|
+
self._factories[name] = factory
|
|
89
|
+
|
|
90
|
+
def get(self, name: str) -> Any:
|
|
91
|
+
"""
|
|
92
|
+
Get a service by name.
|
|
93
|
+
|
|
94
|
+
Args:
|
|
95
|
+
name: Service name
|
|
96
|
+
|
|
97
|
+
Returns:
|
|
98
|
+
Service instance
|
|
99
|
+
|
|
100
|
+
Raises:
|
|
101
|
+
KeyError: If service not registered
|
|
102
|
+
"""
|
|
103
|
+
with self._lock:
|
|
104
|
+
# Check overrides first (for testing)
|
|
105
|
+
if name in self._overrides:
|
|
106
|
+
return self._overrides[name]
|
|
107
|
+
|
|
108
|
+
# Check existing singletons
|
|
109
|
+
if name in self._singletons:
|
|
110
|
+
return self._singletons[name]
|
|
111
|
+
|
|
112
|
+
# Check singleton factories (lazy initialization)
|
|
113
|
+
if name in self._singleton_factories:
|
|
114
|
+
instance = self._singleton_factories[name]()
|
|
115
|
+
self._singletons[name] = instance
|
|
116
|
+
|
|
117
|
+
# If container is already initialized, start the service immediately
|
|
118
|
+
if self._initialized and hasattr(instance, "start") and asyncio.iscoroutinefunction(instance.start):
|
|
119
|
+
# We can't await here as get is sync, but we can schedule it
|
|
120
|
+
# Warning: This creates a potential race condition for immediate use
|
|
121
|
+
# ideally initialize_async should have caught this.
|
|
122
|
+
# For safety, we log this event.
|
|
123
|
+
logger.warning(f"Service '{name}' instantiated after initialization. Scheduling start.")
|
|
124
|
+
asyncio.create_task(self._safe_start(name, instance))
|
|
125
|
+
|
|
126
|
+
return instance
|
|
127
|
+
|
|
128
|
+
# Check factories
|
|
129
|
+
if name in self._factories:
|
|
130
|
+
return self._factories[name]()
|
|
131
|
+
|
|
132
|
+
raise KeyError(f"Service '{name}' not registered")
|
|
133
|
+
|
|
134
|
+
def override(self, name: str, instance: Any) -> None:
|
|
135
|
+
"""
|
|
136
|
+
Override a service for testing.
|
|
137
|
+
|
|
138
|
+
Args:
|
|
139
|
+
name: Service name to override
|
|
140
|
+
instance: Mock or test instance to use
|
|
141
|
+
"""
|
|
142
|
+
with self._lock:
|
|
143
|
+
self._overrides[name] = instance
|
|
144
|
+
logger.debug(f"Service '{name}' overridden for testing")
|
|
145
|
+
|
|
146
|
+
def reset_overrides(self) -> None:
|
|
147
|
+
"""Remove all test overrides."""
|
|
148
|
+
with self._lock:
|
|
149
|
+
self._overrides.clear()
|
|
150
|
+
logger.debug("All service overrides cleared")
|
|
151
|
+
|
|
152
|
+
def reset_singleton(self, name: str) -> None:
|
|
153
|
+
"""
|
|
154
|
+
Reset a singleton (force re-creation on next get).
|
|
155
|
+
|
|
156
|
+
Args:
|
|
157
|
+
name: Service name to reset
|
|
158
|
+
"""
|
|
159
|
+
with self._lock:
|
|
160
|
+
if name in self._singletons:
|
|
161
|
+
del self._singletons[name]
|
|
162
|
+
logger.debug(f"Singleton '{name}' reset")
|
|
163
|
+
|
|
164
|
+
def reset_all(self) -> None:
|
|
165
|
+
"""Reset all singletons and overrides."""
|
|
166
|
+
with self._lock:
|
|
167
|
+
self._singletons.clear()
|
|
168
|
+
self._overrides.clear()
|
|
169
|
+
logger.debug("All services reset")
|
|
170
|
+
|
|
171
|
+
def has(self, name: str) -> bool:
|
|
172
|
+
"""Check if a service is registered."""
|
|
173
|
+
with self._lock:
|
|
174
|
+
return (
|
|
175
|
+
name in self._overrides
|
|
176
|
+
or name in self._singletons
|
|
177
|
+
or name in self._singleton_factories
|
|
178
|
+
or name in self._factories
|
|
179
|
+
)
|
|
180
|
+
|
|
181
|
+
async def _safe_start(self, name: str, instance: Any) -> None:
|
|
182
|
+
"""Helper to start a service safely in background."""
|
|
183
|
+
try:
|
|
184
|
+
await instance.start()
|
|
185
|
+
logger.info(f"Async service '{name}' started (lazy)")
|
|
186
|
+
except Exception as e:
|
|
187
|
+
logger.error(f"Failed to start '{name}': {e}")
|
|
188
|
+
|
|
189
|
+
async def initialize_async(self) -> None:
|
|
190
|
+
"""
|
|
191
|
+
Initialize all async-capable singletons.
|
|
192
|
+
|
|
193
|
+
Call this during application startup.
|
|
194
|
+
"""
|
|
195
|
+
if self._initialized:
|
|
196
|
+
return
|
|
197
|
+
|
|
198
|
+
with self._lock:
|
|
199
|
+
self._initialized = True
|
|
200
|
+
|
|
201
|
+
# Eagerly instantiate all singleton factories to ensure they are started
|
|
202
|
+
# This prevents race conditions where a service is accessed later but missed the start phase
|
|
203
|
+
for name in list(self._singleton_factories.keys()):
|
|
204
|
+
self.get(name)
|
|
205
|
+
|
|
206
|
+
# Initialize singletons that have async start methods
|
|
207
|
+
for name, instance in self._singletons.items():
|
|
208
|
+
if hasattr(instance, "start") and asyncio.iscoroutinefunction(instance.start):
|
|
209
|
+
try:
|
|
210
|
+
await instance.start()
|
|
211
|
+
logger.info(f"Async service '{name}' started")
|
|
212
|
+
except Exception as e:
|
|
213
|
+
logger.error(f"Failed to start '{name}': {e}")
|
|
214
|
+
|
|
215
|
+
async def shutdown_async(self) -> None:
|
|
216
|
+
"""
|
|
217
|
+
Shutdown all async-capable singletons.
|
|
218
|
+
|
|
219
|
+
Call this during application shutdown.
|
|
220
|
+
"""
|
|
221
|
+
with self._lock:
|
|
222
|
+
self._initialized = False
|
|
223
|
+
|
|
224
|
+
# Stop singletons that have async stop methods
|
|
225
|
+
for name, instance in self._singletons.items():
|
|
226
|
+
if hasattr(instance, "stop") and asyncio.iscoroutinefunction(instance.stop):
|
|
227
|
+
try:
|
|
228
|
+
await instance.stop()
|
|
229
|
+
logger.info(f"Async service '{name}' stopped")
|
|
230
|
+
except Exception as e:
|
|
231
|
+
logger.error(f"Failed to stop '{name}': {e}")
|
|
232
|
+
|
|
233
|
+
# Also try close_all for pools
|
|
234
|
+
if hasattr(instance, "close_all"):
|
|
235
|
+
try:
|
|
236
|
+
instance.close_all()
|
|
237
|
+
logger.info(f"Service '{name}' closed")
|
|
238
|
+
except Exception as e:
|
|
239
|
+
logger.error(f"Failed to close '{name}': {e}")
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
# Global container instance: the module-level singleton that the rest of the
# package imports directly to register and resolve services.
container = ServiceContainer()
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def _initialize_default_services() -> None:
    """Register the default services in the global container.

    Imports are deferred to call time to avoid circular imports between
    the container module and the service modules it wires up.
    """
    from reversecore_mcp.core.config import get_config
    from reversecore_mcp.core.ghidra import GhidraService
    from reversecore_mcp.core.r2_pool import R2ConnectionPool
    from reversecore_mcp.core.resource_manager import ResourceManager

    # Config is registered as a factory so each lookup gets a fresh value.
    container.register_factory("config", get_config)

    # Long-lived infrastructure services are registered as lazy singletons.
    singleton_services = (
        ("r2_pool", R2ConnectionPool),
        ("resource_manager", ResourceManager),
        ("ghidra", GhidraService),
    )
    for service_name, service_cls in singleton_services:
        container.register_singleton(service_name, service_cls)
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
# Initialize default services on module load.
# NOTE: this is an import-time side effect — importing this module
# pre-populates the global container with config/r2_pool/resource_manager/ghidra.
_initialize_default_services()
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
# Convenience functions for common services
|
|
271
|
+
def get_r2_pool():
    """Return the shared R2 connection pool from the global container."""
    pool = container.get("r2_pool")
    return pool
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
def get_resource_manager():
    """Return the shared resource manager from the global container."""
    manager = container.get("resource_manager")
    return manager
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
def get_ghidra_service():
    """Return the shared Ghidra service from the global container."""
    service = container.get("ghidra")
    return service
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def get_config_from_container():
    """Resolve the configuration object through the global container."""
    config = container.get("config")
    return config
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Decorators for common tool execution patterns.
|
|
3
|
+
|
|
4
|
+
This module provides decorators to reduce code duplication in tool functions
|
|
5
|
+
by centralizing logging, error handling, and execution time measurement.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import functools
|
|
9
|
+
import os
|
|
10
|
+
import time
|
|
11
|
+
from collections.abc import Callable
|
|
12
|
+
from typing import Any, TypeVar
|
|
13
|
+
|
|
14
|
+
from reversecore_mcp.core.logging_config import get_logger
|
|
15
|
+
from reversecore_mcp.core.result import ToolResult
|
|
16
|
+
|
|
17
|
+
logger = get_logger(__name__)
|
|
18
|
+
|
|
19
|
+
F = TypeVar("F", bound=Callable[..., Any])
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _extract_file_name(args: tuple, kwargs: dict) -> str | None:
    """Best-effort extraction of a file name from tool call arguments.

    Checks well-known keyword names first, then falls back to the first
    positional argument when it is a non-empty string.

    OPTIMIZATION: uses os.path.basename() instead of constructing a Path
    object, which is cheaper and still cross-platform.
    """
    for arg_name in ("file_path", "path", "file"):
        value = kwargs.get(arg_name)
        if value:  # skip missing keys and empty strings
            return os.path.basename(value)
    if args:
        first_arg = args[0]
        if isinstance(first_arg, str) and first_arg:
            return os.path.basename(first_arg)
    return None


def _build_log_extra(tool_name: str, args: tuple, kwargs: dict) -> dict:
    """Build the structured-log context shared by the sync/async wrappers."""
    log_extra: dict = {"tool_name": tool_name}
    file_name = _extract_file_name(args, kwargs)
    if file_name:
        log_extra["file_name"] = file_name
    return log_extra


def _record_success(result: Any, start_time: float, tool_name: str, log_extra: dict) -> None:
    """Stamp execution time onto the result metadata and log completion."""
    execution_time = int((time.time() - start_time) * 1000)

    # Add execution time to metadata when the result carries one.
    if hasattr(result, "metadata"):
        if result.metadata is None:
            result.metadata = {}
        result.metadata["execution_time_ms"] = execution_time

    log_extra["execution_time_ms"] = execution_time
    logger.info(f"{tool_name} completed successfully", extra=log_extra)


def _record_failure(start_time: float, tool_name: str, log_extra: dict) -> None:
    """Log a tool failure with execution time and the active traceback."""
    execution_time = int((time.time() - start_time) * 1000)
    log_extra["execution_time_ms"] = execution_time
    logger.error(
        f"{tool_name} failed",
        extra=log_extra,
        exc_info=True,
    )


def log_execution(tool_name: str | None = None) -> Callable[[F], F]:
    """
    Decorator to add logging and error handling to tool functions.

    This decorator:
    - Logs function start and completion
    - Measures execution time
    - Handles common exceptions and formats errors
    - Extracts file_name from function arguments if present

    The sync and async code paths share the same helpers
    (_build_log_extra / _record_success / _record_failure), which removes
    the previous ~50-line duplication between the two wrappers.

    Args:
        tool_name: Name of the tool (defaults to function name)

    Returns:
        Decorated function
    """

    def decorator(func: F) -> F:
        # Use provided tool_name or function name
        actual_tool_name = tool_name or func.__name__

        import inspect

        if inspect.iscoroutinefunction(func):

            @functools.wraps(func)
            async def async_wrapper(*args: Any, **kwargs: Any) -> ToolResult:
                start_time = time.time()
                log_extra = _build_log_extra(actual_tool_name, args, kwargs)
                logger.info(f"Starting {actual_tool_name}", extra=log_extra)

                try:
                    result = await func(*args, **kwargs)
                except Exception:
                    _record_failure(start_time, actual_tool_name, log_extra)
                    # Critical: Re-raise exception so @handle_tool_errors can catch it.
                    # Returning failure() here would hide the error from outer decorators.
                    raise
                _record_success(result, start_time, actual_tool_name, log_extra)
                return result

            return async_wrapper  # type: ignore

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> ToolResult:
            start_time = time.time()
            log_extra = _build_log_extra(actual_tool_name, args, kwargs)
            logger.info(f"Starting {actual_tool_name}", extra=log_extra)

            try:
                result = func(*args, **kwargs)
            except Exception:
                _record_failure(start_time, actual_tool_name, log_extra)
                # Critical: Re-raise exception so @handle_tool_errors can catch it.
                raise
            _record_success(result, start_time, actual_tool_name, log_extra)
            return result

        return wrapper  # type: ignore

    return decorator
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Error formatting utilities for structured error responses.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from reversecore_mcp.core.config import get_config
|
|
8
|
+
from reversecore_mcp.core.exceptions import ReversecoreError
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def format_error(
    error: Exception, tool_name: str | None = None, hint: str | None = None
) -> str | dict[str, Any]:
    """
    Format an error as string or structured JSON based on settings.

    Args:
        error: The exception to format
        tool_name: Name of the tool that raised the error
        hint: Optional hint message for resolving the error

    Returns:
        Error message as string (default) or structured dict (if
        structured_errors enabled)
    """
    use_structured = get_config().structured_errors

    if isinstance(error, ReversecoreError):
        error_code = error.error_code
        error_type = error.error_type
        message = error.message
        details: dict[str, Any] = {}
        # Copy the exception-specific attributes that are present.
        for attr in ("tool_name", "timeout_seconds", "max_size", "actual_size"):
            if hasattr(error, attr):
                details[attr] = getattr(error, attr)
        if hasattr(error, "details"):
            details.update(error.details)
    else:
        # Anything else is reported under the generic system error code.
        error_code = "RCMCP-E000"
        error_type = "SYSTEM_ERROR"
        message = str(error)
        details = {"exception_type": type(error).__name__}

    if tool_name:
        details["tool_name"] = tool_name

    if not use_structured:
        # Simple string format (backward compatible).
        text = f"Error: {message}"
        if hint:
            text += f" Hint: {hint}"
        return text

    # Structured JSON format.
    payload: dict[str, Any] = {
        "error_code": error_code,
        "error_type": error_type,
        "message": message,
        "details": details,
    }
    if hint:
        payload["hint"] = hint
    return payload
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def get_validation_hint(error: ValueError) -> str:
    """
    Generate a helpful hint message for validation errors.

    Matches known substrings of the error text (case-insensitively)
    against a table of canned hints, in priority order.

    Args:
        error: The ValueError exception

    Returns:
        Hint message string
    """
    lowered = str(error).lower()

    hint_table = (
        (
            ("outside", "workspace"),
            "Ensure the file is in the allowed workspace directory. Set REVERSECORE_WORKSPACE environment variable to change the workspace path.",
        ),
        (
            ("does not point to a file",),
            "The specified path points to a directory, not a file. Please provide a file path.",
        ),
        (
            ("invalid file path",),
            "The file path is invalid or the file does not exist. Please check the path and try again.",
        ),
    )

    for needles, hint in hint_table:
        if any(needle in lowered for needle in needles):
            return hint
    return "Please check the input and try again."
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
"""Shared error handling utilities for tool wrappers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from functools import wraps
|
|
7
|
+
from typing import TypeVar
|
|
8
|
+
|
|
9
|
+
from reversecore_mcp.core.exceptions import (
|
|
10
|
+
ExecutionTimeoutError,
|
|
11
|
+
OutputLimitExceededError,
|
|
12
|
+
ToolNotFoundError,
|
|
13
|
+
ValidationError,
|
|
14
|
+
)
|
|
15
|
+
from reversecore_mcp.core.logging_config import get_logger
|
|
16
|
+
from reversecore_mcp.core.result import ToolResult, failure
|
|
17
|
+
|
|
18
|
+
logger = get_logger(__name__)
|
|
19
|
+
|
|
20
|
+
F = TypeVar("F", bound=Callable[..., ToolResult])
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _handle_exception(exc: Exception, tool_name: str) -> ToolResult:
    """Convert common exceptions into ToolResult failures.

    Centralized so the sync and async wrappers share a single code path
    instead of duplicating the exception-to-result mapping.

    Args:
        exc: The exception to handle
        tool_name: Name of the tool that raised the exception

    Returns:
        ToolResult with appropriate error code and message
    """
    if isinstance(exc, ToolNotFoundError):
        install_hint = f"Install with: apt-get install {exc.tool_name}"
        return failure("TOOL_NOT_FOUND", str(exc), hint=install_hint)

    if isinstance(exc, ExecutionTimeoutError):
        timeout_message = f"Command timed out after {exc.timeout_seconds} seconds"
        return failure(
            "TIMEOUT",
            timeout_message,
            timeout_seconds=exc.timeout_seconds,
        )

    if isinstance(exc, OutputLimitExceededError):
        size_details = {"max_size": exc.max_size, "actual_size": exc.actual_size}
        return failure(
            "OUTPUT_LIMIT",
            str(exc),
            hint="Reduce output size or increase the limit",
            details=size_details,
        )

    if isinstance(exc, ValidationError):
        return failure(
            "VALIDATION_ERROR",
            str(exc),
            hint="Ensure the file is in the workspace directory",
            details=exc.details,
        )

    # Anything unrecognized is unexpected: log the traceback, then report
    # a generic internal error.
    logger.exception("Unexpected error in tool '%s'", tool_name)
    return failure(
        "INTERNAL_ERROR",
        f"{tool_name} failed: {exc}",
        exception_type=exc.__class__.__name__,
    )
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def handle_tool_errors(func=None, *, max_retries: int = 0, backoff: float = 0.5) -> F:
    """
    Wrap a tool function to handle errors and optionally retry on failure.

    Retries use exponential backoff: the wait before attempt N is
    ``backoff * 2**N`` seconds. When the final attempt fails, the exception
    is converted into a ToolResult failure via _handle_exception().

    Supports usage as both:
        @handle_tool_errors
        def my_tool(): ...

    and:
        @handle_tool_errors(max_retries=3)
        def my_tool(): ...

    Args:
        func: The decorated function (when used without parentheses)
        max_retries: Number of retries after the first failed attempt
        backoff: Base delay in seconds for the exponential backoff

    Returns:
        The wrapped function, which returns a ToolResult failure instead of
        raising on error
    """
    import asyncio
    import inspect
    import time

    def decorator(f: F) -> F:
        is_async = inspect.iscoroutinefunction(f)
        tool_name = f.__name__

        if is_async:
            @wraps(f)
            async def async_wrapper(*args, **kwargs) -> ToolResult:
                last_exception = None
                for attempt in range(max_retries + 1):
                    try:
                        return await f(*args, **kwargs)
                    except Exception as exc:
                        last_exception = exc
                        if attempt < max_retries:
                            wait_time = backoff * (2 ** attempt)
                            logger.warning(
                                f"Tool '{tool_name}' failed (attempt {attempt+1}/{max_retries+1}). "
                                f"Retrying in {wait_time:.1f}s. Error: {exc}"
                            )
                            await asyncio.sleep(wait_time)
                        else:
                            # Final attempt failed: convert to a ToolResult.
                            # (BUG FIX: removed a dead `msg = ...` assignment
                            # that was computed here but never used.)
                            return _handle_exception(exc, tool_name)
                # Defensive fallback; the loop always returns above.
                return _handle_exception(last_exception, tool_name)
            return async_wrapper  # type: ignore

        else:
            @wraps(f)
            def sync_wrapper(*args, **kwargs) -> ToolResult:
                last_exception = None
                for attempt in range(max_retries + 1):
                    try:
                        return f(*args, **kwargs)
                    except Exception as exc:
                        last_exception = exc
                        if attempt < max_retries:
                            wait_time = backoff * (2 ** attempt)
                            logger.warning(
                                f"Tool '{tool_name}' failed (attempt {attempt+1}/{max_retries+1}). "
                                f"Retrying in {wait_time:.1f}s. Error: {exc}"
                            )
                            time.sleep(wait_time)
                        else:
                            return _handle_exception(exc, tool_name)
                # Defensive fallback; the loop always returns above.
                return _handle_exception(last_exception, tool_name)
            return sync_wrapper  # type: ignore

    # Support both bare-decorator and parameterized-decorator usage.
    if func is None:
        return decorator
    return decorator(func)
|