iflow-mcp_developermode-korea_reversecore-mcp 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/METADATA +543 -0
  2. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/RECORD +79 -0
  3. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/WHEEL +5 -0
  4. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/entry_points.txt +2 -0
  5. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
  6. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/top_level.txt +1 -0
  7. reversecore_mcp/__init__.py +9 -0
  8. reversecore_mcp/core/__init__.py +78 -0
  9. reversecore_mcp/core/audit.py +101 -0
  10. reversecore_mcp/core/binary_cache.py +138 -0
  11. reversecore_mcp/core/command_spec.py +357 -0
  12. reversecore_mcp/core/config.py +432 -0
  13. reversecore_mcp/core/container.py +288 -0
  14. reversecore_mcp/core/decorators.py +152 -0
  15. reversecore_mcp/core/error_formatting.py +93 -0
  16. reversecore_mcp/core/error_handling.py +142 -0
  17. reversecore_mcp/core/evidence.py +229 -0
  18. reversecore_mcp/core/exceptions.py +296 -0
  19. reversecore_mcp/core/execution.py +240 -0
  20. reversecore_mcp/core/ghidra.py +642 -0
  21. reversecore_mcp/core/ghidra_helper.py +481 -0
  22. reversecore_mcp/core/ghidra_manager.py +234 -0
  23. reversecore_mcp/core/json_utils.py +131 -0
  24. reversecore_mcp/core/loader.py +73 -0
  25. reversecore_mcp/core/logging_config.py +206 -0
  26. reversecore_mcp/core/memory.py +721 -0
  27. reversecore_mcp/core/metrics.py +198 -0
  28. reversecore_mcp/core/mitre_mapper.py +365 -0
  29. reversecore_mcp/core/plugin.py +45 -0
  30. reversecore_mcp/core/r2_helpers.py +404 -0
  31. reversecore_mcp/core/r2_pool.py +403 -0
  32. reversecore_mcp/core/report_generator.py +268 -0
  33. reversecore_mcp/core/resilience.py +252 -0
  34. reversecore_mcp/core/resource_manager.py +169 -0
  35. reversecore_mcp/core/result.py +132 -0
  36. reversecore_mcp/core/security.py +213 -0
  37. reversecore_mcp/core/validators.py +238 -0
  38. reversecore_mcp/dashboard/__init__.py +221 -0
  39. reversecore_mcp/prompts/__init__.py +56 -0
  40. reversecore_mcp/prompts/common.py +24 -0
  41. reversecore_mcp/prompts/game.py +280 -0
  42. reversecore_mcp/prompts/malware.py +1219 -0
  43. reversecore_mcp/prompts/report.py +150 -0
  44. reversecore_mcp/prompts/security.py +136 -0
  45. reversecore_mcp/resources.py +329 -0
  46. reversecore_mcp/server.py +727 -0
  47. reversecore_mcp/tools/__init__.py +49 -0
  48. reversecore_mcp/tools/analysis/__init__.py +74 -0
  49. reversecore_mcp/tools/analysis/capa_tools.py +215 -0
  50. reversecore_mcp/tools/analysis/die_tools.py +180 -0
  51. reversecore_mcp/tools/analysis/diff_tools.py +643 -0
  52. reversecore_mcp/tools/analysis/lief_tools.py +272 -0
  53. reversecore_mcp/tools/analysis/signature_tools.py +591 -0
  54. reversecore_mcp/tools/analysis/static_analysis.py +479 -0
  55. reversecore_mcp/tools/common/__init__.py +58 -0
  56. reversecore_mcp/tools/common/file_operations.py +352 -0
  57. reversecore_mcp/tools/common/memory_tools.py +516 -0
  58. reversecore_mcp/tools/common/patch_explainer.py +230 -0
  59. reversecore_mcp/tools/common/server_tools.py +115 -0
  60. reversecore_mcp/tools/ghidra/__init__.py +19 -0
  61. reversecore_mcp/tools/ghidra/decompilation.py +975 -0
  62. reversecore_mcp/tools/ghidra/ghidra_tools.py +1052 -0
  63. reversecore_mcp/tools/malware/__init__.py +61 -0
  64. reversecore_mcp/tools/malware/adaptive_vaccine.py +579 -0
  65. reversecore_mcp/tools/malware/dormant_detector.py +756 -0
  66. reversecore_mcp/tools/malware/ioc_tools.py +228 -0
  67. reversecore_mcp/tools/malware/vulnerability_hunter.py +519 -0
  68. reversecore_mcp/tools/malware/yara_tools.py +214 -0
  69. reversecore_mcp/tools/patch_explainer.py +19 -0
  70. reversecore_mcp/tools/radare2/__init__.py +13 -0
  71. reversecore_mcp/tools/radare2/r2_analysis.py +972 -0
  72. reversecore_mcp/tools/radare2/r2_session.py +376 -0
  73. reversecore_mcp/tools/radare2/radare2_mcp_tools.py +1183 -0
  74. reversecore_mcp/tools/report/__init__.py +4 -0
  75. reversecore_mcp/tools/report/email.py +82 -0
  76. reversecore_mcp/tools/report/report_mcp_tools.py +344 -0
  77. reversecore_mcp/tools/report/report_tools.py +1076 -0
  78. reversecore_mcp/tools/report/session.py +194 -0
  79. reversecore_mcp/tools/report_tools.py +11 -0
reversecore_mcp/core/ghidra_manager.py
@@ -0,0 +1,234 @@
+ """
+ Ghidra Manager
+
+ This module manages the Ghidra JVM lifecycle and project reuse.
+ It ensures the JVM is started only once and projects are cached for performance.
+ """
+
+ import asyncio
+ import threading
+ from typing import Any
+
+ from reversecore_mcp.core.config import get_config
+ from reversecore_mcp.core.logging_config import get_logger
+
+
+ # Custom exceptions for structured error handling
+ class GhidraError(Exception):
+     """Base exception for Ghidra operations."""
+
+
+ class DecompilationError(GhidraError):
+     """Raised when decompilation fails."""
+
+
+ logger = get_logger(__name__)
+
+
+ class GhidraManager:
+     """
+     Manages Ghidra JVM and project lifecycle.
+
+     Features:
+     - Singleton JVM instance
+     - Project caching
+     - Thread-safe execution
+     """
+
+     _instance = None
+     _lock = threading.RLock()
+
+     def __new__(cls):
+         if cls._instance is None:
+             with cls._lock:
+                 if cls._instance is None:
+                     cls._instance = super().__new__(cls)
+                     cls._instance._initialized = False
+         return cls._instance
+
+     def __init__(self):
+         if self._initialized:
+             return
+
+         self._jvm_started = False
+         # Cache of loaded programs: path -> (program, flat_api, ctx)
+         self._projects: dict[str, Any] = {}
+         self._project_lock = threading.RLock()
+         # Use the configured limit instead of a hardcoded value
+         self._max_projects = get_config().ghidra_max_projects
+         self._pyghidra = None
+         self._flat_program_api = None
+         self._initialized = True
+
+     def _ensure_jvm_started(self):
+         """Start the JVM if it is not already running."""
+         if self._jvm_started:
+             return
+
+         with self._lock:
+             if self._jvm_started:
+                 return
+
+             try:
+                 import pyghidra
+                 from pyghidra.core import FlatProgramAPI
+
+                 logger.info("Starting Ghidra JVM...")
+                 # pyghidra.start() is often called automatically on import or
+                 # first use, but an explicit start keeps the lifecycle under our control.
+                 try:
+                     pyghidra.start()
+                 except Exception as e:
+                     # The JVM may already be running; log and continue.
+                     logger.debug(f"pyghidra.start() result: {e}")
+
+                 self._pyghidra = pyghidra
+                 self._flat_program_api = FlatProgramAPI
+                 self._jvm_started = True
+                 logger.info("Ghidra JVM started successfully")
+
+             except ImportError:
+                 logger.error("pyghidra not installed")
+                 raise ImportError("pyghidra not installed")
+             except Exception as e:
+                 logger.error(f"Failed to start Ghidra JVM: {e}")
+                 raise
+
+     def _close_project(self, file_path: str, ctx: Any) -> None:
+         """Properly close a Ghidra project context manager."""
+         try:
+             ctx.__exit__(None, None, None)
+             logger.info(f"Closed Ghidra project: {file_path}")
+         except Exception as e:
+             logger.warning(f"Error closing Ghidra project {file_path}: {e}")
+
+     def _get_project(self, file_path: str):
+         """Get or load a project for the given file."""
+         with self._project_lock:
+             if file_path in self._projects:
+                 # Move to end (most recently used)
+                 val = self._projects.pop(file_path)
+                 self._projects[file_path] = val
+                 return val
+
+             # Evict if needed, closing the evicted context manager. Note that
+             # dict.popitem() removes the MOST recently inserted entry, so take
+             # the first key to evict the least recently used project instead.
+             if len(self._projects) >= self._max_projects:
+                 oldest_path = next(iter(self._projects))
+                 _prog, _api, oldest_ctx = self._projects.pop(oldest_path)
+                 logger.info(f"Evicting Ghidra project: {oldest_path}")
+                 self._close_project(oldest_path, oldest_ctx)
+
+             logger.info(f"Loading Ghidra project: {file_path}")
+             # pyghidra.open_program returns a context manager. Enter it manually
+             # and keep it alive so the program stays loaded between calls.
+             ctx = self._pyghidra.open_program(file_path)
+             flat_api = ctx.__enter__()
+             program = flat_api.getCurrentProgram()
+
+             # Store the context so we can exit it on eviction
+             self._projects[file_path] = (program, flat_api, ctx)
+             return program, flat_api, ctx
+
+     def close_all(self) -> None:
+         """Close all cached projects. Call on shutdown to prevent resource leaks."""
+         with self._project_lock:
+             for path, (_prog, _api, ctx) in list(self._projects.items()):
+                 self._close_project(path, ctx)
+             self._projects.clear()
+             logger.info("All Ghidra projects closed")
+
+     def decompile(self, file_path: str, function_address: str | None = None) -> str:
+         """
+         Decompile a function or the entire file.
+
+         Args:
+             file_path: Path to the binary
+             function_address: Address of the function to decompile (optional)
+
+         Returns:
+             Decompiled C code
+         """
+         self._ensure_jvm_started()
+
+         # [PERFORMANCE BOTTLENECK]
+         # Ghidra/JPype requires single-threaded access to the JVM bridge via this
+         # lock, which serializes all decompilation requests.
+         # DO NOT REMOVE this lock without implementing a multi-process worker pool architecture.
+         with self._lock:
+             try:
+                 # Get the cached project
+                 program, flat_api, _ = self._get_project(file_path)
+
+                 from ghidra.app.decompiler import DecompInterface
+                 from ghidra.util.task import ConsoleTaskMonitor
+
+                 decompiler = DecompInterface()
+                 decompiler.openProgram(program)
+
+                 monitor = ConsoleTaskMonitor()
+
+                 if function_address:
+                     # Parse the address
+                     addr = flat_api.toAddr(function_address)
+                     if not addr:
+                         # Retry as a hex literal, then as a symbol name
+                         try:
+                             if function_address.startswith("0x"):
+                                 addr = flat_api.toAddr(int(function_address, 16))
+                             else:
+                                 funcs = flat_api.getGlobalFunctions(function_address)
+                                 if funcs:
+                                     addr = funcs[0].getEntryPoint()
+                         except Exception:
+                             # Address parsing failed; fall through to the check below
+                             pass
+
+                     if not addr:
+                         raise ValueError(f"Invalid address or symbol: {function_address}")
+
+                     func = flat_api.getFunctionAt(addr)
+                     if not func:
+                         # Fall back to the nearest preceding function
+                         func = flat_api.getFunctionBefore(addr)
+                         if not func:
+                             raise ValueError(f"No function found at or near {function_address}")
+
+                     res = decompiler.decompileFunction(func, 60, monitor)
+                     if not res.decompileCompleted():
+                         raise DecompilationError(f"Decompilation failed: {res.getErrorMessage()}")
+
+                     return res.getDecompiledFunction().getC()
+                 else:
+                     raise NotImplementedError(
+                         "Full file decompilation not supported. Please specify a function."
+                     )
+
+             except Exception as e:
+                 logger.error(f"Ghidra decompilation failed: {e}")
+                 # Invalidate the cache on error, closing the evicted context
+                 with self._project_lock:
+                     if file_path in self._projects:
+                         _prog, _api, ctx = self._projects.pop(file_path)
+                         self._close_project(file_path, ctx)
+                 raise
+
+     async def decompile_async(self, file_path: str, function_address: str | None = None) -> str:
+         """Execute decompilation asynchronously."""
+         return await asyncio.to_thread(self.decompile, file_path, function_address)
+
+
+ # Global instance
+ ghidra_manager = GhidraManager()
+
+
+ def get_ghidra_manager() -> GhidraManager:
+     """Get the global GhidraManager instance."""
+     return ghidra_manager
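For orientation, a minimal usage sketch of this manager (the binary path and address are hypothetical; assumes pyghidra and a local Ghidra installation):

    import asyncio
    from reversecore_mcp.core.ghidra_manager import get_ghidra_manager

    async def main() -> None:
        manager = get_ghidra_manager()
        try:
            # Runs decompile() in a worker thread; requests still serialize
            # on the JVM lock inside decompile().
            code = await manager.decompile_async("/tmp/sample.bin", "0x401000")
            print(code)
        finally:
            manager.close_all()  # release cached Ghidra project contexts

    asyncio.run(main())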
reversecore_mcp/core/json_utils.py
@@ -0,0 +1,131 @@
+ """
+ High-performance JSON utilities with orjson fallback.
+
+ This module provides a drop-in replacement for the standard json module
+ with automatic fallback. It uses orjson when available for 3-5x faster
+ JSON parsing and serialization, falling back to the standard json module
+ if orjson is not installed.
+
+ Performance comparison:
+ - orjson.loads(): ~3-5x faster than json.loads()
+ - orjson.dumps(): ~3-5x faster than json.dumps()
+ - Particularly impactful for large JSON objects and hot paths
+ """
+
+ import json as _stdlib_json
+ from typing import Any
+
+ try:
+     import orjson
+
+     _ORJSON_AVAILABLE = True
+     # Expose the stdlib JSONDecodeError for consistent error handling
+     JSONDecodeError = _stdlib_json.JSONDecodeError
+
+     def loads(s: str | bytes) -> Any:
+         """
+         Parse JSON with orjson (fast path).
+
+         Re-raises orjson.JSONDecodeError as the stdlib JSONDecodeError so
+         callers can catch one exception type regardless of backend.
+         """
+         if isinstance(s, str):
+             s = s.encode("utf-8")
+         try:
+             return orjson.loads(s)
+         except orjson.JSONDecodeError as e:
+             # Re-raise as stdlib JSONDecodeError for compatibility. The orjson
+             # message usually carries position info, and `doc` must be a str.
+             raise _stdlib_json.JSONDecodeError(
+                 str(e), s.decode("utf-8", errors="replace"), 0
+             ) from e
+
+     def dumps(
+         obj: Any,
+         indent: int | None = None,
+         ensure_ascii: bool = True,
+         default: Any = None,
+     ) -> str:
+         """
+         Serialize an object to JSON with orjson (fast path).
+
+         Note: orjson only supports 2-space indentation when indent is provided.
+         Any non-None indent value will result in 2-space pretty-printing.
+         This differs slightly from stdlib json, which respects the exact indent value.
+
+         Note: orjson always outputs UTF-8 and never escapes non-ASCII.
+         The ensure_ascii parameter is accepted for API compatibility but ignored.
+
+         Args:
+             obj: Python object to serialize
+             indent: If provided (any non-None value), pretty-print with 2-space indentation
+             ensure_ascii: Ignored (orjson always uses UTF-8). Kept for API compatibility.
+             default: Callable to serialize non-serializable objects (passed to the stdlib fallback)
+
+         Returns:
+             JSON string
+         """
+         try:
+             if indent is not None:
+                 # orjson only supports 2-space indentation via OPT_INDENT_2;
+                 # for compatibility, any indent value triggers pretty-printing
+                 result = orjson.dumps(obj, option=orjson.OPT_INDENT_2)
+             else:
+                 result = orjson.dumps(obj)
+             # orjson returns bytes; convert to str for compatibility
+             return result.decode("utf-8")
+         except TypeError:
+             # orjson can't serialize some types; fall back to stdlib with default
+             return _stdlib_json.dumps(
+                 obj, indent=indent, ensure_ascii=ensure_ascii, default=default
+             )
+
+ except ImportError:
+     # Fall back to the standard library json
+     _ORJSON_AVAILABLE = False
+     # Use the stdlib JSONDecodeError for compatibility
+     JSONDecodeError = _stdlib_json.JSONDecodeError
+
+     def loads(s: str | bytes) -> Any:
+         """
+         Parse JSON with the standard library (fallback).
+
+         Args:
+             s: JSON string or bytes to parse
+
+         Returns:
+             Parsed Python object
+         """
+         if isinstance(s, bytes):
+             s = s.decode("utf-8")
+         return _stdlib_json.loads(s)
+
+     def dumps(
+         obj: Any,
+         indent: int | None = None,
+         ensure_ascii: bool = True,
+         default: Any = None,
+     ) -> str:
+         """
+         Serialize an object to JSON with the standard library (fallback).
+
+         Args:
+             obj: Python object to serialize
+             indent: If provided, pretty-print with indentation
+             ensure_ascii: If True, escape non-ASCII characters
+             default: Callable to serialize non-serializable objects
+
+         Returns:
+             JSON string
+         """
+         return _stdlib_json.dumps(obj, indent=indent, ensure_ascii=ensure_ascii, default=default)
+
+
+ def is_orjson_available() -> bool:
+     """
+     Check if orjson is available.
+
+     Returns:
+         True if orjson is installed and being used, False if using the fallback
+     """
+     return _ORJSON_AVAILABLE
+
+
+ # For compatibility, expose the same interface as the json module
+ __all__ = ["loads", "dumps", "is_orjson_available", "JSONDecodeError"]
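A short sketch of the intended call pattern; behavior is the same on either backend apart from the documented indent and ensure_ascii differences:

    from reversecore_mcp.core import json_utils as json

    data = json.loads('{"name": "reversecore", "tools": 79}')
    text = json.dumps(data, indent=2)  # 2-space pretty print on both backends

    try:
        json.loads("{not json}")
    except json.JSONDecodeError as e:
        # One exception type regardless of backend
        print(f"parse error: {e.msg}")

    print("orjson fast path" if json.is_orjson_available() else "stdlib fallback")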
reversecore_mcp/core/loader.py
@@ -0,0 +1,73 @@
+ """
+ Plugin loader for dynamically discovering and loading plugins.
+ """
+
+ import importlib
+ import inspect
+ import pkgutil
+
+ from reversecore_mcp.core.logging_config import get_logger
+ from reversecore_mcp.core.plugin import Plugin
+
+ logger = get_logger(__name__)
+
+
+ class PluginLoader:
+     """Responsible for discovering and loading plugins."""
+
+     def __init__(self):
+         self._plugins: dict[str, Plugin] = {}
+
+     def discover_plugins(
+         self, package_path: str, package_name: str = "reversecore_mcp.tools"
+     ) -> list[Plugin]:
+         """
+         Discover and load plugins from a package directory (including subdirectories).
+
+         Args:
+             package_path: Absolute path to the package directory
+             package_name: Python package name prefix
+
+         Returns:
+             List of instantiated Plugin objects
+         """
+         logger.info(f"Discovering plugins in {package_path}")
+
+         discovered_plugins = []
+
+         # Use walk_packages to recursively iterate over all modules, including subdirectories
+         for _importer, name, _is_pkg in pkgutil.walk_packages(
+             [package_path], prefix=f"{package_name}."
+         ):
+             # Skip __init__ modules and __pycache__ directories
+             if name.endswith(".__init__") or "__pycache__" in name:
+                 continue
+
+             try:
+                 module = importlib.import_module(name)
+
+                 # Find Plugin subclasses in the module
+                 for item_name, item in inspect.getmembers(module):
+                     if inspect.isclass(item) and issubclass(item, Plugin) and item is not Plugin:
+                         try:
+                             # Instantiate the plugin
+                             plugin_instance = item()
+                             self._plugins[plugin_instance.name] = plugin_instance
+                             discovered_plugins.append(plugin_instance)
+                             logger.info(f"Loaded plugin: {plugin_instance.name}")
+                         except Exception as e:
+                             logger.error(f"Failed to instantiate plugin {item_name}: {e}")
+
+             except ImportError as e:
+                 logger.warning(f"Failed to import module {name}: {e}")
+                 continue
+
+         return discovered_plugins
+
+     def get_plugin(self, name: str) -> Plugin | None:
+         """Get a loaded plugin by name."""
+         return self._plugins.get(name)
+
+     def get_all_plugins(self) -> list[Plugin]:
+         """Get all loaded plugins."""
+         return list(self._plugins.values())
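Hypothetical driver code for the loader, assuming the default reversecore_mcp.tools package is the discovery root:

    from pathlib import Path

    import reversecore_mcp.tools as tools_pkg
    from reversecore_mcp.core.loader import PluginLoader

    loader = PluginLoader()
    # Resolve the absolute directory of the tools package
    package_path = str(Path(tools_pkg.__file__).parent)
    plugins = loader.discover_plugins(package_path, package_name="reversecore_mcp.tools")

    for plugin in plugins:
        print(plugin.name)  # names also retrievable via loader.get_plugin(name)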
reversecore_mcp/core/logging_config.py
@@ -0,0 +1,206 @@
+ """
+ Logging configuration for Reversecore_MCP.
+
+ This module provides structured logging with an optional JSON output format
+ and log rotation. It uses high-performance JSON serialization when available.
+
+ Environment Variables:
+     LOG_LEVEL: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+     LOG_FORMAT: Log format - "human" or "json"
+     LOG_FILE: Path to the log file
+ """
+
+ import logging
+ import platform
+ import sys
+ import time
+ from logging.handlers import RotatingFileHandler
+ from typing import Any
+
+ from reversecore_mcp.core import json_utils as json
+ from reversecore_mcp.core.config import get_config
+
+
+ class JSONFormatter(logging.Formatter):
+     """JSON formatter for structured logging with rich context."""
+
+     def __init__(
+         self,
+         datefmt: str | None = None,
+         include_extra: bool = True,
+         include_hostname: bool = True,
+     ):
+         super().__init__(datefmt=datefmt)
+         self.include_extra = include_extra
+         self.include_hostname = include_hostname
+         self._hostname = platform.node() if include_hostname else None
+
+     def format(self, record: logging.LogRecord) -> str:
+         """Format a log record as JSON with structured fields."""
+         log_data: dict[str, Any] = {
+             "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(record.created))
+             + f".{int(record.msecs):03d}Z",
+             "level": record.levelname,
+             "logger": record.name,
+             "message": record.getMessage(),
+             "module": record.module,
+             "function": record.funcName,
+             "line": record.lineno,
+         }
+
+         # Add the hostname for distributed systems
+         if self._hostname:
+             log_data["hostname"] = self._hostname
+
+         # Add process/thread info for debugging concurrency issues
+         log_data["process"] = {"id": record.process, "name": record.processName}
+         log_data["thread"] = {"id": record.thread, "name": record.threadName}
+
+         # Add extra fields if present (tool execution context)
+         if self.include_extra:
+             extra_fields = {}
+             for key in ("tool_name", "file_name", "execution_time_ms", "error_code", "binary_path"):
+                 if hasattr(record, key):
+                     extra_fields[key] = getattr(record, key)
+
+             # Include any custom extra fields
+             for key, value in record.__dict__.items():
+                 if key.startswith("ctx_"):  # Convention: ctx_ prefix for context fields
+                     extra_fields[key[4:]] = value  # Strip the ctx_ prefix
+
+             if extra_fields:
+                 log_data["context"] = extra_fields
+
+         # Add exception info if present
+         if record.exc_info:
+             log_data["exception"] = {
+                 "type": record.exc_info[0].__name__ if record.exc_info[0] else None,
+                 "message": str(record.exc_info[1]) if record.exc_info[1] else None,
+                 "traceback": self.formatException(record.exc_info),
+             }
+
+         # Safe serialization: default=str prevents crashes on non-serializable objects
+         return json.dumps(log_data, default=str)
+
+
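Illustratively, one formatted record then looks like this (all field values hypothetical):

    {"timestamp": "2025-01-01T12:00:00.000Z", "level": "INFO", "logger": "reversecore_mcp.tools", "message": "Starting analysis", "module": "tools", "function": "run", "line": 42, "hostname": "analyst-box", "process": {"id": 1234, "name": "MainProcess"}, "thread": {"id": 5678, "name": "MainThread"}, "context": {"tool_name": "decompile"}}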
+ class ContextAdapter(logging.LoggerAdapter):
+     """Logger adapter that adds contextual information to log records."""
+
+     def process(self, msg: str, kwargs: dict[str, Any]) -> tuple[str, dict[str, Any]]:
+         """Add extra context to the log record."""
+         extra = kwargs.get("extra", {})
+         extra.update(self.extra)
+         kwargs["extra"] = extra
+         return msg, kwargs
+
+
+ def setup_logging() -> None:
+     """
+     Configure logging for Reversecore_MCP.
+
+     Logging configuration:
+     - Log level from settings (default: INFO)
+     - Log format from settings (default: human-readable)
+     - Log file from settings (default: /tmp/reversecore/app.log)
+     - Log rotation: 100MB max size, keep 10 backup files
+
+     When LOG_FORMAT=json:
+     - Console output is JSON formatted
+     - File output is JSON formatted
+     - Structured fields include timestamp, level, logger, message, context
+
+     When LOG_FORMAT=human (default):
+     - Console output is human-readable
+     - File output is human-readable
+     """
+     settings = get_config()
+     log_level = settings.log_level.upper()
+     log_format = settings.log_format.lower()
+     log_file = settings.log_file
+
+     # Create the log directory if it doesn't exist
+     try:
+         log_file.parent.mkdir(parents=True, exist_ok=True)
+     except OSError:  # PermissionError is a subclass of OSError
+         pass
+
+     # Configure the package root logger
+     logger = logging.getLogger("reversecore_mcp")
+     logger.setLevel(getattr(logging, log_level, logging.INFO))
+
+     # Remove existing handlers
+     logger.handlers.clear()
+
+     # Determine the formatter based on LOG_FORMAT
+     if log_format == "json":
+         console_formatter: logging.Formatter = JSONFormatter()
+         file_formatter: logging.Formatter = JSONFormatter()
+     else:
+         human_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+         datefmt = "%Y-%m-%d %H:%M:%S"
+         console_formatter = logging.Formatter(human_format, datefmt=datefmt)
+         file_formatter = logging.Formatter(human_format, datefmt=datefmt)
+
+     # Console handler
+     console_handler = logging.StreamHandler(sys.stderr)
+     # Match the console handler to the configured level so DEBUG logs appear when enabled
+     console_handler.setLevel(getattr(logging, log_level, logging.INFO))
+     console_handler.setFormatter(console_formatter)
+     logger.addHandler(console_handler)
+
+     # File handler with rotation (only if we can write to the log file)
+     try:
+         file_handler = RotatingFileHandler(
+             str(log_file),
+             maxBytes=100 * 1024 * 1024,  # 100MB
+             backupCount=10,
+             encoding="utf-8",
+         )
+         file_handler.setLevel(logging.DEBUG)
+         file_handler.setFormatter(file_formatter)
+         logger.addHandler(file_handler)
+     except OSError as e:
+         logger.warning(
+             f"Could not create log file handler for {log_file}: {e}. Logging to console only."
+         )
+
+     # Prevent propagation to the root logger
+     logger.propagate = False
+
+
+ def get_logger(name: str) -> logging.Logger:
+     """
+     Get a logger instance for a module.
+
+     Args:
+         name: Logger name (typically __name__)
+
+     Returns:
+         Logger instance
+     """
+     return logging.getLogger(f"reversecore_mcp.{name}")
+
+
+ def get_context_logger(name: str, **context: Any) -> ContextAdapter:
+     """
+     Get a logger with persistent context fields.
+
+     This is useful for adding context that should appear in all log messages,
+     such as the tool name or the file being analyzed.
+
+     Args:
+         name: Logger name (typically __name__)
+         **context: Key-value pairs to include in all log messages
+
+     Returns:
+         ContextAdapter with the given context
+
+     Example:
+         logger = get_context_logger(__name__, tool_name="neural_decompile")
+         logger.info("Starting analysis")  # Includes tool_name in the output
+     """
+     base_logger = get_logger(name)
+     # Prefix context keys with ctx_ to avoid conflicts
+     prefixed_context = {f"ctx_{k}": v for k, v in context.items()}
+     return ContextAdapter(base_logger, prefixed_context)
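A minimal end-to-end sketch, assuming the config layer picks LOG_FORMAT up from the environment as the module docstring describes:

    import os

    os.environ["LOG_FORMAT"] = "json"  # assumption: set before config is loaded

    from reversecore_mcp.core.logging_config import get_context_logger, setup_logging

    setup_logging()
    logger = get_context_logger(__name__, tool_name="decompile")
    logger.info("Starting analysis")  # the record's "context" carries tool_name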