iflow-mcp_developermode-korea_reversecore-mcp 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/METADATA +543 -0
  2. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/RECORD +79 -0
  3. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/WHEEL +5 -0
  4. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/entry_points.txt +2 -0
  5. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
  6. iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/top_level.txt +1 -0
  7. reversecore_mcp/__init__.py +9 -0
  8. reversecore_mcp/core/__init__.py +78 -0
  9. reversecore_mcp/core/audit.py +101 -0
  10. reversecore_mcp/core/binary_cache.py +138 -0
  11. reversecore_mcp/core/command_spec.py +357 -0
  12. reversecore_mcp/core/config.py +432 -0
  13. reversecore_mcp/core/container.py +288 -0
  14. reversecore_mcp/core/decorators.py +152 -0
  15. reversecore_mcp/core/error_formatting.py +93 -0
  16. reversecore_mcp/core/error_handling.py +142 -0
  17. reversecore_mcp/core/evidence.py +229 -0
  18. reversecore_mcp/core/exceptions.py +296 -0
  19. reversecore_mcp/core/execution.py +240 -0
  20. reversecore_mcp/core/ghidra.py +642 -0
  21. reversecore_mcp/core/ghidra_helper.py +481 -0
  22. reversecore_mcp/core/ghidra_manager.py +234 -0
  23. reversecore_mcp/core/json_utils.py +131 -0
  24. reversecore_mcp/core/loader.py +73 -0
  25. reversecore_mcp/core/logging_config.py +206 -0
  26. reversecore_mcp/core/memory.py +721 -0
  27. reversecore_mcp/core/metrics.py +198 -0
  28. reversecore_mcp/core/mitre_mapper.py +365 -0
  29. reversecore_mcp/core/plugin.py +45 -0
  30. reversecore_mcp/core/r2_helpers.py +404 -0
  31. reversecore_mcp/core/r2_pool.py +403 -0
  32. reversecore_mcp/core/report_generator.py +268 -0
  33. reversecore_mcp/core/resilience.py +252 -0
  34. reversecore_mcp/core/resource_manager.py +169 -0
  35. reversecore_mcp/core/result.py +132 -0
  36. reversecore_mcp/core/security.py +213 -0
  37. reversecore_mcp/core/validators.py +238 -0
  38. reversecore_mcp/dashboard/__init__.py +221 -0
  39. reversecore_mcp/prompts/__init__.py +56 -0
  40. reversecore_mcp/prompts/common.py +24 -0
  41. reversecore_mcp/prompts/game.py +280 -0
  42. reversecore_mcp/prompts/malware.py +1219 -0
  43. reversecore_mcp/prompts/report.py +150 -0
  44. reversecore_mcp/prompts/security.py +136 -0
  45. reversecore_mcp/resources.py +329 -0
  46. reversecore_mcp/server.py +727 -0
  47. reversecore_mcp/tools/__init__.py +49 -0
  48. reversecore_mcp/tools/analysis/__init__.py +74 -0
  49. reversecore_mcp/tools/analysis/capa_tools.py +215 -0
  50. reversecore_mcp/tools/analysis/die_tools.py +180 -0
  51. reversecore_mcp/tools/analysis/diff_tools.py +643 -0
  52. reversecore_mcp/tools/analysis/lief_tools.py +272 -0
  53. reversecore_mcp/tools/analysis/signature_tools.py +591 -0
  54. reversecore_mcp/tools/analysis/static_analysis.py +479 -0
  55. reversecore_mcp/tools/common/__init__.py +58 -0
  56. reversecore_mcp/tools/common/file_operations.py +352 -0
  57. reversecore_mcp/tools/common/memory_tools.py +516 -0
  58. reversecore_mcp/tools/common/patch_explainer.py +230 -0
  59. reversecore_mcp/tools/common/server_tools.py +115 -0
  60. reversecore_mcp/tools/ghidra/__init__.py +19 -0
  61. reversecore_mcp/tools/ghidra/decompilation.py +975 -0
  62. reversecore_mcp/tools/ghidra/ghidra_tools.py +1052 -0
  63. reversecore_mcp/tools/malware/__init__.py +61 -0
  64. reversecore_mcp/tools/malware/adaptive_vaccine.py +579 -0
  65. reversecore_mcp/tools/malware/dormant_detector.py +756 -0
  66. reversecore_mcp/tools/malware/ioc_tools.py +228 -0
  67. reversecore_mcp/tools/malware/vulnerability_hunter.py +519 -0
  68. reversecore_mcp/tools/malware/yara_tools.py +214 -0
  69. reversecore_mcp/tools/patch_explainer.py +19 -0
  70. reversecore_mcp/tools/radare2/__init__.py +13 -0
  71. reversecore_mcp/tools/radare2/r2_analysis.py +972 -0
  72. reversecore_mcp/tools/radare2/r2_session.py +376 -0
  73. reversecore_mcp/tools/radare2/radare2_mcp_tools.py +1183 -0
  74. reversecore_mcp/tools/report/__init__.py +4 -0
  75. reversecore_mcp/tools/report/email.py +82 -0
  76. reversecore_mcp/tools/report/report_mcp_tools.py +344 -0
  77. reversecore_mcp/tools/report/report_tools.py +1076 -0
  78. reversecore_mcp/tools/report/session.py +194 -0
  79. reversecore_mcp/tools/report_tools.py +11 -0
reversecore_mcp/tools/common/patch_explainer.py
@@ -0,0 +1,230 @@
+"""
+Semantic Patch Explainer: Analyzes differences between binaries to explain security patches.
+"""
+
+import difflib
+from typing import Any
+
+from fastmcp import Context
+
+from reversecore_mcp.core.decorators import log_execution
+from reversecore_mcp.core.error_handling import handle_tool_errors
+from reversecore_mcp.core.logging_config import get_logger
+from reversecore_mcp.core.metrics import track_metrics
+from reversecore_mcp.core.result import ToolResult, failure, success
+from reversecore_mcp.core.security import validate_file_path
+from reversecore_mcp.tools.ghidra import decompilation, diff_tools
+
+logger = get_logger(__name__)
+
+
+# Note: PatchExplainerPlugin has been removed.
+# The explain_patch tool is now registered via CommonToolsPlugin in common/__init__.py.
+
+
+@log_execution(tool_name="explain_patch")
+@track_metrics("explain_patch")
+@handle_tool_errors
+async def explain_patch(
+    file_path_a: str,
+    file_path_b: str,
+    function_name: str = None,
+    ctx: Context = None,
+) -> ToolResult:
+    """
+    Analyze differences between two binaries and explain changes in natural language.
+
+    This tool combines binary diffing with decompilation to help understand security patches.
+    It identifies changed functions, decompiles them, and uses heuristics to explain
+    the nature of the changes (e.g., "Added bounds check", "Replaced unsafe API").
+
+    Args:
+        file_path_a: Path to the original binary (e.g., vulnerable version).
+        file_path_b: Path to the modified binary (e.g., patched version).
+        function_name: Optional specific function to analyze. If None, analyzes top changes.
+        ctx: FastMCP Context (auto-injected).
+
+    Returns:
+        ToolResult containing the explanation report.
+    """
+    path_a = validate_file_path(file_path_a)
+    path_b = validate_file_path(file_path_b)
+
+    if ctx:
+        await ctx.info(f"🔍 Analyzing patch: {path_a.name} -> {path_b.name}")
+
+    # 1. Diff Binaries
+    if ctx:
+        await ctx.info("📊 Diffing binaries to find changed functions...")
+
+    diff_result = await diff_tools.diff_binaries(
+        str(path_a), str(path_b), function_name=function_name
+    )
+
+    if diff_result.status != "success":
+        return failure(
+            error_code="DIFF_FAILED",
+            message=f"Binary diff failed: {diff_result.message}",
+        )
+
+    changes = diff_result.data.get("changes", [])
+    if not changes:
+        return success(
+            {
+                "summary": "No significant code changes detected.",
+                "changes": [],
+            }
+        )
+
+    # Filter for code changes (ignore new/deleted functions for now, focus on modified)
+    modified_funcs = []
+    if function_name:
+        # If specific function requested, use it
+        modified_funcs.append(
+            {"address": function_name, "name": function_name}
+        )  # Address might be name here
+    else:
+        # Extract function names/addresses from changes
+        # diff_binaries returns list of changes. We need to map them to functions.
+        # For simplicity, let's assume we can get a list of changed function addresses/names.
+        # Since diff_binaries output format might vary, let's rely on what we have.
+        # If diff_binaries returns a list of changes, we might need to parse it.
+        # BUT, diff_tools.diff_binaries returns a structured JSON.
+        # Let's assume for this implementation we pick top 3 changed functions if not specified.
+        # To do this properly, we might need a helper to map changes to functions.
+        # For now, let's try to analyze the function specified or warn if none.
+        pass
+
+    # If no function specified, we need to identify WHICH functions changed.
+    # diff_binaries might return raw offsets.
+    # Let's use a simpler approach: If function_name is None, we ask the user to specify one
+    # OR we try to find it.
+    # Actually, `diff_binaries` output has "address". We can try to resolve it to a name.
+
+    target_functions = []
+    if function_name:
+        target_functions.append(function_name)
+    else:
+        # Heuristic: Pick the first few changed addresses that look like functions
+        # This is a simplification. In a real scenario, we'd map offsets to symbols.
+        # For now, let's just take the first 3 unique addresses from changes.
+        seen = set()
+        for change in changes:
+            addr = change.get("address")
+            if addr and addr not in seen:
+                target_functions.append(addr)
+                seen.add(addr)
+                if len(target_functions) >= 3:
+                    break
+
+    if not target_functions:
+        return success({"summary": "No changed functions identified to analyze."})
+
+    explanations = []
+
+    for func in target_functions:
+        if ctx:
+            await ctx.info(f"🧠 Analyzing function: {func}...")
+
+        # 2. Decompile Both Versions
+        # We use smart_decompile (which uses Ghidra/r2)
+        # Note: We need to handle the case where function exists in both.
+
+        # Decompile A
+        res_a = await decompilation.smart_decompile(str(path_a), str(func))
+        code_a = res_a.data if res_a.status == "success" else ""
+
+        # Decompile B
+        res_b = await decompilation.smart_decompile(str(path_b), str(func))
+        code_b = res_b.data if res_b.status == "success" else ""
+
+        if not code_a or not code_b:
+            explanations.append(
+                {"function": func, "error": "Failed to decompile one or both versions."}
+            )
+            continue
+
+        # 3. Compare and Explain
+        explanation = _generate_explanation(code_a, code_b)
+        explanations.append(
+            {
+                "function": func,
+                "explanation": explanation,
+                "diff_snippet": _generate_diff_snippet(code_a, code_b),
+            }
+        )
+
+    return success(
+        {"summary": f"Analyzed {len(explanations)} function(s).", "explanations": explanations}
+    )
+
+
+def _generate_explanation(code_a: str, code_b: str) -> dict:
+    """
+    Heuristically explain changes between two code snippets.
+    """
+    explanation = {"summary": "Code structure changed.", "details": []}
+
+    # Normalize code (remove whitespace changes)
+    lines_a = [line.strip() for line in code_a.splitlines() if line.strip()]
+    lines_b = [line.strip() for line in code_b.splitlines() if line.strip()]
+
+    # 1. Check for Added Conditions (Security Checks)
+    # Heuristic: More 'if' statements in B than A
+    if_count_a = sum(1 for line in lines_a if line.startswith("if"))
+    if_count_b = sum(1 for line in lines_b if line.startswith("if"))
+
+    if if_count_b > if_count_a:
+        explanation["details"].append(
+            "🛡️ **Added Security Check**: New conditional logic detected (likely bounds check or validation)."
+        )
+        explanation["summary"] = "Security checks were added."
+
+    # 2. Check for API Replacements
+    # Common safe replacements
+    replacements = [
+        ("strcpy", "strncpy", "Replaced unsafe string copy with bounded copy."),
+        ("sprintf", "snprintf", "Replaced unsafe format string with bounded version."),
+        ("gets", "fgets", "Replaced dangerous input function."),
+        ("memcpy", "memcpy_s", "Replaced memory copy with secure version."),
+    ]
+
+    code_a_str = " ".join(lines_a)
+    code_b_str = " ".join(lines_b)
+
+    for old, new, msg in replacements:
+        if old in code_a_str and new in code_b_str:
+            explanation["details"].append(f"🔄 **API Hardening**: {msg} (`{old}` -> `{new}`)")
+            explanation["summary"] = "Unsafe APIs were replaced."
+
+    # 3. Check for Integer Overflow Checks
+    # Look for patterns like (a > MAX - b) or specific constants
+    if "MAX" in code_b_str and "MAX" not in code_a_str:
+        explanation["details"].append(
+            "🔢 **Integer Overflow Check**: Potential overflow check added using MAX constants."
+        )
+
+    # 4. Check for Logic Removal
+    if len(lines_b) < len(lines_a) * 0.8:
+        explanation["details"].append(
+            "✂️ **Logic Removal**: Significant portion of code was removed (dead code or feature removal)."
+        )
+
+    if not explanation["details"]:
+        explanation["details"].append("ℹ️ Logic modified without obvious security patterns.")
+
+    return explanation
+
+
+def _generate_diff_snippet(code_a: str, code_b: str, context: int = 3) -> str:
+    """Generate a unified diff snippet."""
+    a_lines = code_a.splitlines()
+    b_lines = code_b.splitlines()
+
+    diff = difflib.unified_diff(
+        a_lines, b_lines, fromfile="Original", tofile="Patched", n=context, lineterm=""
+    )
+
+    # Convert generator to string, limit length
+    diff_text = "\n".join(list(diff)[:50])  # Limit to 50 lines
+    return diff_text
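To illustrate how the heuristics in _generate_explanation and _generate_diff_snippet behave, here is a minimal sketch. The import path follows the file list above; the two C snippets are hypothetical inputs invented for this example, not decompiler output from the package.

# Minimal sketch: feed two hand-written code snippets through the patch-explainer
# heuristics. Assumes the wheel is installed; the snippets are hypothetical.
from reversecore_mcp.tools.common.patch_explainer import (
    _generate_explanation,
    _generate_diff_snippet,
)

vulnerable = """
void copy(char *dst, const char *src) {
    strcpy(dst, src);
}
"""

patched = """
void copy(char *dst, const char *src, size_t n) {
    if (n == 0) return;
    strncpy(dst, src, n - 1);
    dst[n - 1] = '\\0';
}
"""

explanation = _generate_explanation(vulnerable, patched)
# Two heuristics fire here: "Added Security Check" (one more 'if' line in the
# patched version) and "API Hardening" (strcpy -> strncpy). Because later rules
# overwrite "summary", the reported summary is "Unsafe APIs were replaced."
print(explanation["summary"])
for detail in explanation["details"]:
    print("-", detail)

# Unified diff snippet, capped at 50 lines by the helper.
print(_generate_diff_snippet(vulnerable, patched))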
reversecore_mcp/tools/common/server_tools.py
@@ -0,0 +1,115 @@
+
+"""
+Server tools for health checks, metrics, and monitoring.
+"""
+
+import os
+import resource
+import time
+from typing import Any
+
+from fastmcp import FastMCP
+
+from reversecore_mcp.core.logging_config import get_logger
+from reversecore_mcp.core.metrics import metrics_collector
+from reversecore_mcp.core.plugin import Plugin
+from reversecore_mcp.core.result import ToolResult, success
+
+logger = get_logger(__name__)
+
+# Record module load time as approximate server start time
+SERVER_START_TIME = time.time()
+
+
+class ServerToolsPlugin(Plugin):
+    """Plugin for server management and monitoring tools."""
+
+    @property
+    def name(self) -> str:
+        return "server_tools"
+
+    @property
+    def description(self) -> str:
+        return "Tools for server health checks, metrics, and monitoring."
+
+    def register(self, mcp: FastMCP) -> None:
+        """Register server tools."""
+
+        @mcp.tool()
+        async def get_server_health() -> ToolResult:
+            """
+            Get the current health status and resource usage of the MCP server.
+
+            Use this to monitor the server's uptime, memory consumption,
+            and tool execution statistics.
+
+            Returns:
+                ToolResult containing:
+                - uptime_seconds: Server uptime
+                - memory_usage_mb: Current memory usage in MB
+                - status: 'healthy' or 'degraded'
+                - tool_stats: Summary of tool execution success/failure
+            """
+            uptime = time.time() - SERVER_START_TIME
+
+            # Memory usage (RSS)
+            # getrusage returns kilobytes on Linux, bytes on macOS
+            usage = resource.getrusage(resource.RUSAGE_SELF)
+            memory_mb = usage.ru_maxrss / 1024
+            if os.uname().sysname == "Darwin":
+                # macOS returns bytes
+                memory_mb = usage.ru_maxrss / (1024 * 1024)
+
+            # Metrics summary
+            metrics = metrics_collector.get_metrics()
+            tool_metrics = metrics.get("tools", {})
+
+            total_calls = sum(m.get("calls", 0) for m in tool_metrics.values())
+            total_errors = sum(m.get("errors", 0) for m in tool_metrics.values())
+
+            # Determine status
+            status = "healthy"
+            if total_calls > 0 and (total_errors / total_calls) > 0.2:
+                # If error rate > 20%, mark as degraded
+                status = "degraded"
+
+            return success({
+                "status": status,
+                "uptime_seconds": round(uptime, 2),
+                "uptime_formatted": _format_uptime(uptime),
+                "memory_usage_mb": round(memory_mb, 2),
+                "total_calls": total_calls,
+                "total_errors": total_errors,
+                "error_rate": f"{(total_errors/total_calls)*100:.1f}%" if total_calls > 0 else "0.0%",
+                "active_tools": len(tool_metrics)
+            })
+
+        @mcp.tool()
+        async def get_tool_metrics(tool_name: str = None) -> ToolResult:
+            """
+            Get detailed execution metrics for specific or all tools.
+
+            Args:
+                tool_name: Optional tool name to filter results
+
+            Returns:
+                Detailed metrics including execution times, call counts, and error rates.
+            """
+            metrics = metrics_collector.get_metrics()
+            tools = metrics.get("tools", {})
+
+            if tool_name:
+                if tool_name not in tools:
+                    return success({}, message=f"No metrics found for tool '{tool_name}'")
+                return success({tool_name: tools[tool_name]})
+
+            # Return all
+            return success(tools)
+
+        def _format_uptime(seconds: float) -> str:
+            m, s = divmod(seconds, 60)
+            h, m = divmod(m, 60)
+            d, h = divmod(h, 24)
+            if d > 0:
+                return f"{int(d)}d {int(h)}h {int(m)}m"
+            return f"{int(h)}h {int(m)}m {int(s)}s"
reversecore_mcp/tools/ghidra/__init__.py
@@ -0,0 +1,19 @@
+"""Ghidra tools package."""
+
+# Backward compatibility re-exports for legacy imports
+# These modules have been moved but are re-exported for backward compatibility
+from reversecore_mcp.tools.analysis import diff_tools, lief_tools, signature_tools, static_analysis
+from reversecore_mcp.tools.ghidra import decompilation
+from reversecore_mcp.tools.ghidra.ghidra_tools import GhidraToolsPlugin
+from reversecore_mcp.tools.radare2 import r2_analysis
+
+__all__ = [
+    "GhidraToolsPlugin",
+    "decompilation",
+    # Backward compatibility
+    "diff_tools",
+    "signature_tools",
+    "static_analysis",
+    "lief_tools",
+    "r2_analysis",
+]
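A short usage note on these re-exports: assuming the wheel is installed, the legacy and the relocated import paths should resolve to the same module object, so older code keeps working unchanged.

# Hedged sketch: both import paths point at the same module thanks to the
# re-exports above (assumes the package is installed and importable).
from reversecore_mcp.tools.ghidra import diff_tools as legacy_diff_tools
from reversecore_mcp.tools.analysis import diff_tools as new_diff_tools

assert legacy_diff_tools is new_diff_tools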