iflow-mcp_developermode-korea_reversecore-mcp 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/METADATA +543 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/RECORD +79 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/WHEEL +5 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_developermode_korea_reversecore_mcp-1.0.0.dist-info/top_level.txt +1 -0
- reversecore_mcp/__init__.py +9 -0
- reversecore_mcp/core/__init__.py +78 -0
- reversecore_mcp/core/audit.py +101 -0
- reversecore_mcp/core/binary_cache.py +138 -0
- reversecore_mcp/core/command_spec.py +357 -0
- reversecore_mcp/core/config.py +432 -0
- reversecore_mcp/core/container.py +288 -0
- reversecore_mcp/core/decorators.py +152 -0
- reversecore_mcp/core/error_formatting.py +93 -0
- reversecore_mcp/core/error_handling.py +142 -0
- reversecore_mcp/core/evidence.py +229 -0
- reversecore_mcp/core/exceptions.py +296 -0
- reversecore_mcp/core/execution.py +240 -0
- reversecore_mcp/core/ghidra.py +642 -0
- reversecore_mcp/core/ghidra_helper.py +481 -0
- reversecore_mcp/core/ghidra_manager.py +234 -0
- reversecore_mcp/core/json_utils.py +131 -0
- reversecore_mcp/core/loader.py +73 -0
- reversecore_mcp/core/logging_config.py +206 -0
- reversecore_mcp/core/memory.py +721 -0
- reversecore_mcp/core/metrics.py +198 -0
- reversecore_mcp/core/mitre_mapper.py +365 -0
- reversecore_mcp/core/plugin.py +45 -0
- reversecore_mcp/core/r2_helpers.py +404 -0
- reversecore_mcp/core/r2_pool.py +403 -0
- reversecore_mcp/core/report_generator.py +268 -0
- reversecore_mcp/core/resilience.py +252 -0
- reversecore_mcp/core/resource_manager.py +169 -0
- reversecore_mcp/core/result.py +132 -0
- reversecore_mcp/core/security.py +213 -0
- reversecore_mcp/core/validators.py +238 -0
- reversecore_mcp/dashboard/__init__.py +221 -0
- reversecore_mcp/prompts/__init__.py +56 -0
- reversecore_mcp/prompts/common.py +24 -0
- reversecore_mcp/prompts/game.py +280 -0
- reversecore_mcp/prompts/malware.py +1219 -0
- reversecore_mcp/prompts/report.py +150 -0
- reversecore_mcp/prompts/security.py +136 -0
- reversecore_mcp/resources.py +329 -0
- reversecore_mcp/server.py +727 -0
- reversecore_mcp/tools/__init__.py +49 -0
- reversecore_mcp/tools/analysis/__init__.py +74 -0
- reversecore_mcp/tools/analysis/capa_tools.py +215 -0
- reversecore_mcp/tools/analysis/die_tools.py +180 -0
- reversecore_mcp/tools/analysis/diff_tools.py +643 -0
- reversecore_mcp/tools/analysis/lief_tools.py +272 -0
- reversecore_mcp/tools/analysis/signature_tools.py +591 -0
- reversecore_mcp/tools/analysis/static_analysis.py +479 -0
- reversecore_mcp/tools/common/__init__.py +58 -0
- reversecore_mcp/tools/common/file_operations.py +352 -0
- reversecore_mcp/tools/common/memory_tools.py +516 -0
- reversecore_mcp/tools/common/patch_explainer.py +230 -0
- reversecore_mcp/tools/common/server_tools.py +115 -0
- reversecore_mcp/tools/ghidra/__init__.py +19 -0
- reversecore_mcp/tools/ghidra/decompilation.py +975 -0
- reversecore_mcp/tools/ghidra/ghidra_tools.py +1052 -0
- reversecore_mcp/tools/malware/__init__.py +61 -0
- reversecore_mcp/tools/malware/adaptive_vaccine.py +579 -0
- reversecore_mcp/tools/malware/dormant_detector.py +756 -0
- reversecore_mcp/tools/malware/ioc_tools.py +228 -0
- reversecore_mcp/tools/malware/vulnerability_hunter.py +519 -0
- reversecore_mcp/tools/malware/yara_tools.py +214 -0
- reversecore_mcp/tools/patch_explainer.py +19 -0
- reversecore_mcp/tools/radare2/__init__.py +13 -0
- reversecore_mcp/tools/radare2/r2_analysis.py +972 -0
- reversecore_mcp/tools/radare2/r2_session.py +376 -0
- reversecore_mcp/tools/radare2/radare2_mcp_tools.py +1183 -0
- reversecore_mcp/tools/report/__init__.py +4 -0
- reversecore_mcp/tools/report/email.py +82 -0
- reversecore_mcp/tools/report/report_mcp_tools.py +344 -0
- reversecore_mcp/tools/report/report_tools.py +1076 -0
- reversecore_mcp/tools/report/session.py +194 -0
- reversecore_mcp/tools/report_tools.py +11 -0
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
"""Prompts for report generation."""
|
|
2
|
+
|
|
3
|
+
from reversecore_mcp.prompts.common import DOCKER_PATH_RULE, LANGUAGE_RULE
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def report_generation_mode(filename: str) -> str:
    """Generate professional malware analysis reports with accurate timestamps and IOC tracking.

    Args:
        filename: Name of the sample (relative to the workspace) the agent
            should analyze; interpolated into the prompt text so the agent
            operates on the requested sample rather than a placeholder.

    Returns:
        A fully-rendered prompt string guiding the report workflow.
    """
    return f"""
You are a Security Report Specialist generating professional malware analysis documentation.
Your task is to analyze '{filename}' and create a comprehensive, shareable report.

{LANGUAGE_RULE}

{DOCKER_PATH_RULE}

═══════════════════════════════════════════════════════════════════════════
██ REPORT GENERATION WORKFLOW ██
═══════════════════════════════════════════════════════════════════════════

[STEP 1] Initialize Analysis Session
First, get accurate system time and start a tracking session:

```
get_system_time()  # Get server timestamp (prevents date hallucination)
start_analysis_session(
    sample_path="{filename}",
    analyst="Your Name",
    severity="medium"  # low, medium, high, critical
)
```

[STEP 2] Perform Analysis
Conduct your analysis using appropriate tools:

```
# 1. Start session
create_analysis_session(file_path="{filename}")

# 2. Extract metadata
parse_binary_with_lief(file_path="{filename}")

# 3. Analyze code
# ...dormant_detector("{filename}")
```

[STEP 3] Collect IOCs During Analysis
As you find indicators, add them to the session:

```
add_session_ioc("hashes", "SHA256: abc123...")
add_session_ioc("ips", "192.168.1.100")
add_session_ioc("domains", "malware-c2.com")
add_session_ioc("urls", "http://evil.com/payload.exe")
```

Valid IOC types: hashes, ips, domains, urls, files, registry, mutexes, emails

[STEP 4] Document MITRE ATT&CK Techniques
Map behaviors to MITRE framework:

```
add_session_mitre("T1059.001", "PowerShell", "Execution")
add_session_mitre("T1547.001", "Registry Run Keys", "Persistence")
add_session_mitre("T1071.001", "Web Protocols", "Command and Control")
```

[STEP 5] Add Analysis Notes
Document important findings:

```
add_session_note("Found encrypted config at 0x401000", category="finding")
add_session_note("Sample connects to C2 on port 443", category="behavior")
add_session_note("Anti-VM checks detected", category="warning")
```

Note categories: general, finding, warning, important, behavior

**Tip: Label each note with evidence level!**
```
add_session_note("[🔍 OBSERVED] Procmon captured registry write to Run key", category="finding")
add_session_note("[🔎 INFERRED] CryptEncrypt import suggests encryption capability", category="finding")
add_session_note("[❓ POSSIBLE] SMB functions may enable lateral movement", category="warning")
```

[STEP 6] Set Severity and Tags
```
set_session_severity("high")
add_session_tag("ransomware")
add_session_tag("APT")
```

[STEP 7] End Session and Generate Report
```
end_analysis_session(summary="Brief summary of findings...")

create_analysis_report(
    template_type="full_analysis",  # full_analysis, quick_triage, ioc_summary, executive_brief
    classification="TLP:AMBER"
)
```

[CRITICAL: Evidence Summary in Report]
The final report MUST include an evidence summary:

## Confidence Assessment
| Evidence Level | Count | Key Findings |
|----------------|-------|---------------|
| 🔍 OBSERVED | X | (sandbox, logs, traces) |
| 🔎 INFERRED | Y | (static analysis) |
| ❓ POSSIBLE | Z | (needs verification) |

**Overall Confidence**: [✅ CONFIRMED / 🟢 HIGH / 🟡 MEDIUM / 🔴 LOW]

[STEP 8] Optional: Email Report
```
get_email_status()  # Check if email is configured
send_report_email(
    report_id="MAR-20251205-...",
    recipients=["security-team@company.com"]
)
```

═══════════════════════════════════════════════════════════════════════════
██ AVAILABLE REPORT TEMPLATES ██
═══════════════════════════════════════════════════════════════════════════

| Template | Purpose |
|----------|---------|
| `full_analysis` | Complete technical report with all details |
| `quick_triage` | Rapid assessment summary |
| `ioc_summary` | IOC-focused export (YAML/CSV included) |
| `executive_brief` | Non-technical summary for management |

═══════════════════════════════════════════════════════════════════════════
██ TIMESTAMP FORMATS AVAILABLE ██
═══════════════════════════════════════════════════════════════════════════

The system provides multiple date/time formats:
- `date`: 2025-12-05 (ISO format)
- `date_long`: December 05, 2025
- `date_short`: 05 Dec 2025
- `date_eu`: 05/12/2025
- `date_us`: 12/05/2025
- `weekday`: Friday
- `weekday_short`: Fri
- `time_12h`: 02:30:45 PM
- `datetime_full`: 2025-12-05 14:30:52 (KST)
- `datetime_utc`: 2025-12-05 05:30:52 UTC

Begin report generation workflow now.
"""
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
"""Prompts for security research, specialized analysis, and patching."""
|
|
2
|
+
|
|
3
|
+
from reversecore_mcp.prompts.common import DOCKER_PATH_RULE, LANGUAGE_RULE
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def vulnerability_research_mode(filename: str) -> str:
    """Specialized mode for Bug Hunting and Vulnerability Research.

    Args:
        filename: Name of the binary (relative to the workspace) to hunt
            for exploitable bugs; interpolated into the prompt so the agent
            targets the requested sample.

    Returns:
        A fully-rendered vulnerability-research prompt string.
    """
    return f"""
You are a Vulnerability Researcher.
Analyze the binary '{filename}' to find exploitable bugs (Buffer Overflow, UAF, Command Injection).

{LANGUAGE_RULE}

{DOCKER_PATH_RULE}

[CRITICAL: Evidence-Based Vulnerability Reporting]
==========================================
Vulnerability claims require STRONG evidence. False positives damage credibility.

🔍 [CONFIRMED] - Verified through PoC, fuzzing, or dynamic testing
   Example: "Crash at strcpy with controlled input (PoC attached)"

🔎 [LIKELY] - Strong static evidence (dangerous pattern + reachable sink)
   Example: "User input reaches sprintf without bounds check"

❓ [POSSIBLE] - Pattern present but exploitability unclear
   Example: "strcpy used but input source not confirmed"

[Analysis SOP]
1. Dangerous API Search:
   - Identify usage of dangerous functions (strcpy, system, sprintf, gets) using `run_radare2` imports.
   - Use `analyze_xrefs` to check if user input reaches these sinks.
   → API present only = [❓ POSSIBLE]
   → API + reachable input = [🔎 LIKELY]
   → PoC crash = [🔍 CONFIRMED]

2. Mitigation Check:
   - Check for exploit mitigations (ASLR, DEP/NX, Canary, PIE) using `parse_binary_with_lief`.
   → Mitigations affect exploitability, not vulnerability existence

3. Fuzzing Candidate Identification:
   - Identify parsing functions or network handlers suitable for fuzzing.

4. Reporting Format:
   | Vulnerability | CWE | Confidence | Evidence |
   |---------------|-----|------------|----------|
   | Stack Buffer Overflow | CWE-121 | 🔍 CONFIRMED | PoC crash at 0x401234 |
   | Command Injection | CWE-78 | 🔎 LIKELY | system() called with user input |
   | Integer Overflow | CWE-190 | ❓ POSSIBLE | Unchecked multiplication, needs verification |

   - Include code snippets for each finding
   - Recommend PoC (Proof of Concept) strategies
"""
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def crypto_analysis_mode(filename: str) -> str:
    """Specialized mode for analyzing Cryptographic algorithms and Key management.

    Args:
        filename: Name of the binary (relative to the workspace) to analyze;
            interpolated into the prompt so the agent targets the requested
            sample.

    Returns:
        A fully-rendered cryptography-analysis prompt string.
    """
    return f"""
You are a Cryptography Analyst.
Analyze the binary '{filename}' to identify cryptographic algorithms and key management flaws.

{LANGUAGE_RULE}

{DOCKER_PATH_RULE}

[Analysis SOP]
1. Algo Identification:
   - Identify crypto constants (S-Boxes, IVs, Magic Numbers) using `run_yara` (crypto-signatures) or `run_strings`.
   - Identify standard crypto libraries (OpenSSL, mbedTLS) using `match_libraries`.

2. Key Management:
   - Check for hardcoded keys or IVs.
   - Analyze how keys are generated and stored.

3. Reporting:
   - List identified algorithms (AES, RSA, ChaCha20, etc.) and their modes (ECB, CBC, GCM).
   - Report any weak crypto usage (e.g., ECB mode, weak RNG).
"""
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def firmware_analysis_mode(filename: str) -> str:
    """Specialized mode for analyzing Firmware images and IoT devices.

    Args:
        filename: Name of the firmware image (relative to the workspace) to
            analyze; interpolated into the prompt so the agent targets the
            requested image.

    Returns:
        A fully-rendered firmware-analysis prompt string.
    """
    return f"""
You are an Embedded Systems Security Expert.
Analyze the firmware image '{filename}' to extract file systems and identify vulnerabilities.

{LANGUAGE_RULE}

{DOCKER_PATH_RULE}

[Analysis SOP]
1. Extraction:
   - Use `run_binwalk` to identify and extract embedded file systems (SquashFS, UBIFS, etc.) and bootloaders.
   - Identify the CPU architecture (ARM, MIPS, PowerPC) using `run_file` or `parse_binary_with_lief`.

2. Secret Hunting:
   - Search for hardcoded credentials (root passwords, API keys, private keys) using `run_strings` and `run_yara`.
   - Look for configuration files (/etc/shadow, /etc/passwd, .conf).

3. Vulnerability Check:
   - Check for outdated components or known vulnerable services (telnet, old httpd).

4. Reporting:
   - List extracted components, architecture, and potential backdoors/secrets.
"""
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def patch_analysis_mode(original_binary: str, patched_binary: str) -> str:
    """Analyze the differences between two binaries to identify patches or vulnerabilities (1-day analysis).

    Args:
        original_binary: Path/name of the pre-patch (vulnerable) binary.
        patched_binary: Path/name of the post-patch (fixed) binary.

    Returns:
        A fully-rendered patch-diffing prompt string.
    """
    return f"""
You are a Patch Analyst / 1-Day Exploit Researcher.
Compare '{original_binary}' (vulnerable) and '{patched_binary}' (patched) to understand the security fix.

{LANGUAGE_RULE}

{DOCKER_PATH_RULE}

[Analysis SOP]
1. Binary Diffing:
   - Run `diff_binaries("{original_binary}", "{patched_binary}")` to find changed functions.
   - Focus on functions with 'unsafe' or 'security' related changes.

2. Change Analysis:
   - For each changed function:
     A. Decompile both versions using `smart_decompile`.
     B. Compare the logic to identify added checks (bounds check, integer overflow check, input validation).

3. Vulnerability Reconstruction:
   - Based on the added check, infer the original vulnerability (Buffer Overflow, UAF, Integer Overflow).
   - Determine if the patch is complete or if it can be bypassed.

4. Reporting:
   - Summarize the vulnerability (CVE style).
   - Explain the patch logic.
   - Suggest a Proof-of-Concept (PoC) strategy to trigger the original bug.
"""
|
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from collections import deque
|
|
3
|
+
from collections.abc import Callable
|
|
4
|
+
from functools import wraps
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any, TypeVar
|
|
7
|
+
|
|
8
|
+
from fastmcp import FastMCP
|
|
9
|
+
|
|
10
|
+
from reversecore_mcp.core import json_utils as json # Use optimized JSON (3-5x faster)
|
|
11
|
+
from reversecore_mcp.core.config import get_config
|
|
12
|
+
from reversecore_mcp.core.decorators import log_execution
|
|
13
|
+
from reversecore_mcp.core.metrics import track_metrics
|
|
14
|
+
|
|
15
|
+
# Import tools at module level for better performance
|
|
16
|
+
# These imports are used by resource functions below
|
|
17
|
+
from reversecore_mcp.tools.ghidra import decompilation, r2_analysis, static_analysis
|
|
18
|
+
from reversecore_mcp.tools.malware import ioc_tools
|
|
19
|
+
|
|
20
|
+
# Type variable for generic function wrapper
|
|
21
|
+
F = TypeVar("F", bound=Callable[..., Any])
|
|
22
|
+
|
|
23
|
+
# Type alias for decorator return
|
|
24
|
+
DecoratorType = Callable[[F], F]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def resource_decorator(resource_name: str) -> DecoratorType:
    """Combined decorator for resource functions with logging and metrics.

    Applies @track_metrics (innermost) and @log_execution (on top) so every
    resource function is monitored consistently, while @wraps keeps the
    original function's metadata on the returned wrapper.

    Args:
        resource_name: Name identifier for logging and metrics tracking

    Returns:
        A decorator that wraps the function with logging and metrics
    """

    def decorator(func: F) -> F:
        # Compose the monitoring decorators: metrics first, logging outermost.
        instrumented = log_execution(tool_name=resource_name)(
            track_metrics(resource_name)(func)
        )

        @wraps(func)
        async def run_async(*args: Any, **kwargs: Any) -> Any:
            return await instrumented(*args, **kwargs)

        @wraps(func)
        def run_sync(*args: Any, **kwargs: Any) -> Any:
            return instrumented(*args, **kwargs)

        # Coroutine functions get the awaiting wrapper; plain callables are
        # invoked synchronously.
        if asyncio.iscoroutinefunction(func):
            return run_async  # type: ignore[return-value]
        return run_sync  # type: ignore[return-value]

    return decorator
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _get_resources_path() -> Path:
    """Get resources path from config or use default.

    Prefers a "resources" directory that is a sibling of the configured
    workspace; falls back to the package-local resources directory. When
    neither exists, the config-based path is returned anyway so callers can
    still report a sensible location.
    """
    config_based = get_config().workspace.parent / "resources"
    if config_based.exists():
        return config_based
    bundled = Path(__file__).parent.parent / "resources"
    return bundled if bundled.exists() else config_based
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _get_workspace_path(filename: str) -> str:
    """Get full path to a file in the workspace."""
    workspace = get_config().workspace
    return str(workspace / filename)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def register_resources(mcp: FastMCP):
    """Register MCP resources for AI agents.

    Static resources expose bundled documentation and logs. Dynamic resources
    expose per-binary analysis via URI templates; the ``{filename}`` and
    ``{address}`` placeholders must match the decorated functions' parameter
    names for FastMCP to bind them.
    """

    # ========================================================================
    # Static Resources
    # ========================================================================

    @mcp.resource("reversecore://guide")
    def get_guide() -> str:
        """Reversecore MCP Tool Usage Guide"""
        guide_path = _get_resources_path() / "FILE_COPY_TOOL_GUIDE.md"
        if guide_path.exists():
            return guide_path.read_text(encoding="utf-8")
        return "Guide not found."

    @mcp.resource("reversecore://guide/structures")
    def get_structure_guide() -> str:
        """Structure Recovery and Cross-Reference Analysis Technical Guide"""
        doc_path = _get_resources_path() / "XREFS_AND_STRUCTURES_IMPLEMENTATION.md"
        if doc_path.exists():
            return doc_path.read_text(encoding="utf-8")
        return "Documentation not found."

    @mcp.resource("reversecore://tools")
    def get_tools_doc() -> str:
        """Complete documentation for all available tools"""
        doc_path = _get_resources_path() / "TOOLS.md"
        if doc_path.exists():
            return doc_path.read_text(encoding="utf-8")
        return "Tools documentation not found."

    @mcp.resource("reversecore://logs")
    def get_logs() -> str:
        """Application logs (last 100 lines)"""
        log_file = get_config().log_file
        if log_file.exists():
            try:
                # OPTIMIZED: deque(maxlen=100) keeps only the last N lines,
                # avoiding loading the entire log file into memory.
                with open(log_file, encoding="utf-8", errors="replace") as f:
                    last_lines = deque(f, maxlen=100)
                return "".join(last_lines)
            except (OSError, PermissionError) as e:
                return f"Error reading logs: {e}"
        return "No logs found."

    # ========================================================================
    # Dynamic Resources - Binary Virtual File System
    # ========================================================================

    @mcp.resource("reversecore://{filename}/strings")
    @resource_decorator("resource_get_file_strings")
    async def get_file_strings(filename: str) -> str:
        """Extract all strings from a binary file"""
        try:
            result = await static_analysis.run_strings(_get_workspace_path(filename))
            if result.status == "success":
                # Get content from ToolResult
                content = result.data if isinstance(result.data, str) else str(result.data)
                return f"# Strings from {filename}\n\n{content}"
            return f"Error extracting strings: {result.message if hasattr(result, 'message') else 'Unknown error'}"
        except Exception as e:
            return f"Error: {str(e)}"

    @mcp.resource("reversecore://{filename}/iocs")
    @resource_decorator("resource_get_file_iocs")
    async def get_file_iocs(filename: str) -> str:
        """Extract IOCs (IPs, URLs, Emails) from a binary file"""
        try:
            # 1. Extract strings
            strings_res = await static_analysis.run_strings(_get_workspace_path(filename))
            if strings_res.status != "success":
                return f"Failed to extract strings from {filename}"

            # 2. Extract IOCs from strings
            strings_data = (
                strings_res.data if isinstance(strings_res.data, str) else str(strings_res.data)
            )
            ioc_res = ioc_tools.extract_iocs(strings_data)

            # 3. Format output
            if ioc_res.status == "success":
                data = ioc_res.data
                ipv4_list = data.get("ipv4", [])
                urls_list = data.get("urls", [])
                emails_list = data.get("emails", [])

                return f"""# IOC Report for {filename}

## IPv4 Addresses ({len(ipv4_list)})
{chr(10).join(f"- {ip}" for ip in ipv4_list) if ipv4_list else "No IPv4 addresses found"}

## URLs ({len(urls_list)})
{chr(10).join(f"- {url}" for url in urls_list) if urls_list else "No URLs found"}

## Email Addresses ({len(emails_list)})
{chr(10).join(f"- {email}" for email in emails_list) if emails_list else "No emails found"}
"""
            return f"Error extracting IOCs: {ioc_res.message if hasattr(ioc_res, 'message') else 'Unknown error'}"
        except Exception as e:
            return f"Error: {str(e)}"

    @mcp.resource("reversecore://{filename}/func/{address}/code")
    @resource_decorator("resource_get_decompiled_code")
    async def get_decompiled_code(filename: str, address: str) -> str:
        """Get decompiled pseudo-C code for a specific function"""
        try:
            result = await decompilation.smart_decompile(
                _get_workspace_path(filename), address, use_ghidra=True
            )

            if result.status == "success":
                content = result.data if isinstance(result.data, str) else str(result.data)
                return f"""# Decompiled Code: {filename} @ {address}

```c
{content}
```
"""
            return f"Error decompiling {address}: {result.message if hasattr(result, 'message') else 'Decompilation failed'}"
        except Exception as e:
            return f"Error: {str(e)}"

    @mcp.resource("reversecore://{filename}/func/{address}/asm")
    @resource_decorator("resource_get_disassembly")
    async def get_disassembly(filename: str, address: str) -> str:
        """Get disassembly for a specific function"""
        try:
            result = await r2_analysis.run_radare2(
                _get_workspace_path(filename), f"pdf @ {address}"
            )

            if result.status == "success":
                content = result.data if isinstance(result.data, str) else str(result.data)
                return f"""# Disassembly: {filename} @ {address}

```asm
{content}
```
"""
            return f"Error disassembling {address}: {result.message if hasattr(result, 'message') else 'Disassembly failed'}"
        except Exception as e:
            return f"Error: {str(e)}"

    @mcp.resource("reversecore://{filename}/func/{address}/cfg")
    @resource_decorator("resource_get_function_cfg")
    async def get_function_cfg(filename: str, address: str) -> str:
        """Get Control Flow Graph (Mermaid) for a specific function"""
        try:
            result = await r2_analysis.generate_function_graph(
                _get_workspace_path(filename), address, format="mermaid"
            )

            if result.status == "success":
                content = result.data if isinstance(result.data, str) else str(result.data)
                return f"""# Control Flow Graph: {filename} @ {address}

{content}
"""
            return f"Error generating CFG for {address}: {result.message if hasattr(result, 'message') else 'CFG generation failed'}"
        except Exception as e:
            return f"Error: {str(e)}"

    @mcp.resource("reversecore://{filename}/functions")
    @resource_decorator("resource_get_function_list")
    async def get_function_list(filename: str) -> str:
        """Get list of all functions in the binary"""
        try:
            result = await r2_analysis.run_radare2(
                _get_workspace_path(filename), "aflj"
            )  # List functions in JSON format

            if result.status == "success":
                content = result.data if isinstance(result.data, str) else str(result.data)

                try:
                    functions = json.loads(content)
                    func_list = []
                    for func in functions[:50]:  # Limit to first 50 for readability
                        name = func.get("name", "unknown")
                        offset = func.get("offset", 0)
                        size = func.get("size", 0)
                        func_list.append(f"- `{name}` @ 0x{offset:x} (size: {size} bytes)")

                    total = len(functions)
                    shown = min(50, total)

                    return f"""# Functions in {filename}

Total functions: {total}
Showing: {shown}

{chr(10).join(func_list)}
"""
                except Exception:  # Catch all JSON parsing errors
                    # Fall back to raw output when aflj did not return JSON.
                    return f"# Functions in {filename}\n\n{content}"

            return f"Error listing functions: {result.message if hasattr(result, 'message') else 'Failed to list functions'}"
        except Exception as e:
            return f"Error: {str(e)}"

    # ========================================================================
    # Reversecore Signature Resources (Dormant Detector)
    # ========================================================================

    @mcp.resource("reversecore://{filename}/dormant_detector")
    @resource_decorator("resource_get_dormant_detector_results")
    async def get_dormant_detector_results(filename: str) -> str:
        """Get Dormant Detector analysis results (orphan functions and logic bombs)"""
        try:
            # NOTE(review): the package ships this module as
            # reversecore_mcp.tools.malware.dormant_detector — confirm that
            # reversecore_mcp.tools re-exports it under this name.
            from reversecore_mcp.tools import dormant_detector as dd_module

            result = await dd_module.dormant_detector(file_path=_get_workspace_path(filename))

            if result.status == "success":
                data = result.data
                orphans = data.get("orphan_functions", [])
                suspicious = data.get("suspicious_logic", [])

                report = f"""# 🔍 Dormant Detector Results: {filename}

## Orphan Functions (Never Called)
Found {len(orphans)} orphan function(s):

"""
                for func in orphans[:10]:
                    report += f"""### {func.get("name", "unknown")}
- **Address**: {func.get("address", "N/A")}
- **Size**: {func.get("size", 0)} bytes
- **Cross-References**: {func.get("xrefs", 0)}
- **Assessment**: Potentially hidden backdoor or logic bomb

"""

                report += f"\n## Suspicious Logic (Magic Values)\nFound {len(suspicious)} suspicious pattern(s):\n\n"

                for logic in suspicious[:10]:
                    report += f"""### {logic.get("function", "unknown")}
- **Address**: {logic.get("address", "N/A")}
- **Instruction**: `{logic.get("instruction", "N/A")}`
- **Reason**: {logic.get("reason", "N/A")}

"""

                return report

            return f"Dormant Detector analysis failed: {result.message if hasattr(result, 'message') else 'Unknown error'}"
        except Exception as e:
            return f"Error: {str(e)}"
|