ida-pro-mcp-xjoker 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ida_pro_mcp/__init__.py +0 -0
- ida_pro_mcp/__main__.py +6 -0
- ida_pro_mcp/ida_mcp/__init__.py +68 -0
- ida_pro_mcp/ida_mcp/api_analysis.py +1296 -0
- ida_pro_mcp/ida_mcp/api_core.py +337 -0
- ida_pro_mcp/ida_mcp/api_debug.py +617 -0
- ida_pro_mcp/ida_mcp/api_memory.py +304 -0
- ida_pro_mcp/ida_mcp/api_modify.py +406 -0
- ida_pro_mcp/ida_mcp/api_python.py +179 -0
- ida_pro_mcp/ida_mcp/api_resources.py +295 -0
- ida_pro_mcp/ida_mcp/api_stack.py +167 -0
- ida_pro_mcp/ida_mcp/api_types.py +480 -0
- ida_pro_mcp/ida_mcp/auth.py +166 -0
- ida_pro_mcp/ida_mcp/cache.py +232 -0
- ida_pro_mcp/ida_mcp/config.py +228 -0
- ida_pro_mcp/ida_mcp/framework.py +547 -0
- ida_pro_mcp/ida_mcp/http.py +859 -0
- ida_pro_mcp/ida_mcp/port_utils.py +104 -0
- ida_pro_mcp/ida_mcp/rpc.py +187 -0
- ida_pro_mcp/ida_mcp/server_manager.py +339 -0
- ida_pro_mcp/ida_mcp/sync.py +233 -0
- ida_pro_mcp/ida_mcp/tests/__init__.py +14 -0
- ida_pro_mcp/ida_mcp/tests/test_api_analysis.py +336 -0
- ida_pro_mcp/ida_mcp/tests/test_api_core.py +237 -0
- ida_pro_mcp/ida_mcp/tests/test_api_memory.py +207 -0
- ida_pro_mcp/ida_mcp/tests/test_api_modify.py +123 -0
- ida_pro_mcp/ida_mcp/tests/test_api_resources.py +199 -0
- ida_pro_mcp/ida_mcp/tests/test_api_stack.py +77 -0
- ida_pro_mcp/ida_mcp/tests/test_api_types.py +249 -0
- ida_pro_mcp/ida_mcp/ui.py +357 -0
- ida_pro_mcp/ida_mcp/utils.py +1186 -0
- ida_pro_mcp/ida_mcp/zeromcp/__init__.py +5 -0
- ida_pro_mcp/ida_mcp/zeromcp/jsonrpc.py +384 -0
- ida_pro_mcp/ida_mcp/zeromcp/mcp.py +883 -0
- ida_pro_mcp/ida_mcp.py +186 -0
- ida_pro_mcp/idalib_server.py +354 -0
- ida_pro_mcp/idalib_session_manager.py +259 -0
- ida_pro_mcp/server.py +1060 -0
- ida_pro_mcp/test.py +170 -0
- ida_pro_mcp_xjoker-1.0.1.dist-info/METADATA +405 -0
- ida_pro_mcp_xjoker-1.0.1.dist-info/RECORD +45 -0
- ida_pro_mcp_xjoker-1.0.1.dist-info/WHEEL +5 -0
- ida_pro_mcp_xjoker-1.0.1.dist-info/entry_points.txt +4 -0
- ida_pro_mcp_xjoker-1.0.1.dist-info/licenses/LICENSE +21 -0
- ida_pro_mcp_xjoker-1.0.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,337 @@
|
|
|
1
|
+
"""Core API Functions - IDB metadata and basic queries"""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
import time
|
|
5
|
+
from typing import Annotated
|
|
6
|
+
|
|
7
|
+
import idaapi
|
|
8
|
+
import idautils
|
|
9
|
+
import ida_nalt
|
|
10
|
+
|
|
11
|
+
from .rpc import tool
|
|
12
|
+
from .sync import idasync
|
|
13
|
+
from .cache import function_cache, string_cache
|
|
14
|
+
|
|
15
|
+
# Cached strings list: [(ea, text), ...]
# Lazily populated on first access via _get_strings_cache(); reset to None by
# invalidate_strings_cache() whenever the IDB changes so it is rebuilt.
_strings_cache: list[tuple[int, str]] | None = None
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _get_strings_cache() -> list[tuple[int, str]]:
    """Return the cached ``(ea, text)`` string list, populating it lazily.

    The scan over ``idautils.Strings()`` is expensive, so the result is
    memoized in the module-level ``_strings_cache`` until invalidated.
    """
    global _strings_cache
    if _strings_cache is None:
        collected: list[tuple[int, str]] = []
        for item in idautils.Strings():
            if item is not None:
                collected.append((item.ea, str(item)))
        _strings_cache = collected
    return _strings_cache
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def invalidate_strings_cache():
    """Drop the memoized strings so the next access rebuilds them.

    Call this after any IDB change that may add/remove/rename strings.
    """
    global _strings_cache
    _strings_cache = None
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def init_caches():
    """Warm the string cache at plugin startup (invoked via Ctrl+M).

    Logs how many strings were cached and how long the scan took.
    """
    started = time.perf_counter()
    cached = _get_strings_cache()
    elapsed_ms = (time.perf_counter() - started) * 1000
    print(f"[MCP] Cached {len(cached)} strings in {elapsed_ms:.0f}ms")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
from .utils import (
|
|
42
|
+
Function,
|
|
43
|
+
ConvertedNumber,
|
|
44
|
+
Global,
|
|
45
|
+
Import,
|
|
46
|
+
Page,
|
|
47
|
+
NumberConversion,
|
|
48
|
+
ListQuery,
|
|
49
|
+
normalize_list_input,
|
|
50
|
+
normalize_dict_list,
|
|
51
|
+
get_function,
|
|
52
|
+
paginate,
|
|
53
|
+
pattern_filter,
|
|
54
|
+
)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# ============================================================================
|
|
58
|
+
# Core API Functions
|
|
59
|
+
# ============================================================================
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _parse_func_query(query: str) -> int:
|
|
63
|
+
"""Fast path for common function query patterns. Returns ea or BADADDR."""
|
|
64
|
+
q = query.strip()
|
|
65
|
+
|
|
66
|
+
# 0x<hex> - direct address
|
|
67
|
+
if q.startswith("0x") or q.startswith("0X"):
|
|
68
|
+
try:
|
|
69
|
+
return int(q, 16)
|
|
70
|
+
except ValueError:
|
|
71
|
+
pass
|
|
72
|
+
|
|
73
|
+
# sub_<hex> - IDA auto-named function
|
|
74
|
+
if q.startswith("sub_"):
|
|
75
|
+
try:
|
|
76
|
+
return int(q[4:], 16)
|
|
77
|
+
except ValueError:
|
|
78
|
+
pass
|
|
79
|
+
|
|
80
|
+
return idaapi.BADADDR
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@tool
@idasync
def lookup_funcs(
    queries: Annotated[list[str] | str, "Address(es) or name(s)"],
) -> list[dict]:
    """Get functions by address or name (auto-detects)"""
    queries = normalize_list_input(queries)

    # An empty list or a lone wildcard means "every function", capped at 1000.
    if not queries or (len(queries) == 1 and queries[0] in ("*", "")):
        listing = []
        for fn_ea in idautils.Functions():
            listing.append(get_function(fn_ea))
            if len(listing) >= 1000:
                break
        return [{"query": "*", "fn": fn, "error": None} for fn in listing]

    out: list[dict] = []
    for query in queries:
        # Serve from the per-query cache when possible.
        cache_key = f"lookup:{query}"
        hit = function_cache.get(cache_key)
        if hit is not None:
            out.append(hit)
            continue

        try:
            # Fast path handles "0x<ea>" / "sub_<ea>" without a name lookup.
            ea = _parse_func_query(query)
            if ea == idaapi.BADADDR:
                # Slow path: resolve by symbol name.
                ea = idaapi.get_name_ea(idaapi.BADADDR, query)

            if ea == idaapi.BADADDR:
                entry = {"query": query, "fn": None, "error": "Not found"}
            else:
                fn = get_function(ea, raise_error=False)
                if fn:
                    entry = {"query": query, "fn": fn, "error": None}
                else:
                    entry = {"query": query, "fn": None, "error": "Not a function"}
        except Exception as exc:
            entry = {"query": query, "fn": None, "error": str(exc)}

        function_cache.set(cache_key, entry)
        out.append(entry)

    return out
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
@tool
def int_convert(
    inputs: Annotated[
        list[NumberConversion] | NumberConversion,
        "Convert numbers to various formats (hex, decimal, binary, ascii)",
    ],
) -> list[dict]:
    """Convert numbers to different formats.

    Each input item carries a "text" field (any Python int literal: "65",
    "0x41", "0b1000001", ...) and an optional "size" in bytes.  When size is
    omitted, the minimal byte width that can represent the value in signed
    two's complement is used.  Returns one dict per input with keys
    "input", "result" (ConvertedNumber or None) and "error".
    """
    inputs = normalize_dict_list(inputs, lambda s: {"text": s})

    results = []
    for item in inputs:
        text = item.get("text", "")
        size = item.get("size")

        try:
            value = int(text, 0)
        except ValueError:
            results.append(
                {"input": text, "result": None, "error": f"Invalid number: {text}"}
            )
            continue

        if not size:
            # Minimal signed (two's complement) width.  The previous
            # abs(value).bit_length()-based computation under-sized values
            # like 128, 255 or -129, making to_bytes() below raise
            # OverflowError spuriously; it also produced a 0-byte buffer
            # for value == 0.  A non-negative value needs bit_length()+1
            # bits (sign bit); a negative value needs (value+1).bit_length()+1.
            if value >= 0:
                size = max(1, (value.bit_length() + 8) // 8)
            else:
                size = max(1, ((value + 1).bit_length() + 8) // 8)

        try:
            bytes_data = value.to_bytes(size, "little", signed=True)
        except OverflowError:
            # Only reachable when the caller supplied an explicit,
            # too-small "size".
            results.append(
                {
                    "input": text,
                    "result": None,
                    "error": f"Number {text} is too big for {size} bytes",
                }
            )
            continue

        # Printable-ASCII rendering of the (zero-stripped) bytes;
        # None if any byte is outside the printable range.
        ascii_str = ""
        for byte in bytes_data.rstrip(b"\x00"):
            if 32 <= byte <= 126:
                ascii_str += chr(byte)
            else:
                ascii_str = None
                break

        results.append(
            {
                "input": text,
                "result": ConvertedNumber(
                    decimal=str(value),
                    hexadecimal=hex(value),
                    bytes=bytes_data.hex(" "),
                    ascii=ascii_str,
                    binary=bin(value),
                ),
                "error": None,
            }
        )

    return results
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
@tool
@idasync
def list_funcs(
    queries: Annotated[
        list[ListQuery] | ListQuery | str,
        "List functions with optional filtering and pagination",
    ],
) -> list[Page[Function]]:
    """List functions"""
    queries = normalize_dict_list(
        queries, lambda s: {"offset": 0, "count": 100, "filter": s}
    )

    # Materialize every function once; each query filters/paginates this list.
    everything = [get_function(ea) for ea in idautils.Functions()]

    pages = []
    for q in queries:
        pattern = q.get("filter", "")
        if pattern == "*":  # wildcard is equivalent to "no filter"
            pattern = ""
        subset = pattern_filter(everything, pattern, "name")
        pages.append(paginate(subset, q.get("offset", 0), q.get("count", 100)))

    return pages
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
@tool
@idasync
def list_globals(
    queries: Annotated[
        list[ListQuery] | ListQuery | str,
        "List global variables with optional filtering and pagination",
    ],
) -> list[Page[Global]]:
    """List globals"""
    queries = normalize_dict_list(
        queries, lambda s: {"offset": 0, "count": 100, "filter": s}
    )

    # A "global" is any named address that is not inside a function.
    globals_list: list[Global] = [
        Global(addr=hex(ea), name=name)
        for ea, name in idautils.Names()
        if name is not None and not idaapi.get_func(ea)
    ]

    pages = []
    for q in queries:
        pattern = q.get("filter", "")
        if pattern == "*":  # wildcard is equivalent to "no filter"
            pattern = ""
        pages.append(
            paginate(
                pattern_filter(globals_list, pattern, "name"),
                q.get("offset", 0),
                q.get("count", 100),
            )
        )

    return pages
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
@tool
@idasync
def imports(
    offset: Annotated[int, "Offset"],
    count: Annotated[int, "Count (0=all)"],
) -> Page[Import]:
    """List imports"""
    collected: list[Import] = []

    for module_idx in range(ida_nalt.get_import_module_qty()):
        module_name = ida_nalt.get_import_module_name(module_idx) or "<unnamed>"

        # Bind module_name via a default argument so the callback stays
        # correct even though it is re-created per module.
        def visit(ea, symbol_name, ordinal, _module=module_name):
            # Unnamed imports are reported by their ordinal.
            collected.append(
                Import(
                    addr=hex(ea),
                    imported_name=symbol_name or f"#{ordinal}",
                    module=_module,
                )
            )
            return True  # keep enumerating

        ida_nalt.enum_import_names(module_idx, visit)

    return paginate(collected, offset, count)
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
@tool
@idasync
def find_regex(
    pattern: Annotated[str, "Regex pattern to search for in strings"],
    limit: Annotated[int, "Max matches (default: 30, max: 500)"] = 30,
    offset: Annotated[int, "Skip first N matches (default: 0)"] = 0,
) -> dict:
    """Search strings with case-insensitive regex patterns"""
    # Clamp page size: non-positive falls back to the default, cap at 500.
    limit = 30 if limit <= 0 else min(limit, 500)

    try:
        compiled = re.compile(pattern, re.IGNORECASE)
    except re.error as e:
        return {
            "n": 0,
            "matches": [],
            "cursor": {"done": True},
            "error": f"Invalid regex pattern: {e}",
        }

    hits: list[dict] = []
    skipped = 0
    truncated = False
    for ea, text in _get_strings_cache():
        if not compiled.search(text):
            continue
        if skipped < offset:
            skipped += 1
            continue
        if len(hits) >= limit:
            truncated = True
            break
        hits.append({"addr": hex(ea), "string": text})

    return {
        "n": len(hits),
        "matches": hits,
        "cursor": {"next": offset + limit} if truncated else {"done": True},
    }
|