@ebowwa/mcp-nm 2.0.3 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +2644 -2334
- package/package.json +5 -3
- package/src/handlers/bin.ts +558 -0
- package/src/handlers/index.ts +72 -0
- package/src/handlers/macos.ts +611 -0
- package/src/handlers/nm.ts +368 -0
- package/src/handlers/patch.ts +535 -0
- package/src/handlers/xxd.ts +304 -0
- package/src/index.ts +138 -3208
- package/src/tools/index.ts +682 -0
- package/src/types/index.ts +109 -0
- package/src/utils/convert.ts +166 -0
- package/src/utils/exec.ts +69 -0
- package/src/utils/file.ts +108 -0
- package/src/utils/index.ts +11 -0
- package/src/utils/nm.ts +152 -0
- package/src/utils/xxd.ts +173 -0
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@ebowwa/mcp-nm",
|
|
3
|
-
"version": "2.0
|
|
4
|
-
"description": "Comprehensive binary analysis MCP server - symbols (nm), hex dumps (xxd), strings, disassembly, security audit, entropy analysis, ELF/Mach-O inspection, binary patching,
|
|
3
|
+
"version": "2.2.0",
|
|
4
|
+
"description": "Comprehensive binary analysis MCP server - symbols (nm), hex dumps (xxd), strings, disassembly, security audit, entropy analysis, ELF/Mach-O inspection, binary patching, macOS code signing, and persistent patch management",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
7
7
|
"types": "dist/index.d.ts",
|
|
@@ -36,7 +36,9 @@
|
|
|
36
36
|
"otool",
|
|
37
37
|
"codesign",
|
|
38
38
|
"quarantine",
|
|
39
|
-
"macos"
|
|
39
|
+
"macos",
|
|
40
|
+
"patch-management",
|
|
41
|
+
"binary-persistence"
|
|
40
42
|
],
|
|
41
43
|
"author": "ebowwa",
|
|
42
44
|
"license": "MIT",
|
|
@@ -0,0 +1,558 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @ebowwa/mcp-nm - Binary analysis handlers
|
|
3
|
+
*
|
|
4
|
+
* MCP tool handlers for extended binary analysis
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { runCommand, LARGE_BUFFER, execAsync, isMacOS, isLinux } from "../utils/exec";
|
|
8
|
+
import { analyzeFile, getMimeType, getFileInfo } from "../utils/file";
|
|
9
|
+
import { runNm } from "../utils/nm";
|
|
10
|
+
import { hexToBytes, formatBytesWithContext } from "../utils/xxd";
|
|
11
|
+
import type { McpResponse } from "../types";
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Extract strings from binary
|
|
15
|
+
*/
|
|
16
|
+
export async function handleStrings(args: {
|
|
17
|
+
filePath: string;
|
|
18
|
+
minLength?: number;
|
|
19
|
+
encoding?: "ascii" | "unicode" | "all";
|
|
20
|
+
}): Promise<McpResponse> {
|
|
21
|
+
const minLen = args.minLength ?? 4;
|
|
22
|
+
const encFlags: Record<string, string> = {
|
|
23
|
+
ascii: "-a",
|
|
24
|
+
unicode: "-e l",
|
|
25
|
+
all: "-a -e l",
|
|
26
|
+
};
|
|
27
|
+
const encFlag = encFlags[args.encoding ?? "all"];
|
|
28
|
+
|
|
29
|
+
const { stdout } = await runCommand(
|
|
30
|
+
`strings ${encFlag} -n ${minLen} "${args.filePath}"`,
|
|
31
|
+
{ maxBuffer: LARGE_BUFFER }
|
|
32
|
+
);
|
|
33
|
+
|
|
34
|
+
const strings = stdout.trim().split("\n").filter(Boolean);
|
|
35
|
+
|
|
36
|
+
const summary = [
|
|
37
|
+
`Strings in: ${args.filePath}`,
|
|
38
|
+
`Encoding: ${args.encoding ?? "all"}`,
|
|
39
|
+
`Minimum length: ${minLen}`,
|
|
40
|
+
`Total strings: ${strings.length}`,
|
|
41
|
+
"",
|
|
42
|
+
"Strings:",
|
|
43
|
+
...strings.slice(0, 200).map((s) => ` ${s}`),
|
|
44
|
+
strings.length > 200 ? ` ... and ${strings.length - 200} more` : "",
|
|
45
|
+
].join("\n");
|
|
46
|
+
|
|
47
|
+
return { content: [{ type: "text", text: summary }] };
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
/**
|
|
51
|
+
* Get file type with MIME
|
|
52
|
+
*/
|
|
53
|
+
export async function handleFileInfo(args: { filePath: string }): Promise<McpResponse> {
|
|
54
|
+
const [mimeInfo, detailedInfo] = await Promise.all([
|
|
55
|
+
getMimeType(args.filePath),
|
|
56
|
+
getFileInfo(args.filePath),
|
|
57
|
+
]);
|
|
58
|
+
|
|
59
|
+
const summary = [
|
|
60
|
+
`File: ${args.filePath}`,
|
|
61
|
+
"",
|
|
62
|
+
"MIME Type:",
|
|
63
|
+
` ${mimeInfo}`,
|
|
64
|
+
"",
|
|
65
|
+
"Detailed Type:",
|
|
66
|
+
` ${detailedInfo}`,
|
|
67
|
+
].join("\n");
|
|
68
|
+
|
|
69
|
+
return { content: [{ type: "text", text: summary }] };
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
/**
|
|
73
|
+
* Get section sizes
|
|
74
|
+
*/
|
|
75
|
+
export async function handleSectionSizes(args: { filePath: string }): Promise<McpResponse> {
|
|
76
|
+
try {
|
|
77
|
+
const { stdout } = await runCommand(`size -A -x "${args.filePath}"`);
|
|
78
|
+
return { content: [{ type: "text", text: `Section Sizes:\n${stdout}` }] };
|
|
79
|
+
} catch {
|
|
80
|
+
try {
|
|
81
|
+
const { stdout } = await runCommand(`size -m "${args.filePath}"`);
|
|
82
|
+
return { content: [{ type: "text", text: `Section Sizes:\n${stdout}` }] };
|
|
83
|
+
} catch (error) {
|
|
84
|
+
throw new Error(`size command failed: ${error instanceof Error ? error.message : error}`);
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
/**
|
|
90
|
+
* Get section headers via objdump
|
|
91
|
+
*/
|
|
92
|
+
export async function handleObjdumpSections(args: { filePath: string }): Promise<McpResponse> {
|
|
93
|
+
const { stdout } = await runCommand(`objdump -h "${args.filePath}"`, {
|
|
94
|
+
maxBuffer: LARGE_BUFFER,
|
|
95
|
+
});
|
|
96
|
+
|
|
97
|
+
return { content: [{ type: "text", text: `Section Headers: ${args.filePath}\n\n${stdout}` }] };
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
/**
|
|
101
|
+
* Get program headers / DLL imports via objdump
|
|
102
|
+
*/
|
|
103
|
+
export async function handleObjdumpProgramHeaders(args: { filePath: string }): Promise<McpResponse> {
|
|
104
|
+
const { stdout } = await runCommand(`objdump -p "${args.filePath}"`, {
|
|
105
|
+
maxBuffer: LARGE_BUFFER,
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
return { content: [{ type: "text", text: `Program Headers / Imports: ${args.filePath}\n\n${stdout}` }] };
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
/**
|
|
112
|
+
* Get Mach-O dynamic libraries via otool (macOS only)
|
|
113
|
+
*/
|
|
114
|
+
export async function handleOtoolLibs(args: { filePath: string }): Promise<McpResponse> {
|
|
115
|
+
if (!isMacOS()) {
|
|
116
|
+
throw new Error("otool -L is only available on macOS");
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
const { stdout } = await runCommand(`otool -L "${args.filePath}"`);
|
|
120
|
+
const lines = stdout.trim().split("\n");
|
|
121
|
+
|
|
122
|
+
const summary = [
|
|
123
|
+
`Dynamic Library Dependencies: ${args.filePath}`,
|
|
124
|
+
"",
|
|
125
|
+
...lines.map((l) => ` ${l}`),
|
|
126
|
+
].join("\n");
|
|
127
|
+
|
|
128
|
+
return { content: [{ type: "text", text: summary }] };
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
/**
|
|
132
|
+
* Comprehensive ELF analysis via readelf (Linux only)
|
|
133
|
+
*/
|
|
134
|
+
export async function handleReadelf(args: {
|
|
135
|
+
filePath: string;
|
|
136
|
+
sections?: ("headers" | "sections" | "segments" | "symbols" | "dynamic" | "all")[];
|
|
137
|
+
}): Promise<McpResponse> {
|
|
138
|
+
if (!isLinux()) {
|
|
139
|
+
throw new Error("readelf is only available on Linux");
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
const sections = args.sections ?? ["all"];
|
|
143
|
+
const flags: string[] = [];
|
|
144
|
+
|
|
145
|
+
if (sections.includes("all")) {
|
|
146
|
+
flags.push("-a");
|
|
147
|
+
} else {
|
|
148
|
+
if (sections.includes("headers")) flags.push("-h");
|
|
149
|
+
if (sections.includes("sections")) flags.push("-S");
|
|
150
|
+
if (sections.includes("segments")) flags.push("-l");
|
|
151
|
+
if (sections.includes("symbols")) flags.push("-s");
|
|
152
|
+
if (sections.includes("dynamic")) flags.push("-d");
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
const { stdout } = await runCommand(`readelf ${flags.join(" ")} "${args.filePath}"`, {
|
|
156
|
+
maxBuffer: 20 * 1024 * 1024,
|
|
157
|
+
});
|
|
158
|
+
|
|
159
|
+
const summary = [
|
|
160
|
+
`ELF Analysis: ${args.filePath}`,
|
|
161
|
+
`Sections: ${sections.join(", ")}`,
|
|
162
|
+
"",
|
|
163
|
+
stdout.slice(0, 50000),
|
|
164
|
+
stdout.length > 50000 ? `\n... truncated (${stdout.length - 50000} more chars)` : "",
|
|
165
|
+
].join("\n");
|
|
166
|
+
|
|
167
|
+
return { content: [{ type: "text", text: summary }] };
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
/**
|
|
171
|
+
* Get shared library dependencies via ldd (Linux only)
|
|
172
|
+
*/
|
|
173
|
+
export async function handleLdd(args: { filePath: string }): Promise<McpResponse> {
|
|
174
|
+
if (!isLinux()) {
|
|
175
|
+
throw new Error("ldd is only available on Linux");
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
const { stdout } = await runCommand(`ldd "${args.filePath}"`);
|
|
179
|
+
const lines = stdout.trim().split("\n").filter(Boolean);
|
|
180
|
+
|
|
181
|
+
const summary = [
|
|
182
|
+
`Shared Library Dependencies: ${args.filePath}`,
|
|
183
|
+
"",
|
|
184
|
+
...lines.map((l) => ` ${l}`),
|
|
185
|
+
].join("\n");
|
|
186
|
+
|
|
187
|
+
return { content: [{ type: "text", text: summary }] };
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/**
|
|
191
|
+
* Disassemble via objdump -d
|
|
192
|
+
*/
|
|
193
|
+
export async function handleDisassembly(args: {
|
|
194
|
+
filePath: string;
|
|
195
|
+
symbol?: string;
|
|
196
|
+
startOffset?: number;
|
|
197
|
+
length?: number;
|
|
198
|
+
}): Promise<McpResponse> {
|
|
199
|
+
let cmd = "objdump -d";
|
|
200
|
+
|
|
201
|
+
if (args.symbol) {
|
|
202
|
+
cmd += ` --disassemble="${args.symbol}"`;
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
cmd += ` "${args.filePath}"`;
|
|
206
|
+
|
|
207
|
+
const { stdout } = await runCommand(cmd, { maxBuffer: LARGE_BUFFER });
|
|
208
|
+
|
|
209
|
+
let output = stdout;
|
|
210
|
+
if (args.startOffset !== undefined || args.length !== undefined) {
|
|
211
|
+
const lines = stdout.split("\n");
|
|
212
|
+
const start = args.startOffset ?? 0;
|
|
213
|
+
const len = args.length ?? 500;
|
|
214
|
+
output = lines.slice(start, start + len).join("\n");
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
const summary = [
|
|
218
|
+
`Disassembly: ${args.filePath}`,
|
|
219
|
+
args.symbol ? `Symbol: ${args.symbol}` : "",
|
|
220
|
+
"",
|
|
221
|
+
output.slice(0, 100000),
|
|
222
|
+
output.length > 100000 ? `\n... truncated (${output.length - 100000} more chars)` : "",
|
|
223
|
+
].filter(Boolean).join("\n");
|
|
224
|
+
|
|
225
|
+
return { content: [{ type: "text", text: summary }] };
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
/**
 * Security audit of a binary: probes for PIE, RELRO, stack canary, NX,
 * FORTIFY_SOURCE, and a Mach-O LC_MAIN heuristic, collecting one
 * { check, status, details } row per probe.
 *
 * Probes that cannot run on this platform are skipped silently; a failure
 * of the initial `file` call aborts the audit and records an Error row.
 *
 * NOTE(review): several probes pipe through `grep`, which exits non-zero
 * when nothing matches — presumably runCommand throws on that, so the
 * empty catch blocks conflate "no match" with "tool unavailable". Confirm
 * runCommand's failure semantics.
 */
export async function handleSecurityAudit(args: { filePath: string }): Promise<McpResponse> {
  const results: { check: string; status: string; details: string }[] = [];

  try {
    // Base file-type probe; its output drives the PIE and Mach-O checks.
    const { stdout: fileInfo } = await runCommand(`file "${args.filePath}"`);
    const info = fileInfo.toLowerCase();

    // PIE — inferred from `file` output keywords.
    if (info.includes("pie") || info.includes("position independent")) {
      results.push({ check: "PIE", status: "ENABLED", details: "Position Independent Executable" });
    } else if (info.includes("executable")) {
      results.push({ check: "PIE", status: "DISABLED", details: "Not a PIE binary" });
    }

    // RELRO (Linux only) — presence of a GNU_RELRO program header.
    // Only an ENABLED row is ever recorded; absence is silent.
    if (isLinux()) {
      try {
        const { stdout: relro } = await runCommand(
          `readelf -l "${args.filePath}" 2>/dev/null | grep -i gnu_relro`
        );
        if (relro.trim()) {
          results.push({ check: "RELRO", status: "ENABLED", details: "Read-Only relocations" });
        }
      } catch {
        // No GNU_RELRO match (or readelf unavailable) — not applicable.
      }
    }

    // Stack canary — look for the __stack_chk_fail symbol via nm.
    try {
      const { stdout: canary } = await runCommand(
        `nm "${args.filePath}" 2>/dev/null | grep -i "__stack_chk_fail"`
      );
      if (canary.trim()) {
        results.push({ check: "Stack Canary", status: "ENABLED", details: "Stack smashing detected symbol found" });
      } else {
        results.push({ check: "Stack Canary", status: "UNKNOWN", details: "No stack canary symbol found" });
      }
    } catch {
      results.push({ check: "Stack Canary", status: "UNKNOWN", details: "Could not check" });
    }

    // NX bit (Linux only) — GNU_STACK segment flags; "rwe" means the
    // stack is executable.
    if (isLinux()) {
      try {
        const { stdout: nx } = await runCommand(
          `readelf -l "${args.filePath}" 2>/dev/null | grep -i "gnu_stack"`
        );
        if (nx.toLowerCase().includes("rwe")) {
          results.push({ check: "NX", status: "DISABLED", details: "Stack is executable (RWE)" });
        } else if (nx.trim()) {
          results.push({ check: "NX", status: "ENABLED", details: "Non-executable stack" });
        }
      } catch {
        // No GNU_STACK match — skip the NX row.
      }
    }

    // FORTIFY_SOURCE — fortified glibc functions appear as *_chk@ symbols.
    try {
      const { stdout: fortify } = await runCommand(
        `nm "${args.filePath}" 2>/dev/null | grep -i "_chk@"`
      );
      if (fortify.trim()) {
        results.push({ check: "FORTIFY", status: "ENABLED", details: "Fortified functions detected" });
      }
    } catch {
      // No fortified symbols found — not applicable.
    }

    // macOS-specific: LC_MAIN load command implies a modern (likely PIE)
    // Mach-O. May add a second PIE row alongside the `file`-based one.
    if (info.includes("mach-o")) {
      try {
        const { stdout: loadCmds } = await runCommand(`otool -l "${args.filePath}"`);
        if (loadCmds.toLowerCase().includes("lc_main")) {
          results.push({ check: "PIE", status: "ENABLED", details: "Mach-O with LC_MAIN (likely PIE)" });
        }
      } catch {
        // otool unavailable or failed — ignore.
      }
    }
  } catch (error) {
    // The initial `file` probe failed; record it instead of throwing.
    results.push({ check: "Error", status: "FAILED", details: error instanceof Error ? error.message : String(error) });
  }

  const summary = [
    `Security Audit: ${args.filePath}`,
    "",
    "Security Features:",
    ...results.map((r) => ` ${r.check}: ${r.status} - ${r.details}`),
    "",
    "Recommendations:",
    ...results
      .filter((r) => r.status === "DISABLED" || r.status === "UNKNOWN")
      .map((r) => ` - Consider enabling ${r.check}`),
  ].join("\n");

  return { content: [{ type: "text", text: summary }] };
}
|
|
330
|
+
|
|
331
|
+
/**
|
|
332
|
+
* Entropy analysis (detect packed/encrypted sections)
|
|
333
|
+
*/
|
|
334
|
+
export async function handleEntropyAnalysis(args: {
|
|
335
|
+
filePath: string;
|
|
336
|
+
blockSize?: number;
|
|
337
|
+
}): Promise<McpResponse> {
|
|
338
|
+
const blockSize = args.blockSize ?? 1024;
|
|
339
|
+
|
|
340
|
+
const { stdout: hexData } = await runCommand(`xxd -p "${args.filePath}" | tr -d '\\n'`);
|
|
341
|
+
const bytes = hexData.match(/.{2}/g)?.map((h) => parseInt(h, 16)) ?? [];
|
|
342
|
+
|
|
343
|
+
const results: { section: string; offset: number; entropy: number; status: string }[] = [];
|
|
344
|
+
|
|
345
|
+
for (let i = 0; i < bytes.length; i += blockSize) {
|
|
346
|
+
const block = bytes.slice(i, i + blockSize);
|
|
347
|
+
if (block.length === 0) continue;
|
|
348
|
+
|
|
349
|
+
// Calculate Shannon entropy
|
|
350
|
+
const freq: Record<number, number> = {};
|
|
351
|
+
for (const byte of block) {
|
|
352
|
+
freq[byte] = (freq[byte] ?? 0) + 1;
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
let entropy = 0;
|
|
356
|
+
for (const count of Object.values(freq)) {
|
|
357
|
+
const p = count / block.length;
|
|
358
|
+
entropy -= p * Math.log2(p);
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
const normalizedEntropy = entropy / 8;
|
|
362
|
+
let status = "normal";
|
|
363
|
+
if (normalizedEntropy > 0.95) status = "high (possibly encrypted/compressed)";
|
|
364
|
+
else if (normalizedEntropy < 0.3) status = "low (possibly padding/zeros)";
|
|
365
|
+
|
|
366
|
+
results.push({
|
|
367
|
+
section: `Block ${Math.floor(i / blockSize)}`,
|
|
368
|
+
offset: i,
|
|
369
|
+
entropy: Math.round(entropy * 100) / 100,
|
|
370
|
+
status,
|
|
371
|
+
});
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
const avgEntropy = results.reduce((a, b) => a + b.entropy, 0) / results.length;
|
|
375
|
+
|
|
376
|
+
const summary = [
|
|
377
|
+
`Entropy Analysis: ${args.filePath}`,
|
|
378
|
+
`Block size: ${blockSize} bytes`,
|
|
379
|
+
`Total blocks: ${results.length}`,
|
|
380
|
+
`Average entropy: ${((avgEntropy / 8) * 100).toFixed(1)}% of max`,
|
|
381
|
+
"",
|
|
382
|
+
"Blocks with unusual entropy:",
|
|
383
|
+
...results
|
|
384
|
+
.filter((r) => r.status !== "normal")
|
|
385
|
+
.slice(0, 50)
|
|
386
|
+
.map((r) => ` ${r.section} (offset ${r.offset}): ${r.entropy}/8 bits - ${r.status}`),
|
|
387
|
+
results.filter((r) => r.status !== "normal").length > 50
|
|
388
|
+
? ` ... and ${results.filter((r) => r.status !== "normal").length - 50} more`
|
|
389
|
+
: "",
|
|
390
|
+
].join("\n");
|
|
391
|
+
|
|
392
|
+
return { content: [{ type: "text", text: summary }] };
|
|
393
|
+
}
|
|
394
|
+
|
|
395
|
+
/**
 * Import/export table analysis.
 *
 * Chooses an `nm` invocation based on the detected file format (Mach-O vs
 * anything else, treated as ELF), and falls back to parsing plain `nm`
 * output if the format-specific commands fail.
 *
 * NOTE(review): the awk column picks ($2 for imports, $3 for exports)
 * assume a specific nm output layout — on some platforms `nm -u` emits
 * only the symbol name, which would leave $2 empty and imports empty.
 * Verify against the target platforms.
 */
export async function handleImportExport(args: { filePath: string }): Promise<McpResponse> {
  const analysis = await analyzeFile(args.filePath);
  const imports: string[] = [];
  const exports: string[] = [];

  try {
    if (analysis.format.toLowerCase().includes("mach-o")) {
      // macOS: undefined symbols = imports; global non-undefined = exports.
      const { stdout: importSyms } = await runCommand(
        `nm -u "${args.filePath}" 2>/dev/null | awk '{print $2}'`
      );
      imports.push(...importSyms.trim().split("\n").filter(Boolean));

      const { stdout: exportSyms } = await runCommand(
        `nm -g "${args.filePath}" 2>/dev/null | grep -v "U " | awk '{print $3}'`
      );
      exports.push(...exportSyms.trim().split("\n").filter(Boolean));
    } else {
      // Linux ELF: dynamic symbol table (-D) for both directions.
      const { stdout: importSyms } = await runCommand(
        `nm -D -u "${args.filePath}" 2>/dev/null | awk '{print $2}'`
      );
      imports.push(...importSyms.trim().split("\n").filter(Boolean));

      const { stdout: exportSyms } = await runCommand(
        `nm -D --defined-only "${args.filePath}" 2>/dev/null | awk '{print $3}'`
      );
      exports.push(...exportSyms.trim().split("\n").filter(Boolean));
    }
  } catch {
    // Fallback to regular nm: parse "[addr] TYPE name" lines ourselves.
    // NOTE(review): the character class [UTDDBRC] repeats 'D' — harmless,
    // but likely a typo for another symbol-type letter. Also, since every
    // letter in the class is already uppercase, the toUpperCase() check
    // below is always true; the real filter is the presence of an address.
    try {
      const { stdout: allSyms } = await runCommand(`nm "${args.filePath}" 2>/dev/null`);
      for (const line of allSyms.split("\n")) {
        const match = line.match(/^\s*([0-9a-fA-F]+)?\s+([UTDDBRC])\s+(.+)/);
        if (match) {
          if (match[2] === "U") imports.push(match[3]);
          else if (match[1] && match[2] === match[2].toUpperCase()) exports.push(match[3]);
        }
      }
    } catch {
      // Best-effort: leave both lists as-is if even plain nm fails.
    }
  }

  const summary = [
    `Import/Export Analysis: ${args.filePath}`,
    `File type: ${analysis.format}`,
    "",
    `Imports (${imports.length}):`,
    ...imports.slice(0, 100).map((s) => ` - ${s}`),
    imports.length > 100 ? ` ... and ${imports.length - 100} more` : "",
    "",
    `Exports (${exports.length}):`,
    ...exports.slice(0, 100).map((s) => ` + ${s}`),
    exports.length > 100 ? ` ... and ${exports.length - 100} more` : "",
  ].join("\n");

  return { content: [{ type: "text", text: summary }] };
}
|
|
458
|
+
|
|
459
|
+
/**
|
|
460
|
+
* Binary diff (byte-level comparison)
|
|
461
|
+
*/
|
|
462
|
+
export async function handleBinaryDiff(args: {
|
|
463
|
+
file1: string;
|
|
464
|
+
file2: string;
|
|
465
|
+
contextBytes?: number;
|
|
466
|
+
}): Promise<McpResponse> {
|
|
467
|
+
const contextBytes = args.contextBytes ?? 32;
|
|
468
|
+
|
|
469
|
+
const [{ stdout: hex1 }, { stdout: hex2 }] = await Promise.all([
|
|
470
|
+
runCommand(`xxd -p "${args.file1}" | tr -d '\\n'`),
|
|
471
|
+
runCommand(`xxd -p "${args.file2}" | tr -d '\\n'`),
|
|
472
|
+
]);
|
|
473
|
+
|
|
474
|
+
const bytes1 = hex1.match(/.{2}/g) ?? [];
|
|
475
|
+
const bytes2 = hex2.match(/.{2}/g) ?? [];
|
|
476
|
+
|
|
477
|
+
const maxLen = Math.max(bytes1.length, bytes2.length);
|
|
478
|
+
const diffs: { offset: number; file1: string; file2: string }[] = [];
|
|
479
|
+
|
|
480
|
+
for (let i = 0; i < maxLen; i++) {
|
|
481
|
+
const b1 = bytes1[i] ?? "??";
|
|
482
|
+
const b2 = bytes2[i] ?? "??";
|
|
483
|
+
if (b1 !== b2) {
|
|
484
|
+
diffs.push({ offset: i, file1: b1, file2: b2 });
|
|
485
|
+
}
|
|
486
|
+
}
|
|
487
|
+
|
|
488
|
+
const diffPercent = ((diffs.length / maxLen) * 100).toFixed(2);
|
|
489
|
+
|
|
490
|
+
const summary = [
|
|
491
|
+
`Binary Diff: ${args.file1} vs ${args.file2}`,
|
|
492
|
+
"",
|
|
493
|
+
`File 1 size: ${bytes1.length} bytes`,
|
|
494
|
+
`File 2 size: ${bytes2.length} bytes`,
|
|
495
|
+
`Differences: ${diffs.length} bytes (${diffPercent}%)`,
|
|
496
|
+
"",
|
|
497
|
+
"Differences (first 200):",
|
|
498
|
+
...diffs.slice(0, 200).map((d) => {
|
|
499
|
+
const ctx1 = formatBytesWithContext(bytes1, d.offset, 4);
|
|
500
|
+
const ctx2 = formatBytesWithContext(bytes2, d.offset, 4);
|
|
501
|
+
return ` Offset 0x${d.offset.toString(16).padStart(8, "0")}: ${d.file1} -> ${d.file2}\n Context: [${ctx1}] -> [${ctx2}]`;
|
|
502
|
+
}),
|
|
503
|
+
diffs.length > 200 ? ` ... and ${diffs.length - 200} more differences` : "",
|
|
504
|
+
].join("\n");
|
|
505
|
+
|
|
506
|
+
return { content: [{ type: "text", text: summary }] };
|
|
507
|
+
}
|
|
508
|
+
|
|
509
|
+
/**
|
|
510
|
+
* Archive extraction (.a files)
|
|
511
|
+
*/
|
|
512
|
+
export async function handleArchiveExtract(args: {
|
|
513
|
+
filePath: string;
|
|
514
|
+
outputDir?: string;
|
|
515
|
+
listOnly?: boolean;
|
|
516
|
+
}): Promise<McpResponse> {
|
|
517
|
+
const outputDir = args.outputDir ?? `/tmp/archive_${Date.now()}`;
|
|
518
|
+
|
|
519
|
+
const { stdout: fileInfo } = await runCommand(`file "${args.filePath}"`);
|
|
520
|
+
if (!fileInfo.toLowerCase().includes("ar archive") && !fileInfo.toLowerCase().includes("archive")) {
|
|
521
|
+
return {
|
|
522
|
+
content: [{ type: "text", text: `Error: ${args.filePath} is not an archive file.\n${fileInfo}` }],
|
|
523
|
+
isError: true,
|
|
524
|
+
};
|
|
525
|
+
}
|
|
526
|
+
|
|
527
|
+
const { stdout: contents } = await runCommand(`ar -t "${args.filePath}"`);
|
|
528
|
+
const files = contents.trim().split("\n").filter(Boolean);
|
|
529
|
+
|
|
530
|
+
if (args.listOnly) {
|
|
531
|
+
return {
|
|
532
|
+
content: [{
|
|
533
|
+
type: "text",
|
|
534
|
+
text: [
|
|
535
|
+
`Archive Contents: ${args.filePath}`,
|
|
536
|
+
`Total files: ${files.length}`,
|
|
537
|
+
"",
|
|
538
|
+
"Files:",
|
|
539
|
+
...files.map((f) => ` - ${f}`),
|
|
540
|
+
].join("\n"),
|
|
541
|
+
}],
|
|
542
|
+
};
|
|
543
|
+
}
|
|
544
|
+
|
|
545
|
+
await execAsync(`mkdir -p "${outputDir}"`);
|
|
546
|
+
await execAsync(`cd "${outputDir}" && ar -x "${args.filePath}"`);
|
|
547
|
+
|
|
548
|
+
const summary = [
|
|
549
|
+
`Archive Extracted: ${args.filePath}`,
|
|
550
|
+
`Output directory: ${outputDir}`,
|
|
551
|
+
`Files extracted: ${files.length}`,
|
|
552
|
+
"",
|
|
553
|
+
"Extracted files:",
|
|
554
|
+
...files.map((f) => ` - ${f}`),
|
|
555
|
+
].join("\n");
|
|
556
|
+
|
|
557
|
+
return { content: [{ type: "text", text: summary }] };
|
|
558
|
+
}
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
/**
 * @ebowwa/mcp-nm - Handlers index
 *
 * Re-exports every MCP tool handler, grouped by domain module so callers
 * can import everything from a single entry point.
 */

// Symbol analysis (nm): listing, filtering, searching, and comparing symbols
export {
  handleListSymbols,
  handleExternalSymbols,
  handleUndefinedSymbols,
  handleDefinedSymbols,
  handleDynamicSymbols,
  handleSearchSymbols,
  handleCompareBinaries,
  handleSymbolInfo,
  handleSummary,
} from "./nm";

// Hex operations (xxd): dumps, conversions, extraction, and pattern search
export {
  handleXxdHexdump,
  handleXxdPlain,
  handleXxdCInclude,
  handleXxdBinary,
  handleXxdReverse,
  handleXxdExtract,
  handleXxdFindPattern,
} from "./xxd";

// Binary analysis: strings, file info, sections, disassembly, security
// audit, entropy, import/export tables, diffing, and archive extraction
export {
  handleStrings,
  handleFileInfo,
  handleSectionSizes,
  handleObjdumpSections,
  handleObjdumpProgramHeaders,
  handleOtoolLibs,
  handleReadelf,
  handleLdd,
  handleDisassembly,
  handleSecurityAudit,
  handleEntropyAnalysis,
  handleImportExport,
  handleBinaryDiff,
  handleArchiveExtract,
} from "./bin";

// Binary patching: byte patches, NOP sleds, hex editing, and the
// persistent patch registry (list/apply/restore/verify/remove)
export {
  handlePatchBytes,
  handleNopSled,
  handleHexEditor,
  handleConvertNumber,
  handlePatchRegister,
  handlePatchList,
  handlePatchApply,
  handlePatchRestore,
  handlePatchVerify,
  handlePatchRemove,
} from "./patch";

// macOS-specific: code signing, quarantine attributes, and safe patching
export {
  handleCodesignInfo,
  handleCodesignRemove,
  handleCodesignSign,
  handleQuarantineCheck,
  handleQuarantineRemove,
  handleSafePatch,
  handleBinVerify,
} from "./macos";