mcp-server-commands 0.2.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,17 +1,28 @@
1
1
  # mcp-server-commands
2
2
 
3
- An MCP server to run commands and includ the output in chat history.
3
+ An MCP server to run commands.
4
+
5
+ > [!WARNING]
6
+ > Be careful what you ask this server to run!
7
+ > In Claude Desktop app, use `Approve Once` (not `Allow for This Chat`) so you can review each command, use `Deny` if you don't trust the command.
8
+ > Permissions are dictated by the user that runs the server.
9
+ > DO NOT run with `sudo`.
4
10
 
5
11
  ## Tools
6
12
 
13
+ Tools are for LLMs to request, i.e. Claude Desktop app
14
+
7
15
  - `run_command` - run a command, i.e. `hostname` or `ls -al` or `echo "hello world"` etc
8
16
  - Returns STDOUT and STDERR as text
9
- - for LLMs to request tool use and get back the command output, i.e. Claude Desktop app
17
+ - `run_script` - run a script! (i.e. `fish`, `bash`, `zsh`, `python`)
18
+ - Let your LLM run the code it writes!
19
+ - script is passed over STDIN
10
20
 
11
21
  ## Prompts
12
22
 
13
- - `run_command` - include the output of a command in the chat history
14
- - for users to include relevant commands in chat history, i.e. via `Zed`'s slash commands
23
+ Prompts are for users to include in chat history, i.e. via `Zed`'s slash commands (in its AI Chat panel)
24
+
25
+ - `run_command` - generate a prompt message with the command output
15
26
 
16
27
  ## Development
17
28
 
@@ -63,6 +74,16 @@ On Windows: `%APPDATA%/Claude/claude_desktop_config.json`
63
74
  }
64
75
  ```
65
76
 
77
+ ### Logging
78
+
79
+ Claude Desktop app writes logs to `~/Library/Logs/Claude/mcp-server-mcp-server-commands.log`
80
+
81
+ By default, only important messages are logged (i.e. errors).
82
+ If you want to see more messages, add `--verbose` to the `args` when configuring the server.
83
+
84
+ By the way, logs are written to `STDERR` because that is what Claude Desktop routes to the log files.
85
+ In the future, I expect well formatted log messages to be written over the `STDIO` transport to the MCP client (note: not Claude Desktop app).
86
+
66
87
  ### Debugging
67
88
 
68
89
  Since MCP servers communicate over stdio, debugging can be challenging. We recommend using the [MCP Inspector](https://github.com/modelcontextprotocol/inspector), which is available as a package script:
@@ -72,3 +93,7 @@ npm run inspector
72
93
  ```
73
94
 
74
95
  The Inspector will provide a URL to access debugging tools in your browser.
96
+
97
+ ## TODOs
98
+
99
+ - Add some mechanism (likely in a new MCP server) to retain memory of past command failures, i.e. to use `python3` and not `python` and tie it to a machine or some context?
@@ -0,0 +1,53 @@
1
+ import { exec } from "child_process";
2
+ /**
3
+ * Executes a file with the given arguments, piping input to stdin.
4
+ * @param {string} interpreter - The file to execute.
5
+ * @param {string} stdin_text - The string to pipe to stdin.
6
+ * @returns {Promise<ExecResult>} A promise that resolves with the stdout and stderr of the command. `message` is provided on a failure to explain the error.
7
+ */
8
+ function execFileWithInput(interpreter, stdin_text, options) {
9
+ // FYI for now, using `exec()` so the interpreter can have cmd+args AIO
10
+ // could switch to `execFile()` to pass args array separately
11
+ // TODO starts with fish too? "fish -..." PRN use a library to parse the command and determine this?
12
+ if (interpreter.split(" ")[0] === "fish") {
13
+ // PRN also check error from fish and add possible clarification to error message though there are legit ways to trigger that same error message! i.e. `fish .` which is not the same issue!
14
+ return fishWorkaround(interpreter, stdin_text, options);
15
+ }
16
+ return new Promise((resolve, reject) => {
17
+ const child = exec(interpreter, options, (error, stdout, stderr) => {
18
+ if (error) {
19
+ reject({ message: error.message, stdout, stderr });
20
+ }
21
+ else {
22
+ resolve({ stdout, stderr });
23
+ }
24
+ });
25
+ if (stdin_text) {
26
+ if (child.stdin === null) {
27
+ reject(new Error("Unexpected failure: child.stdin is null"));
28
+ return;
29
+ }
30
+ child.stdin.write(stdin_text);
31
+ child.stdin.end();
32
+ }
33
+ });
34
+ }
35
+ async function fishWorkaround(interpreter, script, options) {
36
+ // fish right now chokes on piped input (STDIN) + node's exec/spawn/etc, so lets use a workaround to echo the input
37
+ // base64 encode the input, then decode in pipeline
38
+ const base64Script = Buffer.from(script).toString("base64");
39
+ const command = `${interpreter} -c "echo ${base64Script} | base64 -d | fish"`;
40
+ return new Promise((resolve, reject) => {
41
+ // const child = ... // careful with refactoring not to return that unused child
42
+ exec(command, options, (error, stdout, stderr) => {
43
+ // I like this style of error vs success handling! it's beautiful-est (promises are underrated)
44
+ if (error) {
45
+ reject({ message: error.message, stdout, stderr });
46
+ }
47
+ else {
48
+ resolve({ stdout, stderr });
49
+ }
50
+ });
51
+ });
52
+ }
53
+ export { execFileWithInput };
package/build/index.js CHANGED
@@ -4,18 +4,73 @@ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"
4
4
  import { CallToolRequestSchema, ListToolsRequestSchema, ListPromptsRequestSchema, GetPromptRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
5
5
  import { exec } from "node:child_process";
6
6
  import { promisify } from "node:util";
7
+ import { execFileWithInput } from "./exec-utils.js";
8
+ // TODO use .promises? in node api
7
9
  const execAsync = promisify(exec);
10
+ let verbose = false;
11
+ // check CLI args:
12
+ if (process.argv.includes("--verbose")) {
13
+ verbose = true;
14
+ }
8
15
  const server = new Server({
9
16
  name: "mcp-server-commands",
10
- version: "0.2.0",
17
+ version: "0.4.0",
11
18
  }, {
12
19
  capabilities: {
13
20
  //resources: {},
14
21
  tools: {},
15
22
  prompts: {},
23
+ //logging: {}, // for logging messages that don't seem to work yet or I am doing them wrong
16
24
  },
17
25
  });
26
+ function always_log(message, data) {
27
+ if (data) {
28
+ console.error(message + ": " + JSON.stringify(data));
29
+ }
30
+ else {
31
+ console.error(message);
32
+ }
33
+ }
34
+ if (verbose) {
35
+ always_log("INFO: verbose logging enabled");
36
+ }
37
+ else {
38
+ always_log("INFO: verbose logging disabled, enable it with --verbose");
39
+ }
40
+ function verbose_log(message, data) {
41
+ // https://modelcontextprotocol.io/docs/tools/debugging - mentions various ways to debug/troubleshoot (including dev tools)
42
+ //
43
+ // remember STDIO transport means can't log over STDOUT (client expects JSON messages per the spec)
44
+ // https://modelcontextprotocol.io/docs/tools/debugging#implementing-logging
45
+ // mentions STDERR is captured by the host app (i.e. Claude Desktop app)
46
+ // server.sendLoggingMessage is captured by MCP client (not Claude Desktop app)
47
+ // SO, IIUC use STDERR for logging into Claude Desktop app logs in:
48
+ // '~/Library/Logs/Claude/mcp.log'
49
+ if (verbose) {
50
+ always_log(message, data);
51
+ }
52
+ // inspector, catches these logs and shows them on left hand side of screen (sidebar)
53
+ // TODO add verbose parameter (CLI arg?)
54
+ // IF I wanted to log via MCP client logs (not sure what those are/do):
55
+ // I do not see inspector catching these logs :(, there is a server notifications section and it remains empty
56
+ //server.sendLoggingMessage({
57
+ // level: "info",
58
+ // data: message,
59
+ //});
60
+ // which results in something like:
61
+ //server.notification({
62
+ // method: "notifications/message",
63
+ // params: {
64
+ // level: "warning",
65
+ // logger: "mcp-server-commands",
66
+ // data: "ListToolsRequest2",
67
+ // },
68
+ //});
69
+ //
70
+ // FYI client should also request a log level from the server, so that needs to be here at some point too
71
+ }
18
72
  server.setRequestHandler(ListToolsRequestSchema, async () => {
73
+ verbose_log("INFO: ListTools");
19
74
  return {
20
75
  tools: [
21
76
  {
@@ -25,75 +80,159 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
25
80
  properties: {
26
81
  command: {
27
82
  type: "string",
28
- description: "Command to run",
83
+ description: "Command with args",
84
+ },
85
+ cwd: {
86
+ // previous run_command calls can probe the filesystem and find paths to change to
87
+ type: "string",
88
+ description: "Current working directory, leave empty in most cases",
29
89
  },
90
+ // FYI using child_process.exec runs command in a shell, so you can pass a script here too but I still think separate tools would be helpful?
91
+ // FYI gonna use execFile for run_script
92
+ // - env - obscure cases where command takes a param only via an env var?
93
+ // args to consider:
94
+ // - timeout - lets just hard code this for now
95
+ // - shell - (cmd/args) - for now use run_script for this case, also can just pass "fish -c 'command'" or "sh ..."
96
+ // - stdin? though this borders on the run_script below
97
+ // - capture_output (default true) - for now can just redirect to /dev/null - perhaps capture_stdout/capture_stderr
30
98
  },
31
99
  required: ["command"],
32
100
  },
33
101
  },
102
+ // PRN tool to introspect the environment (i.e. windows vs linux vs mac, maybe default shell, etc?) - for now LLM can run commands and when they fail it can make adjustments accordingly - some cases where knowing this would help avoid dispatching erroneous commands (i.e. using free on linux, vm_stat on mac)
103
+ {
104
+ // TODO is run_script even needed if I were to add STDIN support to run_command above?
105
+ name: "run_script",
106
+ inputSchema: {
107
+ type: "object",
108
+ properties: {
109
+ interpreter: {
110
+ // TODO use shebang on *nix?
111
+ type: "string",
112
+ description: "Command with arguments. Script will be piped to stdin. Examples: bash, fish, zsh, python, or: bash --norc",
113
+ },
114
+ script: {
115
+ type: "string",
116
+ description: "Script to run",
117
+ },
118
+ cwd: {
119
+ type: "string",
120
+ description: "Current working directory",
121
+ },
122
+ },
123
+ required: ["script"],
124
+ },
125
+ },
34
126
  ],
35
127
  };
36
128
  });
37
129
  server.setRequestHandler(CallToolRequestSchema, async (request) => {
130
+ verbose_log("INFO: ToolRequest", request);
38
131
  switch (request.params.name) {
39
132
  case "run_command": {
40
- const command = String(request.params.arguments?.command);
41
- if (!command) {
42
- throw new Error("Command is required");
43
- }
44
- try {
45
- const { stdout, stderr } = await execAsync(command);
46
- return {
47
- toolResult: {
48
- isError: false,
49
- content: [
50
- {
51
- type: "text",
52
- text: stdout,
53
- name: "STDOUT",
54
- },
55
- {
56
- type: "text",
57
- text: stderr,
58
- name: "STDERR",
59
- },
60
- ],
61
- },
62
- };
63
- }
64
- catch (error) {
65
- const { message, stdout, stderr } = error;
66
- return {
67
- toolResult: {
68
- isError: true,
69
- content: [
70
- {
71
- // most of the time this is gonna match stderr, TODO do I want/need both error and stderr?
72
- type: "text",
73
- text: message,
74
- name: "ERROR",
75
- },
76
- {
77
- type: "text",
78
- text: stderr || "",
79
- name: "STDERR",
80
- },
81
- {
82
- // keep STDOUT b/c there might be some useful output before the failure
83
- type: "text",
84
- text: stdout || "",
85
- name: "STDOUT",
86
- },
87
- ],
88
- },
89
- };
90
- }
133
+ return {
134
+ toolResult: await runCommand(request.params.arguments),
135
+ };
136
+ }
137
+ case "run_script": {
138
+ return {
139
+ toolResult: await runScript(request.params.arguments),
140
+ };
91
141
  }
92
142
  default:
93
143
  throw new Error("Unknown tool");
94
144
  }
95
145
  });
146
+ async function runCommand(args) {
147
+ const command = String(args?.command);
148
+ if (!command) {
149
+ throw new Error("Command is required");
150
+ }
151
+ const options = {};
152
+ if (args?.cwd) {
153
+ options.cwd = String(args.cwd);
154
+ // ENOENT is thrown if the cwd doesn't exist, and I think LLMs can understand that?
155
+ }
156
+ try {
157
+ const result = await execAsync(command, options);
158
+ return {
159
+ isError: false,
160
+ content: messagesFor(result),
161
+ };
162
+ }
163
+ catch (error) {
164
+ // TODO catch for other errors, not just ExecException
165
+ // FYI failure may not always be a bad thing if for example checking for a file to exist so just keep that in mind in terms of logging?
166
+ const response = {
167
+ isError: true,
168
+ content: messagesFor(error),
169
+ };
170
+ always_log("WARN: run_command failed", response);
171
+ return response;
172
+ }
173
+ }
174
+ async function runScript(args) {
175
+ const interpreter = String(args?.interpreter);
176
+ if (!interpreter) {
177
+ throw new Error("Interpreter is required");
178
+ }
179
+ const options = {
180
+ //const options = {
181
+ // constrains typescript too, to string based overload
182
+ encoding: "utf8",
183
+ };
184
+ if (args?.cwd) {
185
+ options.cwd = String(args.cwd);
186
+ // ENOENT is thrown if the cwd doesn't exist, and I think LLMs can understand that?
187
+ }
188
+ const script = String(args?.script);
189
+ if (!script) {
190
+ throw new Error("Script is required");
191
+ }
192
+ try {
193
+ const result = await execFileWithInput(interpreter, script, options);
194
+ return {
195
+ isError: false,
196
+ content: messagesFor(result),
197
+ };
198
+ }
199
+ catch (error) {
200
+ const response = {
201
+ isError: true,
202
+ content: messagesFor(error),
203
+ };
204
+ always_log("WARN: run_script failed", response);
205
+ return response;
206
+ }
207
+ }
208
+ function messagesFor(result) {
209
+ const messages = [];
210
+ if (result.message) {
211
+ messages.push({
212
+ // most of the time this is gonna match stderr, TODO do I want/need both error and stderr?
213
+ type: "text",
214
+ text: result.message,
215
+ name: "ERROR",
216
+ });
217
+ }
218
+ if (result.stdout) {
219
+ messages.push({
220
+ type: "text",
221
+ text: result.stdout,
222
+ name: "STDOUT",
223
+ });
224
+ }
225
+ if (result.stderr) {
226
+ messages.push({
227
+ type: "text",
228
+ text: result.stderr,
229
+ name: "STDERR",
230
+ });
231
+ }
232
+ return messages;
233
+ }
96
234
  server.setRequestHandler(ListPromptsRequestSchema, async () => {
235
+ verbose_log("INFO: ListPrompts");
97
236
  return {
98
237
  prompts: [
99
238
  {
@@ -113,12 +252,17 @@ server.setRequestHandler(GetPromptRequestSchema, async (request) => {
113
252
  if (request.params.name !== "run_command") {
114
253
  throw new Error("Unknown prompt");
115
254
  }
255
+ verbose_log("INFO: PromptRequest", request);
116
256
  const command = String(request.params.arguments?.command);
117
257
  if (!command) {
118
258
  throw new Error("Command is required");
119
259
  }
260
+ // Is it possible/feasible to pass a path for the CWD when running the command?
261
+ // - currently it uses / (yikez)
262
+ // - IMO makes more sense to have it be based on the Zed CWD of each project
263
+ // - Fallback could be to configure on server level (i.e. home dir of current user) - perhaps CLI arg? (thinking of zed's context_servers config section)
120
264
  const { stdout, stderr } = await execAsync(command);
121
- // let error bubble up, errors look good in zed /prompts (i.e. command not found)
265
+ // TODO gracefully handle errors and turn them into a prompt message that can be used by LLM to troubleshoot the issue, currently errors result in nothing inserted into the prompt and instead it shows the Zed's chat panel as a failure
122
266
  const messages = [
123
267
  {
124
268
  role: "user",
@@ -129,7 +273,7 @@ server.setRequestHandler(GetPromptRequestSchema, async (request) => {
129
273
  },
130
274
  },
131
275
  ];
132
- if (stdout && stdout.length > 0) {
276
+ if (stdout) {
133
277
  messages.push({
134
278
  role: "user",
135
279
  content: {
@@ -138,7 +282,7 @@ server.setRequestHandler(GetPromptRequestSchema, async (request) => {
138
282
  },
139
283
  });
140
284
  }
141
- if (stderr && stderr.length > 0) {
285
+ if (stderr) {
142
286
  messages.push({
143
287
  role: "user",
144
288
  content: {
@@ -147,10 +291,10 @@ server.setRequestHandler(GetPromptRequestSchema, async (request) => {
147
291
  },
148
292
  });
149
293
  }
294
+ verbose_log("INFO: PromptResponse", messages);
150
295
  return { messages };
151
296
  });
152
297
  async function main() {
153
- console.log("Starting server...");
154
298
  const transport = new StdioServerTransport();
155
299
  await server.connect(transport);
156
300
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mcp-server-commands",
3
- "version": "0.2.1",
3
+ "version": "0.4.0",
4
4
  "description": "An MCP server to run arbitrary commands",
5
5
  "private": false,
6
6
  "type": "module",
@@ -14,14 +14,20 @@
14
14
  "clean": "rm -rf build",
15
15
  "build": "tsc && node -e \"require('fs').chmodSync('build/index.js', '755')\"",
16
16
  "prepare": "npm run build",
17
- "watch": "tsc --watch",
18
- "inspector": "npx @modelcontextprotocol/inspector build/index.js"
17
+ "watch": "npm run build && tsc --watch",
18
+ "inspector": "npx @modelcontextprotocol/inspector build/index.js",
19
+ "test": "jest",
20
+ "test:watch": "jest --watch",
21
+ "test:integration": "jest tests/integration"
19
22
  },
20
23
  "dependencies": {
21
24
  "@modelcontextprotocol/sdk": "0.6.0"
22
25
  },
23
26
  "devDependencies": {
27
+ "@types/jest": "^29.5.11",
24
28
  "@types/node": "^20.11.24",
29
+ "jest": "^29.7.0",
30
+ "ts-jest": "^29.1.1",
25
31
  "typescript": "^5.3.3"
26
32
  }
27
33
  }