mcp-agents 0.5.1 → 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +61 -20
  2. package/package.json +1 -1
  3. package/server.js +120 -45
package/README.md CHANGED
@@ -15,16 +15,29 @@ MCP server that wraps AI CLI tools — [Claude Code](https://docs.anthropic.com/
15
15
 
16
16
  Only the CLI you select with `--provider` needs to be present.
17
17
 
18
+ ## Install
19
+
20
+ ```bash
21
+ npm install -g mcp-agents
22
+ ```
23
+
24
+ Global install is **strongly recommended** over `npx -y mcp-agents@latest`. The `npx`
25
+ approach performs a network round-trip on every cold start, whose latency can exceed MCP client
26
+ connection timeouts and cause "stream disconnected" errors.
27
+
28
+ **Tip:** If your project's `.mcp.json` references `mcp-agents`, add `npm install -g mcp-agents`
29
+ to your setup script (e.g. `bin/setup`) so new developers get it automatically.
30
+
18
31
  ## Quick test
19
32
 
20
33
  ```bash
21
34
  # Default provider (codex)
22
- npx mcp-agents
35
+ mcp-agents
23
36
 
24
37
  # Specific provider
25
- npx mcp-agents --provider claude
26
- npx mcp-agents --provider gemini
27
- npx mcp-agents --provider gemini --sandbox false
38
+ mcp-agents --provider claude
39
+ mcp-agents --provider gemini
40
+ mcp-agents --provider gemini --sandbox false
28
41
  ```
29
42
 
30
43
  The server speaks [JSON-RPC over stdio](https://modelcontextprotocol.io/docs/concepts/transports#stdio). It prints `[mcp-agents] ready (provider: <name>)` to stderr when it's listening.
@@ -57,29 +70,29 @@ Each `--provider` flag maps to a single exposed tool:
57
70
  ### `codex` (pass-through)
58
71
 
59
72
  The codex provider passes through to Codex's native MCP server (`codex mcp-server`)
60
- with configurable flags:
73
+ using `-c key=value` config overrides:
61
74
 
62
- | CLI Flag | Default | Codex flag |
63
- |----------|---------|------------|
64
- | `--model` | `gpt-5.3-codex` | `-m <model>` |
65
- | `--model_reasoning_effort` | `high` | `-c model_reasoning_effort=<value>` |
75
+ | CLI Flag | Default | Codex config key |
76
+ |----------|---------|-----------------|
77
+ | `--model` | `gpt-5.3-codex` | `model` |
78
+ | `--model_reasoning_effort` | `high` | `model_reasoning_effort` |
66
79
 
67
- Hardcoded defaults: `-s read-only -a never` (safe for MCP server mode).
80
+ Hardcoded defaults: `sandbox_mode=read-only`, `approval_policy=never` (safe for MCP server mode).
68
81
 
69
82
  ## Integration with Claude Code
70
83
 
71
- Add entries to your project's `.mcp.json`:
84
+ Add entries to your project's `.mcp.json` (requires `npm install -g mcp-agents`):
72
85
 
73
86
  ```json
74
87
  {
75
88
  "mcpServers": {
76
89
  "codex": {
77
- "command": "npx",
78
- "args": ["-y", "mcp-agents@latest", "--provider", "codex"]
90
+ "command": "mcp-agents",
91
+ "args": ["--provider", "codex"]
79
92
  },
80
93
  "gemini": {
81
- "command": "npx",
82
- "args": ["-y", "mcp-agents@latest", "--provider", "gemini", "--sandbox", "false"]
94
+ "command": "mcp-agents",
95
+ "args": ["--provider", "gemini", "--sandbox", "false"]
83
96
  }
84
97
  }
85
98
  }
@@ -87,33 +100,61 @@ Add entries to your project's `.mcp.json`:
87
100
 
88
101
  Override codex defaults:
89
102
 
103
+ ```json
104
+ {
105
+ "mcpServers": {
106
+ "codex": {
107
+ "command": "mcp-agents",
108
+ "args": ["--provider", "codex", "--model", "o3-pro", "--model_reasoning_effort", "medium"]
109
+ }
110
+ }
111
+ }
112
+ ```
113
+
114
+ <details>
115
+ <summary>Alternative: using npx (slower, not recommended)</summary>
116
+
90
117
  ```json
91
118
  {
92
119
  "mcpServers": {
93
120
  "codex": {
94
121
  "command": "npx",
95
- "args": ["-y", "mcp-agents@latest", "--provider", "codex", "--model", "o3-pro", "--model_reasoning_effort", "medium"]
122
+ "args": ["-y", "mcp-agents@latest", "--provider", "codex"]
96
123
  }
97
124
  }
98
125
  }
99
126
  ```
100
127
 
128
+ > **Warning:** `npx -y mcp-agents@latest` performs a network round-trip on every cold
129
+ > start (~70s), which can exceed MCP client connection timeouts.
130
+
131
+ </details>
132
+
101
133
  ## Integration with OpenAI Codex
102
134
 
103
135
  Add two entries to `~/.codex/config.toml` — one per provider you want available:
104
136
 
105
137
  ```toml
106
138
  [mcp_servers.claude-code]
107
- command = "npx"
108
- args = ["-y", "mcp-agents", "--provider", "claude"]
139
+ command = "mcp-agents"
140
+ args = ["--provider", "claude"]
109
141
 
110
142
  [mcp_servers.gemini]
111
- command = "npx"
112
- args = ["-y", "mcp-agents", "--provider", "gemini", "--sandbox", "false"]
143
+ command = "mcp-agents"
144
+ args = ["--provider", "gemini", "--sandbox", "false"]
113
145
  ```
114
146
 
115
147
  Then in a Codex session you can call the `claude_code` or `gemini` tools, which shell out to the respective CLIs.
116
148
 
149
+ ## Development
150
+
151
+ ```bash
152
+ npm install
153
+ npm link # symlinks mcp-agents to your local server.js
154
+ ```
155
+
156
+ After `npm link`, any edits to `server.js` take effect immediately — no reinstall needed.
157
+
117
158
  ## How it works
118
159
 
119
160
  1. An MCP client connects over stdio
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mcp-agents",
3
- "version": "0.5.1",
3
+ "version": "0.5.3",
4
4
  "description": "MCP server that wraps AI CLI tools (Claude Code, Gemini CLI, Codex CLI) for use by any MCP client",
5
5
  "type": "module",
6
6
  "bin": {
package/server.js CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  /* eslint-disable no-console */
3
3
 
4
- import { execFile, spawn } from "node:child_process";
4
+ import { spawn } from "node:child_process";
5
5
  import { readFileSync } from "node:fs";
6
6
  import { dirname, join } from "node:path";
7
7
  import { fileURLToPath } from "node:url";
@@ -92,9 +92,10 @@ Usage: mcp-agents [options]
92
92
 
93
93
  Options:
94
94
  --provider <name> CLI backend to use (${providers}) [default: codex]
95
- --model <model> Model to use (codex) [default: gpt-5.3-codex]
96
- --model_reasoning_effort <e> Reasoning effort (codex) [default: high]
95
+ --model <model> Codex model [default: gpt-5.3-codex]
96
+ --model_reasoning_effort <e> Codex reasoning effort [default: high]
97
97
  --sandbox <bool> Gemini sandbox mode (true/false) [default: false]
98
+ --timeout <seconds> Default timeout per call [default: 300]
98
99
  --help, -h Show this help message
99
100
  --version, -v Show version number`);
100
101
  }
@@ -102,7 +103,7 @@ Options:
102
103
  /**
103
104
  * Parse CLI flags from process.argv.
104
105
  * Handles --help, --version, --provider, --model, --model_reasoning_effort, --sandbox, and unknown flags.
105
- * @returns {{ provider: string, model?: string, modelReasoningEffort?: string, sandbox: boolean }}
106
+ * @returns {{ provider: string, model?: string, modelReasoningEffort?: string, sandbox: boolean, defaultTimeoutMs?: number }}
106
107
  */
107
108
  function parseArgs() {
108
109
  const args = process.argv.slice(2);
@@ -110,6 +111,7 @@ function parseArgs() {
110
111
  let model;
111
112
  let modelReasoningEffort;
112
113
  let sandbox = false;
114
+ let defaultTimeoutMs;
113
115
 
114
116
  for (let i = 0; i < args.length; i++) {
115
117
  switch (args[i]) {
@@ -153,17 +155,32 @@ function parseArgs() {
153
155
  }
154
156
  sandbox = args[++i] === "true";
155
157
  break;
158
+ case "--timeout": {
159
+ if (i + 1 >= args.length) {
160
+ process.stderr.write("error: --timeout requires a value\n");
161
+ process.exit(1);
162
+ }
163
+ const secs = Number(args[++i]);
164
+ if (!(secs > 0)) {
165
+ process.stderr.write("error: --timeout must be a positive number\n");
166
+ process.exit(1);
167
+ }
168
+ defaultTimeoutMs = Math.round(secs * 1000);
169
+ break;
170
+ }
156
171
  default:
157
172
  process.stderr.write(`error: unknown option: ${args[i]}\n`);
158
173
  process.exit(1);
159
174
  }
160
175
  }
161
176
 
162
- return { provider, model, modelReasoningEffort, sandbox };
177
+ return { provider, model, modelReasoningEffort, sandbox, defaultTimeoutMs };
163
178
  }
164
179
 
165
180
  /**
166
181
  * Run a CLI command and return stdout (or stderr if stdout is empty).
182
+ * Uses spawn with detached:true so the entire process group can be killed
183
+ * on timeout — prevents orphan child processes.
167
184
  * @param {string} command
168
185
  * @param {string[]} args
169
186
  * @param {{ timeoutMs?: number, stdinData?: string }} [opts]
@@ -174,31 +191,17 @@ function runCli(command, args, opts = {}) {
174
191
  const stdinData = opts.stdinData;
175
192
 
176
193
  return new Promise((resolve, reject) => {
177
- const child = execFile(
178
- command,
179
- args,
180
- {
181
- timeout: timeoutMs,
182
- maxBuffer: MAX_BUFFER_BYTES,
183
- env: { ...process.env, NO_COLOR: "1" },
184
- },
185
- (error, stdout, stderr) => {
186
- if (error) {
187
- const details = [
188
- `${command} failed: ${error.message}`,
189
- stderr ? `stderr:\n${stderr}` : null,
190
- ]
191
- .filter(Boolean)
192
- .join("\n");
193
-
194
- reject(new Error(details));
195
- return;
196
- }
197
-
198
- const out = (stdout || stderr || "").trimEnd();
199
- resolve(out);
200
- },
201
- );
194
+ let stdout = "";
195
+ let stderr = "";
196
+ let stdoutLen = 0;
197
+ let stderrLen = 0;
198
+ let settled = false;
199
+
200
+ const child = spawn(command, args, {
201
+ detached: true,
202
+ stdio: ["pipe", "pipe", "pipe"],
203
+ env: { ...process.env, NO_COLOR: "1" },
204
+ });
202
205
 
203
206
  // Pipe prompt via stdin to avoid arg-quoting issues, then close.
204
207
  child.stdin?.on("error", () => {}); // ignore EPIPE if child exits early
@@ -208,8 +211,59 @@ function runCli(command, args, opts = {}) {
208
211
  child.stdin?.end();
209
212
  }
210
213
 
214
+ const killGroup = () => {
215
+ try { process.kill(-child.pid, "SIGKILL"); } catch {}
216
+ };
217
+
218
+ const done = (err) => {
219
+ clearTimeout(timer);
220
+ if (settled) return;
221
+ settled = true;
222
+ err ? reject(err) : resolve((stdout || stderr || "").trimEnd());
223
+ };
224
+
225
+ child.stdout.on("data", (chunk) => {
226
+ stdoutLen += chunk.length;
227
+ if (stdoutLen > MAX_BUFFER_BYTES) {
228
+ killGroup();
229
+ done(new Error(`${command} stdout maxBuffer exceeded`));
230
+ } else {
231
+ stdout += chunk;
232
+ }
233
+ });
234
+
235
+ child.stderr.on("data", (chunk) => {
236
+ stderrLen += chunk.length;
237
+ if (stderrLen > MAX_BUFFER_BYTES) {
238
+ killGroup();
239
+ done(new Error(`${command} stderr maxBuffer exceeded`));
240
+ } else {
241
+ stderr += chunk;
242
+ }
243
+ });
244
+
245
+ // Kill entire process group on timeout (prevents orphan processes).
246
+ const timer = setTimeout(() => {
247
+ killGroup();
248
+ }, timeoutMs);
249
+
211
250
  child.on("error", (err) => {
212
- reject(new Error(`Failed to start ${command}: ${err.message}`));
251
+ done(new Error(`Failed to start ${command}: ${err.message}`));
252
+ });
253
+
254
+ child.on("close", (code, signal) => {
255
+ if (signal || code !== 0) {
256
+ const reason = signal ? `killed by ${signal}` : `exit code ${code}`;
257
+ const details = [
258
+ `${command} failed: ${reason}`,
259
+ stderr ? `stderr:\n${stderr}` : null,
260
+ ]
261
+ .filter(Boolean)
262
+ .join("\n");
263
+ done(new Error(details));
264
+ return;
265
+ }
266
+ done(null);
213
267
  });
214
268
  });
215
269
  }
@@ -220,28 +274,47 @@ function runCli(command, args, opts = {}) {
220
274
  */
221
275
  function runCodexPassthrough({ model, modelReasoningEffort }) {
222
276
  const args = [
223
- "-m",
224
- model || "gpt-5.3-codex",
225
- "-s",
226
- "read-only",
227
- "-a",
228
- "never",
229
- "-c",
230
- `model_reasoning_effort=${modelReasoningEffort || "high"}`,
231
277
  "mcp-server",
278
+ "-c", `model=${model || "gpt-5.3-codex"}`,
279
+ "-c", "sandbox_mode=read-only",
280
+ "-c", "approval_policy=never",
281
+ "-c", `model_reasoning_effort=${modelReasoningEffort || "high"}`,
232
282
  ];
233
283
 
234
284
  logErr(`[mcp-agents] passthrough: codex ${args.join(" ")}`);
235
285
 
236
- const child = spawn("codex", args, { stdio: "inherit" });
286
+ const child = spawn("codex", args, {
287
+ stdio: ["inherit", "inherit", "pipe"],
288
+ });
289
+
290
+ child.stderr.on("data", (chunk) => {
291
+ logErr(`[codex] ${chunk.toString().trimEnd()}`);
292
+ });
293
+
294
+ const SIGNAL_CODES = { SIGHUP: 1, SIGINT: 2, SIGTERM: 15 };
295
+ for (const sig of ["SIGTERM", "SIGINT", "SIGHUP"]) {
296
+ process.once(sig, () => {
297
+ child.kill(sig);
298
+ setTimeout(() => {
299
+ child.kill("SIGKILL");
300
+ process.exit(128 + SIGNAL_CODES[sig]);
301
+ }, 5000).unref();
302
+ });
303
+ }
237
304
 
238
305
  child.on("error", (err) => {
239
306
  logErr(`[mcp-agents] failed to start codex: ${err.message}`);
240
307
  process.exitCode = 1;
241
308
  });
242
309
 
243
- child.on("exit", (code) => {
244
- process.exitCode = code ?? 1;
310
+ child.on("exit", (code, signal) => {
311
+ if (signal) {
312
+ logErr(`[mcp-agents] codex killed by ${signal}`);
313
+ process.exitCode = 128 + (SIGNAL_CODES[signal] ?? 0);
314
+ } else {
315
+ if (code !== 0) logErr(`[mcp-agents] codex exited with code ${code}`);
316
+ process.exitCode = code ?? 1;
317
+ }
245
318
  });
246
319
  }
247
320
 
@@ -250,7 +323,7 @@ function runCodexPassthrough({ model, modelReasoningEffort }) {
250
323
  // ---------------------------------------------------------------------------
251
324
 
252
325
  async function main() {
253
- const { provider: providerName, model, modelReasoningEffort, sandbox } = parseArgs();
326
+ const { provider: providerName, model, modelReasoningEffort, sandbox, defaultTimeoutMs } = parseArgs();
254
327
  const backend = CLI_BACKENDS[providerName];
255
328
 
256
329
  if (!backend) {
@@ -274,6 +347,8 @@ async function main() {
274
347
  { capabilities: { tools: {} } },
275
348
  );
276
349
 
350
+ const effectiveTimeout = defaultTimeoutMs ?? DEFAULT_TIMEOUT_MS;
351
+
277
352
  const properties = {
278
353
  prompt: {
279
354
  type: "string",
@@ -282,7 +357,7 @@ async function main() {
282
357
  timeout_ms: {
283
358
  type: "integer",
284
359
  minimum: 1,
285
- description: `Optional timeout override (default ${DEFAULT_TIMEOUT_MS})`,
360
+ description: `Optional timeout override (default ${effectiveTimeout}ms)`,
286
361
  },
287
362
  ...backend.extraProperties,
288
363
  };
@@ -333,7 +408,7 @@ async function main() {
333
408
  const timeoutMsRaw = params.arguments?.timeout_ms;
334
409
  const timeoutMs = Number.isInteger(timeoutMsRaw)
335
410
  ? timeoutMsRaw
336
- : DEFAULT_TIMEOUT_MS;
411
+ : effectiveTimeout;
337
412
 
338
413
  if (!prompt.trim()) {
339
414
  return {