mcp-agents 0.3.1 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3):
  1. package/README.md +43 -5
  2. package/package.json +1 -1
  3. package/server.js +67 -15
package/README.md CHANGED
@@ -36,6 +36,7 @@ Each `--provider` flag maps to a single exposed tool:
36
36
  |----------|-----------|-------------|
37
37
  | `claude` | `claude_code` | `claude -p <prompt>` |
38
38
  | `gemini` | `gemini` | `gemini [-s] -p <prompt>` |
39
+ | `codex` | *(pass-through)* | `codex mcp-server` |
39
40
 
40
41
  ### `claude_code` parameters
41
42
 
@@ -52,12 +53,49 @@ Each `--provider` flag maps to a single exposed tool:
52
53
  | `sandbox` | `boolean` | no | Run in sandbox mode (`-s` flag) |
53
54
  | `timeout_ms` | `integer` | no | Timeout in ms (default: 120 000) |
54
55
 
55
- ### `codex` parameters
56
+ ### `codex` (pass-through)
56
57
 
57
- | Parameter | Type | Required | Description |
58
- |-----------|------|----------|-------------|
59
- | `prompt` | `string` | yes | The prompt to send to Codex CLI |
60
- | `timeout_ms` | `integer` | no | Timeout in ms (default: 120 000) |
58
+ The codex provider passes through to Codex's native MCP server (`codex mcp-server`)
59
+ with configurable flags:
60
+
61
+ | CLI Flag | Default | Codex flag |
62
+ |----------|---------|------------|
63
+ | `--model` | `gpt-5.2-codex` | `-m <model>` |
64
+ | `--model_reasoning_effort` | `high` | `-c model_reasoning_effort=<value>` |
65
+
66
+ Hardcoded defaults: `-s read-only -a never` (safe for MCP server mode).
67
+
68
+ ## Integration with Claude Code
69
+
70
+ Add entries to your project's `.mcp.json`:
71
+
72
+ ```json
73
+ {
74
+ "mcpServers": {
75
+ "codex": {
76
+ "command": "npx",
77
+ "args": ["-y", "mcp-agents@latest", "--provider", "codex"]
78
+ },
79
+ "gemini": {
80
+ "command": "npx",
81
+ "args": ["-y", "mcp-agents@latest", "--provider", "gemini"]
82
+ }
83
+ }
84
+ }
85
+ ```
86
+
87
+ Override codex defaults:
88
+
89
+ ```json
90
+ {
91
+ "mcpServers": {
92
+ "codex": {
93
+ "command": "npx",
94
+ "args": ["-y", "mcp-agents@latest", "--provider", "codex", "--model", "o3-pro", "--model_reasoning_effort", "medium"]
95
+ }
96
+ }
97
+ }
98
+ ```
61
99
 
62
100
  ## Integration with OpenAI Codex
63
101
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mcp-agents",
3
- "version": "0.3.1",
3
+ "version": "0.3.5",
4
4
  "description": "MCP server that wraps AI CLI tools (Claude Code, Gemini CLI, Codex CLI) for use by any MCP client",
5
5
  "type": "module",
6
6
  "bin": {
package/server.js CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  /* eslint-disable no-console */
3
3
 
4
- import { execFile } from "node:child_process";
4
+ import { execFile, spawn } from "node:child_process";
5
5
  import { readFileSync } from "node:fs";
6
6
  import { dirname, join } from "node:path";
7
7
  import { fileURLToPath } from "node:url";
@@ -18,7 +18,7 @@ const VERSION = JSON.parse(
18
18
  readFileSync(join(__dirname, "package.json"), "utf8"),
19
19
  ).version;
20
20
 
21
- const DEFAULT_TIMEOUT_MS = 120_000;
21
+ const DEFAULT_TIMEOUT_MS = 30_000;
22
22
  const MAX_BUFFER_BYTES = 10 * 1024 * 1024;
23
23
 
24
24
  // ---------------------------------------------------------------------------
@@ -30,7 +30,7 @@ const CLI_BACKENDS = {
30
30
  command: "claude",
31
31
  toolName: "claude_code",
32
32
  description: "Run Claude Code CLI (claude -p) with a prompt.",
33
- buildArgs: (prompt) => ["-p", prompt],
33
+ buildArgs: (prompt) => ["--no-session-persistence", "-p", prompt],
34
34
  extraProperties: {},
35
35
  },
36
36
  gemini: {
@@ -52,11 +52,7 @@ const CLI_BACKENDS = {
52
52
  },
53
53
  },
54
54
  codex: {
55
- command: "codex",
56
- toolName: "codex",
57
- description: "Run Codex CLI (codex exec) with a prompt.",
58
- buildArgs: (prompt) => ["exec", prompt],
59
- extraProperties: {},
55
+ passthrough: true,
60
56
  },
61
57
  };
62
58
 
@@ -93,19 +89,23 @@ function printHelp() {
93
89
  Usage: mcp-agents [options]
94
90
 
95
91
  Options:
96
- --provider <name> CLI backend to use (${providers}) [default: codex]
97
- --help, -h Show this help message
98
- --version, -v Show version number`);
92
+ --provider <name> CLI backend to use (${providers}) [default: codex]
93
+ --model <model> Model to use (codex) [default: gpt-5.2-codex]
94
+ --model_reasoning_effort <e> Reasoning effort (codex) [default: high]
95
+ --help, -h Show this help message
96
+ --version, -v Show version number`);
99
97
  }
100
98
 
101
99
  /**
102
100
  * Parse CLI flags from process.argv.
103
- * Handles --help, --version, --provider, and unknown flags.
104
- * @returns {string | null} Provider name, or null if the process should exit.
101
+ * Handles --help, --version, --provider, --model, --model_reasoning_effort, and unknown flags.
102
+ * @returns {{ provider: string, model?: string, modelReasoningEffort?: string }}
105
103
  */
106
104
  function parseArgs() {
107
105
  const args = process.argv.slice(2);
108
106
  let provider = "codex";
107
+ let model;
108
+ let modelReasoningEffort;
109
109
 
110
110
  for (let i = 0; i < args.length; i++) {
111
111
  switch (args[i]) {
@@ -126,13 +126,29 @@ function parseArgs() {
126
126
  }
127
127
  provider = args[++i];
128
128
  break;
129
+ case "--model":
130
+ if (i + 1 >= args.length) {
131
+ process.stderr.write("error: --model requires a value\n");
132
+ process.exit(1);
133
+ }
134
+ model = args[++i];
135
+ break;
136
+ case "--model_reasoning_effort":
137
+ if (i + 1 >= args.length) {
138
+ process.stderr.write(
139
+ "error: --model_reasoning_effort requires a value\n",
140
+ );
141
+ process.exit(1);
142
+ }
143
+ modelReasoningEffort = args[++i];
144
+ break;
129
145
  default:
130
146
  process.stderr.write(`error: unknown option: ${args[i]}\n`);
131
147
  process.exit(1);
132
148
  }
133
149
  }
134
150
 
135
- return provider;
151
+ return { provider, model, modelReasoningEffort };
136
152
  }
137
153
 
138
154
  /**
@@ -183,12 +199,43 @@ function runCli(command, args, opts = {}) {
183
199
  });
184
200
  }
185
201
 
202
+ /**
203
+ * Spawn codex mcp-server as a pass-through, piping stdio directly.
204
+ * @param {{ model?: string, modelReasoningEffort?: string }} opts
205
+ */
206
+ function runCodexPassthrough({ model, modelReasoningEffort }) {
207
+ const args = [
208
+ "-m",
209
+ model || "gpt-5.2-codex",
210
+ "-s",
211
+ "read-only",
212
+ "-a",
213
+ "never",
214
+ "-c",
215
+ `model_reasoning_effort=${modelReasoningEffort || "high"}`,
216
+ "mcp-server",
217
+ ];
218
+
219
+ logErr(`[mcp-agents] passthrough: codex ${args.join(" ")}`);
220
+
221
+ const child = spawn("codex", args, { stdio: "inherit" });
222
+
223
+ child.on("error", (err) => {
224
+ logErr(`[mcp-agents] failed to start codex: ${err.message}`);
225
+ process.exitCode = 1;
226
+ });
227
+
228
+ child.on("exit", (code) => {
229
+ process.exitCode = code ?? 1;
230
+ });
231
+ }
232
+
186
233
  // ---------------------------------------------------------------------------
187
234
  // Main
188
235
  // ---------------------------------------------------------------------------
189
236
 
190
237
  async function main() {
191
- const providerName = parseArgs();
238
+ const { provider: providerName, model, modelReasoningEffort } = parseArgs();
192
239
  const backend = CLI_BACKENDS[providerName];
193
240
 
194
241
  if (!backend) {
@@ -200,6 +247,11 @@ async function main() {
200
247
  return;
201
248
  }
202
249
 
250
+ if (backend.passthrough) {
251
+ runCodexPassthrough({ model, modelReasoningEffort });
252
+ return;
253
+ }
254
+
203
255
  const server = new Server(
204
256
  { name: "mcp-agents", version: VERSION },
205
257
  { capabilities: { tools: {} } },