moonbridge-0.5.2-py3-none-any.whl → moonbridge-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
moonbridge/__init__.py CHANGED
@@ -1,8 +1,8 @@
1
- """MCP server for spawning Kimi K2.5 agents."""
1
+ """MCP server for spawning AI coding agents."""
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- __version__ = "0.5.2"
5
+ __version__ = "0.7.0"
6
6
 
7
7
  from .server import main, run, server
8
8
 
moonbridge/adapters/base.py CHANGED
@@ -1,5 +1,5 @@
1
1
  from dataclasses import dataclass
2
- from typing import Protocol
2
+ from typing import Any, Protocol
3
3
 
4
4
 
5
5
  @dataclass(frozen=True)
@@ -18,6 +18,35 @@ class AdapterConfig:
18
18
  default_timeout: int = 600
19
19
 
20
20
 
21
+ @dataclass(frozen=True)
22
+ class AgentResult:
23
+ """Agent execution result."""
24
+
25
+ status: str
26
+ output: str
27
+ stderr: str | None
28
+ returncode: int
29
+ duration_ms: int
30
+ agent_index: int
31
+ message: str | None = None
32
+ raw: dict[str, Any] | None = None
33
+
34
+ def to_dict(self) -> dict[str, Any]:
35
+ payload: dict[str, Any] = {
36
+ "status": self.status,
37
+ "output": self.output,
38
+ "stderr": self.stderr,
39
+ "returncode": self.returncode,
40
+ "duration_ms": self.duration_ms,
41
+ "agent_index": self.agent_index,
42
+ }
43
+ if self.message is not None:
44
+ payload["message"] = self.message
45
+ if self.raw is not None:
46
+ payload["raw"] = self.raw
47
+ return payload
48
+
49
+
21
50
  class CLIAdapter(Protocol):
22
51
  """Protocol for CLI backend adapters."""
23
52
 
moonbridge/adapters/codex.py CHANGED
@@ -55,6 +55,7 @@ class CodexAdapter:
55
55
  "gpt-5.1-codex-mini",
56
56
  "gpt-5.1-codex-max",
57
57
  ),
58
+ default_timeout=1800, # 30 minutes - Codex runs long
58
59
  )
59
60
 
60
61
  def build_command(
moonbridge/adapters/kimi.py CHANGED
@@ -38,6 +38,7 @@ class KimiAdapter:
38
38
  install_hint="uv tool install kimi-cli",
39
39
  supports_thinking=True,
40
40
  known_models=("kimi-k2.5",),
41
+ default_timeout=600, # 10 minutes - Kimi is faster
41
42
  )
42
43
 
43
44
  def build_command(
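Taken together, the adapter changes above give each backend its own default timeout (Codex 1800 s, Kimi 600 s) and a shared `AgentResult` dataclass in `moonbridge/adapters/base.py` in place of the ad-hoc result dicts used in 0.5.2. A minimal sketch of how a result serializes via `to_dict()` (field values are illustrative, not from a real run):

```python
# Illustrative only: constructing the AgentResult added in 0.7.0 and
# serializing it the way server.py does before returning JSON to the client.
from moonbridge.adapters.base import AgentResult

result = AgentResult(
    status="success",
    output="done",      # stdout from the spawned CLI
    stderr=None,
    returncode=0,
    duration_ms=1234,
    agent_index=0,
)

payload = result.to_dict()
# message and raw are omitted because they were left as None
assert "message" not in payload and "raw" not in payload
print(payload["status"])  # "success"
```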
moonbridge/sandbox.py ADDED
@@ -0,0 +1,252 @@
1
+ """Copy-on-run sandbox for agent execution."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import difflib
6
+ import os
7
+ import shutil
8
+ import tempfile
9
+ import time
10
+ from collections.abc import Callable, Iterator
11
+ from dataclasses import dataclass, replace
12
+ from pathlib import Path
13
+
14
+ from moonbridge.adapters.base import AgentResult
15
+
16
+ SANDBOX_IGNORE_DIRS = {
17
+ ".git",
18
+ ".venv",
19
+ ".tox",
20
+ "__pycache__",
21
+ ".mypy_cache",
22
+ ".pytest_cache",
23
+ ".ruff_cache",
24
+ "node_modules",
25
+ "dist",
26
+ "build",
27
+ }
28
+ SANDBOX_IGNORE_FILES = {".DS_Store"}
29
+ MAX_COPY_BYTES = 500 * 1024 * 1024
30
+
31
+
32
+ @dataclass(frozen=True)
33
+ class SandboxResult:
34
+ diff: str
35
+ summary: dict[str, int]
36
+ truncated: bool
37
+ sandbox_path: str | None
38
+
39
+
40
+ def _should_ignore(name: str) -> bool:
41
+ if name in SANDBOX_IGNORE_DIRS:
42
+ return True
43
+ if name in SANDBOX_IGNORE_FILES:
44
+ return True
45
+ return name.endswith((".pyc", ".pyo"))
46
+
47
+
48
+ def _ignore_names(_dirpath: str, names: list[str]) -> set[str]:
49
+ return {name for name in names if _should_ignore(name)}
50
+
51
+
52
+ def _filtered_walk(root: str) -> Iterator[tuple[str, list[str], list[str]]]:
53
+ for dirpath, dirnames, filenames in os.walk(root):
54
+ dirnames[:] = [d for d in dirnames if not _should_ignore(d)]
55
+ filenames = [f for f in filenames if not _should_ignore(f)]
56
+ yield dirpath, dirnames, filenames
57
+
58
+
59
+ def _collect_files(root: str) -> set[str]:
60
+ files: set[str] = set()
61
+ for dirpath, _dirnames, filenames in _filtered_walk(root):
62
+ rel_dir = os.path.relpath(dirpath, root)
63
+ for filename in filenames:
64
+ rel_path = filename if rel_dir == "." else os.path.join(rel_dir, filename)
65
+ files.add(rel_path)
66
+ return files
67
+
68
+
69
+ def _read_text(path: str) -> str | None:
70
+ data = Path(path).read_bytes()
71
+ try:
72
+ return data.decode("utf-8")
73
+ except UnicodeDecodeError:
74
+ return None
75
+
76
+
77
+ def _diff_trees(
78
+ original: str,
79
+ sandbox: str,
80
+ max_bytes: int,
81
+ ) -> tuple[str, dict[str, int], bool]:
82
+ original_files = _collect_files(original)
83
+ sandbox_files = _collect_files(sandbox)
84
+ all_files = sorted(original_files | sandbox_files)
85
+ diff_chunks: list[str] = []
86
+ size = 0
87
+ truncated = False
88
+ summary = {"added": 0, "modified": 0, "deleted": 0, "binary": 0}
89
+
90
+ def append_chunk(chunk: str) -> None:
91
+ nonlocal size, truncated
92
+ if truncated or not chunk:
93
+ return
94
+ remaining = max_bytes - size
95
+ if remaining <= 0:
96
+ truncated = True
97
+ return
98
+ if len(chunk) > remaining:
99
+ diff_chunks.append(chunk[:remaining])
100
+ truncated = True
101
+ size = max_bytes
102
+ return
103
+ diff_chunks.append(chunk)
104
+ size += len(chunk)
105
+
106
+ for rel_path in all_files:
107
+ original_path = os.path.join(original, rel_path)
108
+ sandbox_path = os.path.join(sandbox, rel_path)
109
+ original_exists = os.path.exists(original_path)
110
+ sandbox_exists = os.path.exists(sandbox_path)
111
+
112
+ if not original_exists and sandbox_exists:
113
+ summary["added"] += 1
114
+ sandbox_text = _read_text(sandbox_path)
115
+ if sandbox_text is None:
116
+ summary["binary"] += 1
117
+ append_chunk(f"Binary files /dev/null and b/{rel_path} differ\n")
118
+ continue
119
+ diff = difflib.unified_diff(
120
+ [],
121
+ sandbox_text.splitlines(keepends=True),
122
+ fromfile="/dev/null",
123
+ tofile=f"b/{rel_path}",
124
+ )
125
+ append_chunk("".join(diff))
126
+ continue
127
+
128
+ if original_exists and not sandbox_exists:
129
+ summary["deleted"] += 1
130
+ original_text = _read_text(original_path)
131
+ if original_text is None:
132
+ summary["binary"] += 1
133
+ append_chunk(f"Binary files a/{rel_path} and /dev/null differ\n")
134
+ continue
135
+ diff = difflib.unified_diff(
136
+ original_text.splitlines(keepends=True),
137
+ [],
138
+ fromfile=f"a/{rel_path}",
139
+ tofile="/dev/null",
140
+ )
141
+ append_chunk("".join(diff))
142
+ continue
143
+
144
+ if not original_exists or not sandbox_exists:
145
+ continue
146
+
147
+ original_bytes = Path(original_path).read_bytes()
148
+ sandbox_bytes = Path(sandbox_path).read_bytes()
149
+ if original_bytes == sandbox_bytes:
150
+ continue
151
+
152
+ original_text = None
153
+ sandbox_text = None
154
+ try:
155
+ original_text = original_bytes.decode("utf-8")
156
+ sandbox_text = sandbox_bytes.decode("utf-8")
157
+ except UnicodeDecodeError:
158
+ summary["binary"] += 1
159
+ append_chunk(f"Binary files a/{rel_path} and b/{rel_path} differ\n")
160
+ continue
161
+
162
+ summary["modified"] += 1
163
+ diff = difflib.unified_diff(
164
+ original_text.splitlines(keepends=True),
165
+ sandbox_text.splitlines(keepends=True),
166
+ fromfile=f"a/{rel_path}",
167
+ tofile=f"b/{rel_path}",
168
+ )
169
+ append_chunk("".join(diff))
170
+
171
+ if truncated:
172
+ diff_chunks.append("\n... diff truncated ...\n")
173
+ return ("".join(diff_chunks), summary, truncated)
174
+
175
+
176
+ def _estimate_copy_size(root: str, max_bytes: int) -> int:
177
+ total = 0
178
+ for dirpath, _dirnames, filenames in _filtered_walk(root):
179
+ for filename in filenames:
180
+ path = os.path.join(dirpath, filename)
181
+ total += os.path.getsize(path)
182
+ if total > max_bytes:
183
+ return total
184
+ return total
185
+
186
+
187
+ def _agent_index(fn: Callable[[str], AgentResult]) -> int:
188
+ value = getattr(fn, "agent_index", 0)
189
+ return value if isinstance(value, int) else 0
190
+
191
+
192
+ def run_sandboxed(
193
+ fn: Callable[[str], AgentResult],
194
+ cwd: str,
195
+ *,
196
+ max_diff_bytes: int = 500_000,
197
+ max_copy_bytes: int = MAX_COPY_BYTES,
198
+ keep: bool = False,
199
+ ) -> tuple[AgentResult, SandboxResult | None]:
200
+ """Run fn in a copy of cwd. Returns (agent_result, sandbox_result).
201
+
202
+ On sandbox infrastructure error, returns (error_result, None).
203
+ """
204
+ start = time.monotonic()
205
+ sandbox_root: str | None = None
206
+ agent_index = _agent_index(fn)
207
+
208
+ def error_result(reason: str) -> AgentResult:
209
+ duration_ms = int((time.monotonic() - start) * 1000)
210
+ return AgentResult(
211
+ status="error",
212
+ output="",
213
+ stderr=f"sandbox error: {reason}",
214
+ returncode=-1,
215
+ duration_ms=duration_ms,
216
+ agent_index=agent_index,
217
+ )
218
+
219
+ try:
220
+ total_bytes = _estimate_copy_size(cwd, max_copy_bytes)
221
+ if total_bytes > max_copy_bytes:
222
+ return error_result(
223
+ f"copy size {total_bytes} exceeds max {max_copy_bytes}"
224
+ ), None
225
+
226
+ sandbox_root = tempfile.mkdtemp(prefix="moonbridge-sandbox-")
227
+ sandbox_cwd = os.path.join(sandbox_root, "workspace")
228
+ shutil.copytree(cwd, sandbox_cwd, symlinks=False, ignore=_ignore_names)
229
+
230
+ result = fn(sandbox_cwd)
231
+
232
+ try:
233
+ diff, summary, truncated = _diff_trees(cwd, sandbox_cwd, max_diff_bytes)
234
+ sandbox_result = SandboxResult(
235
+ diff=diff,
236
+ summary=summary,
237
+ truncated=truncated,
238
+ sandbox_path=sandbox_root if keep else None,
239
+ )
240
+ return result, sandbox_result
241
+ except Exception as exc:
242
+ raw = dict(result.raw or {})
243
+ sandbox_payload: dict[str, object] = {"enabled": True, "error": str(exc)}
244
+ if keep:
245
+ sandbox_payload["path"] = sandbox_root
246
+ raw["sandbox"] = sandbox_payload
247
+ return replace(result, raw=raw), None
248
+ except Exception as exc:
249
+ return error_result(str(exc)), None
250
+ finally:
251
+ if not keep and sandbox_root:
252
+ shutil.rmtree(sandbox_root, ignore_errors=True)
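Before the server wiring below, a sketch of the new sandbox module in isolation: the callable receives the path of the temporary copy and returns an `AgentResult`; the project directory and the agent here are stand-ins.

```python
# Hypothetical standalone use of moonbridge.sandbox.run_sandboxed. The agent
# callable runs against the temp copy; the original directory is untouched.
import tempfile
from pathlib import Path

from moonbridge.adapters.base import AgentResult
from moonbridge.sandbox import run_sandboxed

project = tempfile.mkdtemp(prefix="demo-project-")  # stand-in for a real repo
Path(project, "README.md").write_text("# demo\n")


def fake_agent(sandbox_cwd: str) -> AgentResult:
    # A real caller spawns a CLI with cwd=sandbox_cwd; this stub just
    # simulates an agent adding one file to the copy.
    Path(sandbox_cwd, "notes.txt").write_text("hello\n")
    return AgentResult(
        status="success",
        output="wrote notes.txt",
        stderr=None,
        returncode=0,
        duration_ms=1,
        agent_index=0,
    )


result, sandbox = run_sandboxed(fake_agent, project)
if sandbox is not None:
    print(sandbox.summary)     # {'added': 1, 'modified': 0, 'deleted': 0, 'binary': 0}
    print(bool(sandbox.diff))  # unified diff text covering notes.txt
```

When the server drives this path (see `_run_cli_sandboxed` in server.py below), the same summary and unified diff are attached to the tool response under `raw["sandbox"]`.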
moonbridge/server.py CHANGED
@@ -1,4 +1,4 @@
1
- """MCP server for spawning Kimi K2.5 agents."""
1
+ """MCP server for spawning AI coding agents."""
2
2
 
3
3
  from __future__ import annotations
4
4
 
@@ -11,6 +11,7 @@ import signal
11
11
  import sys
12
12
  import time
13
13
  import weakref
14
+ from dataclasses import replace
14
15
  from subprocess import PIPE, Popen, TimeoutExpired
15
16
  from typing import Any
16
17
 
@@ -19,6 +20,8 @@ from mcp.server.stdio import stdio_server
19
20
  from mcp.types import TextContent, Tool
20
21
 
21
22
  from moonbridge.adapters import ADAPTER_REGISTRY, CLIAdapter, get_adapter
23
+ from moonbridge.adapters.base import AgentResult
24
+ from moonbridge.tools import build_tools
22
25
 
23
26
  server = Server("moonbridge")
24
27
 
@@ -34,6 +37,17 @@ ALLOWED_DIRS = [
34
37
  if path
35
38
  ]
36
39
  MAX_PROMPT_LENGTH = 100_000
40
+ _SANDBOX_ENV = os.environ.get("MOONBRIDGE_SANDBOX", "").strip().lower()
41
+ SANDBOX_MODE = _SANDBOX_ENV in {"1", "true", "yes", "copy"}
42
+ SANDBOX_KEEP = os.environ.get("MOONBRIDGE_SANDBOX_KEEP", "").strip().lower() in {
43
+ "1",
44
+ "true",
45
+ "yes",
46
+ }
47
+ SANDBOX_MAX_DIFF_BYTES = int(os.environ.get("MOONBRIDGE_SANDBOX_MAX_DIFF", "500000"))
48
+ SANDBOX_MAX_COPY_BYTES = int(
49
+ os.environ.get("MOONBRIDGE_SANDBOX_MAX_COPY", str(500 * 1024 * 1024))
50
+ )
37
51
 
38
52
  _active_processes: set[weakref.ref[Popen[str]]] = set()
39
53
 
@@ -71,8 +85,20 @@ def _safe_env(adapter: CLIAdapter) -> dict[str, str]:
71
85
  return env
72
86
 
73
87
 
74
- def _validate_timeout(timeout_seconds: int | None) -> int:
75
- value = DEFAULT_TIMEOUT if timeout_seconds is None else int(timeout_seconds)
88
+ def _resolve_timeout(adapter: CLIAdapter, timeout_seconds: int | None) -> int:
89
+ """Resolve timeout: explicit > adapter-env > adapter-default > global."""
90
+ if timeout_seconds is not None:
91
+ value = int(timeout_seconds)
92
+ else:
93
+ # Check adapter-specific env var first
94
+ env_key = f"MOONBRIDGE_{adapter.config.name.upper()}_TIMEOUT"
95
+ if env_val := os.environ.get(env_key):
96
+ value = int(env_val)
97
+ elif adapter.config.default_timeout != 600:
98
+ # Use adapter default if explicitly set (not the base default)
99
+ value = adapter.config.default_timeout
100
+ else:
101
+ value = DEFAULT_TIMEOUT
76
102
  if value < 30 or value > 3600:
77
103
  raise ValueError("timeout_seconds must be between 30 and 3600")
78
104
  return value
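The replacement of `_validate_timeout` with `_resolve_timeout` above is what makes the per-adapter defaults effective. A sketch of the precedence, calling the private helper directly for illustration (not something an MCP client would do; assumes the `codex` adapter is registered and the values shown are illustrative):

```python
# Illustrative walk-through of _resolve_timeout's precedence for a Codex run.
import os

from moonbridge.adapters import get_adapter
from moonbridge.server import _resolve_timeout

codex = get_adapter("codex")

# 1. An explicit argument always wins.
assert _resolve_timeout(codex, 900) == 900

# 2. Otherwise an adapter-specific env var is honoured...
os.environ["MOONBRIDGE_CODEX_TIMEOUT"] = "2400"
assert _resolve_timeout(codex, None) == 2400

# 3. ...and with no override, Codex falls back to its 1800 s adapter default.
del os.environ["MOONBRIDGE_CODEX_TIMEOUT"]
assert _resolve_timeout(codex, None) == 1800
```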
@@ -180,27 +206,51 @@ def _auth_error(stderr: str | None, adapter: CLIAdapter) -> bool:
180
206
  return any(pattern in lowered for pattern in adapter.config.auth_patterns)
181
207
 
182
208
 
183
- def _result(
184
- *,
185
- status: str,
186
- output: str,
187
- stderr: str | None,
188
- returncode: int,
189
- duration_ms: int,
209
+ def _run_cli_sandboxed(
210
+ adapter: CLIAdapter,
211
+ prompt: str,
212
+ thinking: bool,
213
+ cwd: str,
214
+ timeout_seconds: int,
190
215
  agent_index: int,
191
- message: str | None = None,
192
- ) -> dict[str, Any]:
193
- payload: dict[str, Any] = {
194
- "status": status,
195
- "output": output,
196
- "stderr": stderr,
197
- "returncode": returncode,
198
- "duration_ms": duration_ms,
199
- "agent_index": agent_index,
200
- }
201
- if message is not None:
202
- payload["message"] = message
203
- return payload
216
+ model: str | None = None,
217
+ reasoning_effort: str | None = None,
218
+ ) -> AgentResult:
219
+ from moonbridge.sandbox import run_sandboxed
220
+
221
+ def run_agent(sandbox_cwd: str) -> AgentResult:
222
+ return _run_cli_sync(
223
+ adapter,
224
+ prompt,
225
+ thinking,
226
+ sandbox_cwd,
227
+ timeout_seconds,
228
+ agent_index,
229
+ model,
230
+ reasoning_effort,
231
+ )
232
+
233
+ run_agent.agent_index = agent_index # type: ignore[attr-defined]
234
+
235
+ result, sandbox_result = run_sandboxed(
236
+ run_agent,
237
+ cwd,
238
+ max_diff_bytes=SANDBOX_MAX_DIFF_BYTES,
239
+ max_copy_bytes=SANDBOX_MAX_COPY_BYTES,
240
+ keep=SANDBOX_KEEP,
241
+ )
242
+ if sandbox_result:
243
+ raw = dict(result.raw or {})
244
+ raw["sandbox"] = {
245
+ "enabled": True,
246
+ "summary": sandbox_result.summary,
247
+ "diff": sandbox_result.diff,
248
+ "truncated": sandbox_result.truncated,
249
+ }
250
+ if sandbox_result.sandbox_path:
251
+ raw["sandbox"]["path"] = sandbox_result.sandbox_path
252
+ return replace(result, raw=raw)
253
+ return result
204
254
 
205
255
 
206
256
  def _run_cli_sync(
@@ -212,7 +262,7 @@ def _run_cli_sync(
212
262
  agent_index: int,
213
263
  model: str | None = None,
214
264
  reasoning_effort: str | None = None,
215
- ) -> dict[str, Any]:
265
+ ) -> AgentResult:
216
266
  start = time.monotonic()
217
267
  cmd = adapter.build_command(prompt, thinking, model, reasoning_effort)
218
268
  logger.debug("Spawning agent with prompt: %s...", prompt[:100])
@@ -229,7 +279,7 @@ def _run_cli_sync(
229
279
  except FileNotFoundError:
230
280
  duration_ms = int((time.monotonic() - start) * 1000)
231
281
  logger.error("%s CLI not found or not executable", adapter.config.name)
232
- return _result(
282
+ return AgentResult(
233
283
  status="error",
234
284
  output="",
235
285
  stderr=f"{adapter.config.name} CLI not found or not executable",
@@ -240,7 +290,7 @@ def _run_cli_sync(
240
290
  except PermissionError as exc:
241
291
  duration_ms = int((time.monotonic() - start) * 1000)
242
292
  logger.error("Permission denied starting process: %s", exc)
243
- return _result(
293
+ return AgentResult(
244
294
  status="error",
245
295
  output="",
246
296
  stderr=f"Permission denied: {exc}",
@@ -251,7 +301,7 @@ def _run_cli_sync(
251
301
  except OSError as exc:
252
302
  duration_ms = int((time.monotonic() - start) * 1000)
253
303
  logger.error("Failed to start process: %s", exc)
254
- return _result(
304
+ return AgentResult(
255
305
  status="error",
256
306
  output="",
257
307
  stderr=f"Failed to start process: {exc}",
@@ -266,7 +316,7 @@ def _run_cli_sync(
266
316
  stderr_value = stderr or None
267
317
  if _auth_error(stderr_value, adapter):
268
318
  logger.info("Agent %s completed with status: auth_error", agent_index)
269
- return _result(
319
+ return AgentResult(
270
320
  status="auth_error",
271
321
  output=stdout,
272
322
  stderr=stderr_value,
@@ -277,7 +327,7 @@ def _run_cli_sync(
277
327
  )
278
328
  status = "success" if proc.returncode == 0 else "error"
279
329
  logger.info("Agent %s completed with status: %s", agent_index, status)
280
- return _result(
330
+ return AgentResult(
281
331
  status=status,
282
332
  output=stdout,
283
333
  stderr=stderr_value,
@@ -289,7 +339,7 @@ def _run_cli_sync(
289
339
  _terminate_process(proc)
290
340
  duration_ms = int((time.monotonic() - start) * 1000)
291
341
  logger.warning("Agent %s timed out after %s seconds", agent_index, timeout_seconds)
292
- return _result(
342
+ return AgentResult(
293
343
  status="timeout",
294
344
  output="",
295
345
  stderr=None,
@@ -301,7 +351,7 @@ def _run_cli_sync(
301
351
  _terminate_process(proc)
302
352
  duration_ms = int((time.monotonic() - start) * 1000)
303
353
  logger.error("Agent %s failed with error: %s", agent_index, exc)
304
- return _result(
354
+ return AgentResult(
305
355
  status="error",
306
356
  output="",
307
357
  stderr=str(exc),
@@ -313,6 +363,39 @@ def _run_cli_sync(
313
363
  _untrack_process(proc)
314
364
 
315
365
 
366
+ def _run_cli(
367
+ adapter: CLIAdapter,
368
+ prompt: str,
369
+ thinking: bool,
370
+ cwd: str,
371
+ timeout_seconds: int,
372
+ agent_index: int,
373
+ model: str | None = None,
374
+ reasoning_effort: str | None = None,
375
+ ) -> AgentResult:
376
+ if SANDBOX_MODE:
377
+ return _run_cli_sandboxed(
378
+ adapter,
379
+ prompt,
380
+ thinking,
381
+ cwd,
382
+ timeout_seconds,
383
+ agent_index,
384
+ model,
385
+ reasoning_effort,
386
+ )
387
+ return _run_cli_sync(
388
+ adapter,
389
+ prompt,
390
+ thinking,
391
+ cwd,
392
+ timeout_seconds,
393
+ agent_index,
394
+ model,
395
+ reasoning_effort,
396
+ )
397
+
398
+
316
399
  def _json_text(payload: Any) -> list[TextContent]:
317
400
  return [TextContent(type="text", text=json.dumps(payload, ensure_ascii=True))]
318
401
 
@@ -328,14 +411,18 @@ def _status_check(cwd: str, adapter: CLIAdapter) -> dict[str, Any]:
328
411
  }
329
412
  timeout = min(DEFAULT_TIMEOUT, 60)
330
413
  result = _run_cli_sync(adapter, "status check", False, cwd, timeout, 0)
331
- if result["status"] == "auth_error":
414
+ if result.status == "auth_error":
332
415
  return {"status": "auth_error", "message": adapter.config.auth_message}
333
- if result["status"] == "success":
416
+ if result.status == "success":
334
417
  return {
335
418
  "status": "success",
336
419
  "message": f"{adapter.config.name} CLI available and authenticated",
337
420
  }
338
- return {"status": "error", "message": f"{adapter.config.name} CLI error", "details": result}
421
+ return {
422
+ "status": "error",
423
+ "message": f"{adapter.config.name} CLI error",
424
+ "details": result.to_dict(),
425
+ }
339
426
 
340
427
 
341
428
  def _adapter_info(cwd: str, adapter: CLIAdapter) -> dict[str, Any]:
@@ -344,7 +431,7 @@ def _adapter_info(cwd: str, adapter: CLIAdapter) -> dict[str, Any]:
344
431
  if installed:
345
432
  timeout = min(DEFAULT_TIMEOUT, 60)
346
433
  result = _run_cli_sync(adapter, "status check", False, cwd, timeout, 0)
347
- authenticated = result["status"] == "success"
434
+ authenticated = result.status == "success"
348
435
  return {
349
436
  "name": adapter.config.name,
350
437
  "description": adapter.config.tool_description,
@@ -359,112 +446,13 @@ def _adapter_info(cwd: str, adapter: CLIAdapter) -> dict[str, Any]:
359
446
  async def list_tools() -> list[Tool]:
360
447
  adapter = get_adapter()
361
448
  tool_desc = adapter.config.tool_description
362
- parallel_desc = f"{tool_desc} Run multiple agents in parallel."
363
449
  status_desc = f"Verify {adapter.config.name} CLI is installed and authenticated"
364
- adapter_schema = {
365
- "type": "string",
366
- "enum": list(ADAPTER_REGISTRY.keys()),
367
- "description": "Backend to use (kimi, codex). Defaults to MOONBRIDGE_ADAPTER env or kimi.",
368
- }
369
- return [
370
- Tool(
371
- name="spawn_agent",
372
- description=tool_desc,
373
- inputSchema={
374
- "type": "object",
375
- "properties": {
376
- "prompt": {
377
- "type": "string",
378
- "description": "Instructions for the agent (task, context, constraints)",
379
- },
380
- "adapter": adapter_schema,
381
- "thinking": {
382
- "type": "boolean",
383
- "description": "Enable extended reasoning mode for complex tasks",
384
- "default": False,
385
- },
386
- "timeout_seconds": {
387
- "type": "integer",
388
- "description": "Max execution time (30-3600s)",
389
- "default": DEFAULT_TIMEOUT,
390
- "minimum": 30,
391
- "maximum": 3600,
392
- },
393
- "model": {
394
- "type": "string",
395
- "description": (
396
- "Model to use (e.g., 'gpt-5.2-codex', 'kimi-k2.5'). "
397
- "Falls back to MOONBRIDGE_{ADAPTER}_MODEL or MOONBRIDGE_MODEL env vars."
398
- ),
399
- },
400
- "reasoning_effort": {
401
- "type": "string",
402
- "enum": ["low", "medium", "high", "xhigh"],
403
- "description": (
404
- "Reasoning effort for Codex (low, medium, high, xhigh). "
405
- "Ignored for Kimi (use thinking instead)."
406
- ),
407
- },
408
- },
409
- "required": ["prompt"],
410
- },
411
- ),
412
- Tool(
413
- name="spawn_agents_parallel",
414
- description=parallel_desc,
415
- inputSchema={
416
- "type": "object",
417
- "properties": {
418
- "agents": {
419
- "type": "array",
420
- "description": "List of agent specs with prompt and optional settings",
421
- "items": {
422
- "type": "object",
423
- "properties": {
424
- "prompt": {"type": "string"},
425
- "adapter": adapter_schema,
426
- "thinking": {"type": "boolean", "default": False},
427
- "timeout_seconds": {
428
- "type": "integer",
429
- "description": "Max execution time (30-3600s)",
430
- "default": DEFAULT_TIMEOUT,
431
- "minimum": 30,
432
- "maximum": 3600,
433
- },
434
- "model": {
435
- "type": "string",
436
- "description": (
437
- "Model to use. Falls back to "
438
- "MOONBRIDGE_{ADAPTER}_MODEL or MOONBRIDGE_MODEL env vars."
439
- ),
440
- },
441
- "reasoning_effort": {
442
- "type": "string",
443
- "enum": ["low", "medium", "high", "xhigh"],
444
- "description": (
445
- "Reasoning effort for Codex (low, medium, high, xhigh). "
446
- "Ignored for Kimi."
447
- ),
448
- },
449
- },
450
- "required": ["prompt"],
451
- },
452
- },
453
- },
454
- "required": ["agents"],
455
- },
456
- ),
457
- Tool(
458
- name="list_adapters",
459
- description="List available adapters and their status",
460
- inputSchema={"type": "object", "properties": {}},
461
- ),
462
- Tool(
463
- name="check_status",
464
- description=status_desc,
465
- inputSchema={"type": "object", "properties": {}},
466
- ),
467
- ]
450
+ return build_tools(
451
+ adapter_names=tuple(ADAPTER_REGISTRY.keys()),
452
+ default_timeout=DEFAULT_TIMEOUT,
453
+ tool_description=tool_desc,
454
+ status_description=status_desc,
455
+ )
468
456
 
469
457
 
470
458
  async def handle_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
@@ -475,14 +463,14 @@ async def handle_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]
475
463
  adapter = get_adapter(arguments.get("adapter"))
476
464
  prompt = _validate_prompt(arguments["prompt"])
477
465
  thinking = _validate_thinking(adapter, bool(arguments.get("thinking", False)))
478
- timeout_seconds = _validate_timeout(arguments.get("timeout_seconds"))
466
+ timeout_seconds = _resolve_timeout(adapter, arguments.get("timeout_seconds"))
479
467
  model = _resolve_model(adapter, arguments.get("model"))
480
468
  reasoning_effort = arguments.get("reasoning_effort")
481
469
  loop = asyncio.get_running_loop()
482
470
  try:
483
471
  result = await loop.run_in_executor(
484
472
  None,
485
- _run_cli_sync,
473
+ _run_cli,
486
474
  adapter,
487
475
  prompt,
488
476
  thinking,
@@ -494,16 +482,16 @@ async def handle_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]
494
482
  )
495
483
  except asyncio.CancelledError:
496
484
  return _json_text(
497
- _result(
485
+ AgentResult(
498
486
  status="cancelled",
499
487
  output="",
500
488
  stderr=None,
501
489
  returncode=-1,
502
490
  duration_ms=0,
503
491
  agent_index=0,
504
- )
492
+ ).to_dict()
505
493
  )
506
- return _json_text(result)
494
+ return _json_text(result.to_dict())
507
495
 
508
496
  if name == "spawn_agents_parallel":
509
497
  agents = list(arguments["agents"])
@@ -520,12 +508,12 @@ async def handle_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]
520
508
  tasks.append(
521
509
  loop.run_in_executor(
522
510
  None,
523
- _run_cli_sync,
511
+ _run_cli,
524
512
  adapter,
525
513
  prompt,
526
514
  thinking,
527
515
  cwd,
528
- _validate_timeout(spec.get("timeout_seconds")),
516
+ _resolve_timeout(adapter, spec.get("timeout_seconds")),
529
517
  idx,
530
518
  model,
531
519
  reasoning_effort,
@@ -535,7 +523,7 @@ async def handle_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]
535
523
  results = await asyncio.gather(*tasks)
536
524
  except asyncio.CancelledError:
537
525
  cancelled = [
538
- _result(
526
+ AgentResult(
539
527
  status="cancelled",
540
528
  output="",
541
529
  stderr=None,
@@ -545,9 +533,9 @@ async def handle_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]
545
533
  )
546
534
  for idx in range(len(agents))
547
535
  ]
548
- return _json_text(cancelled)
549
- results.sort(key=lambda item: item["agent_index"])
550
- return _json_text(results)
536
+ return _json_text([item.to_dict() for item in cancelled])
537
+ results.sort(key=lambda item: item.agent_index)
538
+ return _json_text([item.to_dict() for item in results])
551
539
 
552
540
  if name == "list_adapters":
553
541
  info = [_adapter_info(cwd, adapter) for adapter in ADAPTER_REGISTRY.values()]
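With the sandbox plumbing above, a single `spawn_agent` response in 0.7.0 looks roughly like this when `MOONBRIDGE_SANDBOX=1`: the fields come from `AgentResult.to_dict()` plus the `raw["sandbox"]` block assembled in `_run_cli_sandboxed`. All values here are illustrative.

```python
# Shape of a sandboxed spawn_agent response (illustrative values).
response = {
    "status": "success",
    "output": "...agent stdout...",
    "stderr": None,
    "returncode": 0,
    "duration_ms": 42_000,
    "agent_index": 0,
    "raw": {
        "sandbox": {
            "enabled": True,
            "summary": {"added": 1, "modified": 2, "deleted": 0, "binary": 0},
            "diff": "--- a/src/app.py\n+++ b/src/app.py\n...",
            "truncated": False,
            # "path" appears only when MOONBRIDGE_SANDBOX_KEEP=1
        },
    },
}
```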
moonbridge/tools.py ADDED
@@ -0,0 +1,334 @@
1
+ """Tool schema definitions for Moonbridge MCP server.
2
+
3
+ This module provides dataclasses and functions for defining MCP tool schemas
4
+ in a reusable, type-safe manner.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from dataclasses import dataclass
10
+ from typing import Any
11
+
12
+ from mcp.types import Tool
13
+
14
+
15
+ @dataclass(frozen=True)
16
+ class ParameterDef:
17
+ """Definition for a JSON Schema parameter."""
18
+
19
+ type: str # "string", "integer", "boolean", "array"
20
+ description: str
21
+ default: Any = None
22
+ enum: tuple[str, ...] | None = None
23
+ minimum: int | None = None
24
+ maximum: int | None = None
25
+ items: dict[str, Any] | None = None # For array types
26
+
27
+
28
+ @dataclass(frozen=True)
29
+ class ToolDef:
30
+ """Definition for an MCP tool."""
31
+
32
+ name: str
33
+ description_template: str # May contain {adapter} placeholder
34
+ parameters: tuple[tuple[str, ParameterDef], ...] # Ordered (name, param) pairs
35
+ required: tuple[str, ...] = ()
36
+
37
+
38
+ # =============================================================================
39
+ # Reusable parameter definitions
40
+ # =============================================================================
41
+
42
+ PROMPT_PARAM = ParameterDef(
43
+ type="string",
44
+ description="Instructions for the agent (task, context, constraints)",
45
+ )
46
+
47
+ # Note: ADAPTER_PARAM enum is populated dynamically via build_adapter_param()
48
+ ADAPTER_PARAM_BASE = ParameterDef(
49
+ type="string",
50
+ description="Backend to use (kimi, codex). Defaults to MOONBRIDGE_ADAPTER env or kimi.",
51
+ # enum is set dynamically
52
+ )
53
+
54
+ THINKING_PARAM = ParameterDef(
55
+ type="boolean",
56
+ description="Enable extended reasoning mode for complex tasks",
57
+ default=False,
58
+ )
59
+
60
+ # Note: TIMEOUT_PARAM default is populated dynamically
61
+ TIMEOUT_PARAM_BASE = ParameterDef(
62
+ type="integer",
63
+ description=(
64
+ "Max execution time (30-3600s). "
65
+ "Defaults: Codex=1800s (30min), Kimi=600s (10min). "
66
+ "Complex implementations may need full 30min+."
67
+ ),
68
+ minimum=30,
69
+ maximum=3600,
70
+ # default is set dynamically
71
+ )
72
+
73
+ MODEL_PARAM = ParameterDef(
74
+ type="string",
75
+ description=(
76
+ "Model to use (e.g., 'gpt-5.2-codex', 'kimi-k2.5'). "
77
+ "Falls back to MOONBRIDGE_{ADAPTER}_MODEL or MOONBRIDGE_MODEL env vars."
78
+ ),
79
+ )
80
+
81
+ # Shorter model description for nested items
82
+ MODEL_PARAM_SHORT = ParameterDef(
83
+ type="string",
84
+ description=(
85
+ "Model to use. Falls back to "
86
+ "MOONBRIDGE_{ADAPTER}_MODEL or MOONBRIDGE_MODEL env vars."
87
+ ),
88
+ )
89
+
90
+ REASONING_EFFORT_PARAM = ParameterDef(
91
+ type="string",
92
+ description=(
93
+ "Reasoning effort for Codex (low, medium, high, xhigh). "
94
+ "Ignored for Kimi (use thinking instead)."
95
+ ),
96
+ enum=("low", "medium", "high", "xhigh"),
97
+ )
98
+
99
+ # Shorter reasoning_effort description for nested items
100
+ REASONING_EFFORT_PARAM_SHORT = ParameterDef(
101
+ type="string",
102
+ description=(
103
+ "Reasoning effort for Codex (low, medium, high, xhigh). "
104
+ "Ignored for Kimi."
105
+ ),
106
+ enum=("low", "medium", "high", "xhigh"),
107
+ )
108
+
109
+
110
+ # =============================================================================
111
+ # Helper functions for dynamic parameter creation
112
+ # =============================================================================
113
+
114
+
115
+ def _build_adapter_param(adapter_names: tuple[str, ...]) -> ParameterDef:
116
+ """Create adapter parameter with dynamic enum."""
117
+ return ParameterDef(
118
+ type="string",
119
+ description=ADAPTER_PARAM_BASE.description,
120
+ enum=adapter_names,
121
+ )
122
+
123
+
124
+ def _build_timeout_param(default_timeout: int) -> ParameterDef:
125
+ """Create timeout parameter with dynamic default.
126
+
127
+ Raises:
128
+ ValueError: If default_timeout is outside the valid range.
129
+ """
130
+ min_timeout = TIMEOUT_PARAM_BASE.minimum
131
+ max_timeout = TIMEOUT_PARAM_BASE.maximum
132
+ if min_timeout is not None and default_timeout < min_timeout:
133
+ raise ValueError(f"default_timeout must be >= {min_timeout}, got {default_timeout}")
134
+ if max_timeout is not None and default_timeout > max_timeout:
135
+ raise ValueError(f"default_timeout must be <= {max_timeout}, got {default_timeout}")
136
+ return ParameterDef(
137
+ type="integer",
138
+ description=TIMEOUT_PARAM_BASE.description,
139
+ default=default_timeout,
140
+ minimum=min_timeout,
141
+ maximum=max_timeout,
142
+ )
143
+
144
+
145
+ # =============================================================================
146
+ # Tool definitions
147
+ # =============================================================================
148
+
149
+ SPAWN_AGENT_TOOL = ToolDef(
150
+ name="spawn_agent",
151
+ description_template="{tool_description}",
152
+ parameters=(
153
+ ("prompt", PROMPT_PARAM),
154
+ ("adapter", ADAPTER_PARAM_BASE), # Will be replaced with dynamic version
155
+ ("thinking", THINKING_PARAM),
156
+ ("timeout_seconds", TIMEOUT_PARAM_BASE), # Will be replaced with dynamic version
157
+ ("model", MODEL_PARAM),
158
+ ("reasoning_effort", REASONING_EFFORT_PARAM),
159
+ ),
160
+ required=("prompt",),
161
+ )
162
+
163
+ SPAWN_AGENTS_PARALLEL_TOOL = ToolDef(
164
+ name="spawn_agents_parallel",
165
+ description_template="{tool_description} Run multiple agents in parallel.",
166
+ parameters=(), # Handled specially due to array items
167
+ required=("agents",),
168
+ )
169
+
170
+ LIST_ADAPTERS_TOOL = ToolDef(
171
+ name="list_adapters",
172
+ description_template="List available adapters and their status",
173
+ parameters=(),
174
+ required=(),
175
+ )
176
+
177
+ CHECK_STATUS_TOOL = ToolDef(
178
+ name="check_status",
179
+ description_template="{status_description}",
180
+ parameters=(),
181
+ required=(),
182
+ )
183
+
184
+
185
+ # =============================================================================
186
+ # Schema generation functions
187
+ # =============================================================================
188
+
189
+
190
+ def _param_to_schema(param: ParameterDef) -> dict[str, Any]:
191
+ """Convert a ParameterDef to a JSON Schema dict."""
192
+ schema: dict[str, Any] = {"type": param.type}
193
+
194
+ if param.description:
195
+ schema["description"] = param.description
196
+ if param.default is not None:
197
+ schema["default"] = param.default
198
+ if param.enum is not None:
199
+ schema["enum"] = list(param.enum)
200
+ if param.minimum is not None:
201
+ schema["minimum"] = param.minimum
202
+ if param.maximum is not None:
203
+ schema["maximum"] = param.maximum
204
+ if param.items is not None:
205
+ schema["items"] = param.items
206
+
207
+ return schema
208
+
209
+
210
+ def build_input_schema(
211
+ tool: ToolDef,
212
+ adapter_names: tuple[str, ...],
213
+ default_timeout: int,
214
+ ) -> dict[str, Any]:
215
+ """Convert ToolDef to MCP inputSchema dict.
216
+
217
+ Args:
218
+ tool: The tool definition to convert.
219
+ adapter_names: Tuple of available adapter names for enum.
220
+ default_timeout: Default timeout value for timeout parameters.
221
+
222
+ Returns:
223
+ A JSON Schema dict suitable for MCP Tool.inputSchema.
224
+ """
225
+ properties: dict[str, Any] = {}
226
+
227
+ for name, param in tool.parameters:
228
+ # Handle dynamic parameters
229
+ if name == "adapter":
230
+ param = _build_adapter_param(adapter_names)
231
+ elif name == "timeout_seconds":
232
+ param = _build_timeout_param(default_timeout)
233
+
234
+ properties[name] = _param_to_schema(param)
235
+
236
+ schema: dict[str, Any] = {
237
+ "type": "object",
238
+ "properties": properties,
239
+ }
240
+
241
+ if tool.required:
242
+ schema["required"] = list(tool.required)
243
+
244
+ return schema
245
+
246
+
247
+ def _build_agents_array_schema(
248
+ adapter_names: tuple[str, ...],
249
+ default_timeout: int,
250
+ ) -> dict[str, Any]:
251
+ """Build the schema for the agents array in spawn_agents_parallel."""
252
+ adapter_schema = _param_to_schema(_build_adapter_param(adapter_names))
253
+ timeout_schema = _param_to_schema(_build_timeout_param(default_timeout))
254
+
255
+ return {
256
+ "type": "array",
257
+ "description": "List of agent specs with prompt and optional settings",
258
+ "items": {
259
+ "type": "object",
260
+ "properties": {
261
+ "prompt": {"type": "string"},
262
+ "adapter": adapter_schema,
263
+ "thinking": {"type": "boolean", "default": False},
264
+ "timeout_seconds": timeout_schema,
265
+ "model": _param_to_schema(MODEL_PARAM_SHORT),
266
+ "reasoning_effort": _param_to_schema(REASONING_EFFORT_PARAM_SHORT),
267
+ },
268
+ "required": ["prompt"],
269
+ },
270
+ }
271
+
272
+
273
+ def build_tools(
274
+ adapter_names: tuple[str, ...],
275
+ default_timeout: int,
276
+ tool_description: str,
277
+ status_description: str,
278
+ ) -> list[Tool]:
279
+ """Build all MCP Tool objects from definitions.
280
+
281
+ Args:
282
+ adapter_names: Tuple of available adapter names.
283
+ default_timeout: Default timeout value in seconds.
284
+ tool_description: Description for the spawn_agent tool.
285
+ status_description: Description for the check_status tool.
286
+
287
+ Returns:
288
+ List of MCP Tool objects ready for registration.
289
+ """
290
+ # spawn_agent
291
+ spawn_agent_schema = build_input_schema(
292
+ SPAWN_AGENT_TOOL, adapter_names, default_timeout
293
+ )
294
+
295
+ # spawn_agents_parallel (special handling for array)
296
+ parallel_schema: dict[str, Any] = {
297
+ "type": "object",
298
+ "properties": {
299
+ "agents": _build_agents_array_schema(adapter_names, default_timeout),
300
+ },
301
+ "required": ["agents"],
302
+ }
303
+
304
+ # list_adapters
305
+ list_adapters_schema: dict[str, Any] = {"type": "object", "properties": {}}
306
+
307
+ # check_status
308
+ check_status_schema: dict[str, Any] = {"type": "object", "properties": {}}
309
+
310
+ return [
311
+ Tool(
312
+ name="spawn_agent",
313
+ description=tool_description,
314
+ inputSchema=spawn_agent_schema,
315
+ ),
316
+ Tool(
317
+ name="spawn_agents_parallel",
318
+ description=f"{tool_description} Run multiple agents in parallel.",
319
+ inputSchema=parallel_schema,
320
+ ),
321
+ Tool(
322
+ name="list_adapters",
323
+ description="List available adapters and their status",
324
+ inputSchema=list_adapters_schema,
325
+ ),
326
+ Tool(
327
+ name="check_status",
328
+ description=status_description,
329
+ inputSchema=check_status_schema,
330
+ ),
331
+ ]
332
+
333
+
334
+ __all__ = ["build_tools"]
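A short sketch of how the server consumes this module, mirroring the `list_tools` change in server.py; the adapter names and 600 s default match the shipped configuration, while the descriptions are stand-ins.

```python
# Illustrative: generating the MCP tool schemas the same way list_tools()
# now does, then inspecting the dynamically filled enum and default.
from moonbridge.tools import build_tools

tools = build_tools(
    adapter_names=("kimi", "codex"),
    default_timeout=600,
    tool_description="Spawn an AI coding agent.",  # stand-in description
    status_description="Verify the CLI is installed and authenticated",
)

spawn = next(t for t in tools if t.name == "spawn_agent")
props = spawn.inputSchema["properties"]
print(props["adapter"]["enum"])             # ['kimi', 'codex']
print(props["timeout_seconds"]["default"])  # 600
```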
moonbridge-0.5.2.dist-info/METADATA → moonbridge-0.7.0.dist-info/METADATA CHANGED
@@ -1,14 +1,14 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: moonbridge
3
- Version: 0.5.2
4
- Summary: MCP server for spawning Kimi K2.5 agents
3
+ Version: 0.7.0
4
+ Summary: MCP server for spawning AI coding agents (Kimi, Codex, and more)
5
5
  Project-URL: Homepage, https://github.com/misty-step/moonbridge
6
6
  Project-URL: Repository, https://github.com/misty-step/moonbridge
7
7
  Project-URL: Issues, https://github.com/misty-step/moonbridge/issues
8
8
  Author-email: Phaedrus <hello@mistystep.io>
9
9
  License-Expression: MIT
10
10
  License-File: LICENSE
11
- Keywords: agent,ai,claude,kimi,mcp
11
+ Keywords: agent,ai,claude,codex,kimi,mcp
12
12
  Classifier: Development Status :: 4 - Beta
13
13
  Classifier: Environment :: Console
14
14
  Classifier: Intended Audience :: Developers
@@ -31,7 +31,7 @@ Description-Content-Type: text/markdown
31
31
 
32
32
  **Your MCP client just got a team.**
33
33
 
34
- Spawn Kimi K2.5 agents from Claude Code, Cursor, or any MCP client. Run 10 approaches in parallel for the cost of one Claude request.
34
+ Spawn AI coding agents from Claude Code, Cursor, or any MCP client. Run 10 approaches in parallel for a fraction of the cost.
35
35
 
36
36
  ```bash
37
37
  uvx moonbridge
@@ -39,10 +39,12 @@ uvx moonbridge
39
39
 
40
40
  ## Quick Start
41
41
 
42
- 1. **Install Kimi CLI and authenticate:**
43
- ```bash
44
- uv tool install --python 3.13 kimi-cli && kimi login
45
- ```
42
+ 1. **Install at least one supported CLI:**
43
+
44
+ | Adapter | Install | Authenticate |
45
+ |---------|---------|--------------|
46
+ | Kimi (default) | `uv tool install --python 3.13 kimi-cli` | `kimi login` |
47
+ | Codex | `npm install -g @openai/codex` | Set `OPENAI_API_KEY` |
46
48
 
47
49
  2. **Add to MCP config** (`~/.mcp.json`):
48
50
  ```json
@@ -94,7 +96,8 @@ export MOONBRIDGE_SKIP_UPDATE_CHECK=1
94
96
  |------|----------|
95
97
  | `spawn_agent` | Single task: "Write tests for auth.ts" |
96
98
  | `spawn_agents_parallel` | Go wide: 10 agents, 10 approaches, pick the best |
97
- | `check_status` | Verify Kimi CLI is installed and authenticated |
99
+ | `check_status` | Verify the configured CLI is installed and authenticated |
100
+ | `list_adapters` | Show available adapters and their status |
98
101
 
99
102
  ### Example: Parallel Exploration
100
103
 
@@ -117,7 +120,10 @@ Three approaches. One request. You choose the winner.
117
120
  | Parameter | Type | Required | Description |
118
121
  |-----------|------|----------|-------------|
119
122
  | `prompt` | string | Yes | Task description for the agent |
120
- | `thinking` | boolean | No | Enable reasoning mode (default: false) |
123
+ | `adapter` | string | No | Backend to use: `kimi`, `codex` (default: `kimi`) |
124
+ | `model` | string | No | Model override (e.g., `gpt-5.2-codex`) |
125
+ | `thinking` | boolean | No | Enable reasoning mode (Kimi only) |
126
+ | `reasoning_effort` | string | No | Reasoning budget: `low`, `medium`, `high`, `xhigh` (Codex only) |
121
127
  | `timeout_seconds` | integer | No | Override default timeout (30-3600) |
122
128
 
123
129
  **`spawn_agents_parallel`**
@@ -126,7 +132,10 @@ Three approaches. One request. You choose the winner.
126
132
  |-----------|------|----------|-------------|
127
133
  | `agents` | array | Yes | List of agent configs (max 10) |
128
134
  | `agents[].prompt` | string | Yes | Task for this agent |
129
- | `agents[].thinking` | boolean | No | Enable reasoning for this agent |
135
+ | `agents[].adapter` | string | No | Backend for this agent |
136
+ | `agents[].model` | string | No | Model override for this agent |
137
+ | `agents[].thinking` | boolean | No | Enable reasoning (Kimi only) |
138
+ | `agents[].reasoning_effort` | string | No | Reasoning budget (Codex only) |
130
139
  | `agents[].timeout_seconds` | integer | No | Timeout for this agent |
131
140
 
132
141
  ## Response Format
@@ -136,12 +145,13 @@ All tools return JSON with these fields:
136
145
  | Field | Type | Description |
137
146
  |-------|------|-------------|
138
147
  | `status` | string | `success`, `error`, `timeout`, `auth_error`, or `cancelled` |
139
- | `output` | string | stdout from Kimi agent |
148
+ | `output` | string | stdout from the agent |
140
149
  | `stderr` | string\|null | stderr if any |
141
150
  | `returncode` | int | Process exit code (-1 for timeout/error) |
142
151
  | `duration_ms` | int | Execution time in milliseconds |
143
152
  | `agent_index` | int | Agent index (0 for single, 0-N for parallel) |
144
153
  | `message` | string? | Human-readable error context (when applicable) |
154
+ | `raw` | object? | Optional structured metadata (e.g., sandbox diff) |
145
155
 
146
156
  ## Configuration
147
157
 
@@ -154,41 +164,68 @@ All tools return JSON with these fields:
154
164
  | `MOONBRIDGE_MAX_AGENTS` | Maximum parallel agents |
155
165
  | `MOONBRIDGE_ALLOWED_DIRS` | Colon-separated allowlist of working directories |
156
166
  | `MOONBRIDGE_STRICT` | Set to `1` to require `ALLOWED_DIRS` (exits if unset) |
167
+ | `MOONBRIDGE_SANDBOX` | Set to `1` to run agents in a temp copy of cwd |
168
+ | `MOONBRIDGE_SANDBOX_KEEP` | Set to `1` to keep sandbox dir for inspection |
169
+ | `MOONBRIDGE_SANDBOX_MAX_DIFF` | Max diff size in bytes (default 500000) |
170
+ | `MOONBRIDGE_SANDBOX_MAX_COPY` | Max sandbox copy size in bytes (default 500MB) |
157
171
  | `MOONBRIDGE_LOG_LEVEL` | Set to `DEBUG` for verbose logging |
158
172
 
159
173
  ## Troubleshooting
160
174
 
161
- ### "Kimi CLI not found"
175
+ ### "CLI not found"
162
176
 
163
- Install the Kimi CLI:
177
+ Install the CLI for your chosen adapter:
164
178
 
165
179
  ```bash
180
+ # Kimi
166
181
  uv tool install --python 3.13 kimi-cli
167
182
  which kimi
183
+
184
+ # Codex
185
+ npm install -g @openai/codex
186
+ which codex
168
187
  ```
169
188
 
170
189
  ### "auth_error" responses
171
190
 
172
- Authenticate with Kimi:
191
+ Authenticate with your chosen CLI:
173
192
 
174
193
  ```bash
194
+ # Kimi
175
195
  kimi login
196
+
197
+ # Codex
198
+ export OPENAI_API_KEY=sk-...
176
199
  ```
177
200
 
178
201
  ### Timeout errors
179
202
 
180
- Increase the timeout for long-running tasks:
203
+ Adapters have sensible defaults: Codex=1800s (30min), Kimi=600s (10min).
204
+
205
+ For exceptionally long tasks, override explicitly:
181
206
 
182
207
  ```json
183
- {"prompt": "...", "timeout_seconds": 1800}
208
+ {"prompt": "...", "timeout_seconds": 3600}
184
209
  ```
185
210
 
186
- Or set a global default:
211
+ Or set per-adapter defaults via environment:
187
212
 
188
213
  ```bash
189
- export MOONBRIDGE_TIMEOUT=1800
214
+ export MOONBRIDGE_CODEX_TIMEOUT=2400 # 40 minutes
215
+ export MOONBRIDGE_KIMI_TIMEOUT=900 # 15 minutes
190
216
  ```
191
217
 
218
+ ## Timeout Best Practices
219
+
220
+ | Task Type | Recommended |
221
+ |-----------|-------------|
222
+ | Quick query, status | 60-180s |
223
+ | Simple edits | 300-600s |
224
+ | Feature implementation | 1200-1800s |
225
+ | Large refactor | 1800-3600s |
226
+
227
+ Priority resolution: explicit param > adapter env > adapter default > global env > 600s fallback
228
+
192
229
  ### "MOONBRIDGE_ALLOWED_DIRS is not set" warning
193
230
 
194
231
  By default, Moonbridge warns at startup if no directory restrictions are configured. This is expected for local development. For shared/production environments, set allowed directories:
@@ -197,6 +234,29 @@ By default, Moonbridge warns at startup if no directory restrictions are configu
197
234
  export MOONBRIDGE_ALLOWED_DIRS="/path/to/project:/another/path"
198
235
  ```
199
236
 
237
+ ## Sandbox Mode (Copy-on-Run)
238
+
239
+ Enable sandbox mode to run agents in a temporary copy of the working directory:
240
+
241
+ ```bash
242
+ export MOONBRIDGE_SANDBOX=1
243
+ ```
244
+
245
+ When enabled:
246
+ - Agents run in a temp copy of `cwd`.
247
+ - Host files stay unchanged by default.
248
+ - A unified diff + summary is included in `raw.sandbox`.
249
+
250
+ Optional:
251
+
252
+ ```bash
253
+ export MOONBRIDGE_SANDBOX_KEEP=1 # keep temp dir
254
+ export MOONBRIDGE_SANDBOX_MAX_DIFF=200000
255
+ export MOONBRIDGE_SANDBOX_MAX_COPY=300000000
256
+ ```
257
+
258
+ Limitations: this is not OS-level isolation. Agents can still read/write arbitrary host paths if they choose to. Use containers/VMs for strong isolation.
259
+
200
260
  To enforce restrictions (exit instead of warn):
201
261
 
202
262
  ```bash
moonbridge-0.7.0.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
1
+ moonbridge/__init__.py,sha256=vAOZaP2bQ71gulCrthXRbsd5zOWB5R3cdUHNrLuS87w,198
2
+ moonbridge/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
3
+ moonbridge/sandbox.py,sha256=4-eIu-rURtaxKKg-d3iWwNq_x6MB_uiJBBG2uMwoKcM,7907
4
+ moonbridge/server.py,sha256=FqnoAd-WAWOX-elNgyOQ-vAiFqsGOdsyQCIsnu1e1t0,19469
5
+ moonbridge/tools.py,sha256=uw338Dilrto2t5dL9XbK4O31-JdB7Vh9RqCXHg20gHI,10126
6
+ moonbridge/version_check.py,sha256=VQueK0O_b-2Xc-XjupJsoW3Zs1Kce5q_BgqBhANGXN8,4579
7
+ moonbridge/adapters/__init__.py,sha256=w3pLvjtC2XnUhf9UzNmniQB3oq4rG8gorSH0tWR-BEE,988
8
+ moonbridge/adapters/base.py,sha256=REoEsAcqEvyVQpTgz6ytd9ioxag--nnvX90YBXMQG8Y,1716
9
+ moonbridge/adapters/codex.py,sha256=GtU4CrJ4zt0WDcKKaOeN7gH4JFIBAo3L7KAZ99zRjiY,2935
10
+ moonbridge/adapters/kimi.py,sha256=ejCxG2OGr0Qr4n0psL6p96_mMJ3lLKMbGcNYWkuC0uA,2189
11
+ moonbridge-0.7.0.dist-info/METADATA,sha256=cWa3osY8GxLAkoJPkkwT4sOKYWkcW80eMjdPcv2FPKw,8305
12
+ moonbridge-0.7.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
13
+ moonbridge-0.7.0.dist-info/entry_points.txt,sha256=kgL38HQy3adncDQl_o5sdtPRog56zKdHk6pKKzyR6Ww,54
14
+ moonbridge-0.7.0.dist-info/licenses/LICENSE,sha256=7WMSJoybL2cUot_wb9GUrw5mzfFmtrDzqlMS9ZE709g,1065
15
+ moonbridge-0.7.0.dist-info/RECORD,,
moonbridge-0.5.2.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
1
- moonbridge/__init__.py,sha256=x3eYCVqjhWKoPmGJvOV3IALPoS1DFO-iZRECuVbNgtQ,198
2
- moonbridge/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
3
- moonbridge/server.py,sha256=rP3c0hcuUDxC5QqPNWboJZYnMS4hZw6oqkzmg-N0WwM,21194
4
- moonbridge/version_check.py,sha256=VQueK0O_b-2Xc-XjupJsoW3Zs1Kce5q_BgqBhANGXN8,4579
5
- moonbridge/adapters/__init__.py,sha256=w3pLvjtC2XnUhf9UzNmniQB3oq4rG8gorSH0tWR-BEE,988
6
- moonbridge/adapters/base.py,sha256=bj_Ms55h2lwDmEO0CZ1RFSAA9IHgNbX2LI1xgQEftLY,942
7
- moonbridge/adapters/codex.py,sha256=JTt9B3eXqset6ZrwwlnHzcno5PMdrjY2GdLSNPYkowQ,2873
8
- moonbridge/adapters/kimi.py,sha256=75QFPTMVpgbgkVGv8GEpIYY1zrIOZ0kJ-aCgd8Tx0TA,2129
9
- moonbridge-0.5.2.dist-info/METADATA,sha256=aaN1N00Q5oY2bE6faQWW163_goAPVMQDtEEWTlkUnBE,5984
10
- moonbridge-0.5.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
11
- moonbridge-0.5.2.dist-info/entry_points.txt,sha256=kgL38HQy3adncDQl_o5sdtPRog56zKdHk6pKKzyR6Ww,54
12
- moonbridge-0.5.2.dist-info/licenses/LICENSE,sha256=7WMSJoybL2cUot_wb9GUrw5mzfFmtrDzqlMS9ZE709g,1065
13
- moonbridge-0.5.2.dist-info/RECORD,,