modelmux 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modelmux-0.4.0/.gitignore +25 -0
- modelmux-0.4.0/LICENSE +21 -0
- modelmux-0.4.0/PKG-INFO +109 -0
- modelmux-0.4.0/README.md +82 -0
- modelmux-0.4.0/pyproject.toml +68 -0
- modelmux-0.4.0/src/modelmux/__init__.py +3 -0
- modelmux-0.4.0/src/modelmux/adapters/__init__.py +21 -0
- modelmux-0.4.0/src/modelmux/adapters/base.py +252 -0
- modelmux-0.4.0/src/modelmux/adapters/claude.py +60 -0
- modelmux-0.4.0/src/modelmux/adapters/codex.py +108 -0
- modelmux-0.4.0/src/modelmux/adapters/gemini.py +100 -0
- modelmux-0.4.0/src/modelmux/audit.py +140 -0
- modelmux-0.4.0/src/modelmux/cli.py +11 -0
- modelmux-0.4.0/src/modelmux/config.py +298 -0
- modelmux-0.4.0/src/modelmux/detect.py +153 -0
- modelmux-0.4.0/src/modelmux/policy.py +149 -0
- modelmux-0.4.0/src/modelmux/server.py +361 -0
- modelmux-0.4.0/tests/test_audit_policy.py +263 -0
- modelmux-0.4.0/tests/test_detect.py +227 -0
- modelmux-0.4.0/tests/test_e2e.py +319 -0
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*.egg-info/
|
|
5
|
+
dist/
|
|
6
|
+
build/
|
|
7
|
+
.venv/
|
|
8
|
+
venv/
|
|
9
|
+
|
|
10
|
+
# OS
|
|
11
|
+
.DS_Store
|
|
12
|
+
Thumbs.db
|
|
13
|
+
|
|
14
|
+
# IDE
|
|
15
|
+
.vscode/
|
|
16
|
+
.idea/
|
|
17
|
+
*.swp
|
|
18
|
+
*.swo
|
|
19
|
+
|
|
20
|
+
# Runtime
|
|
21
|
+
/tmp/
|
|
22
|
+
*.log
|
|
23
|
+
|
|
24
|
+
# uv lock file (tool project, not pinning deps)
|
|
25
|
+
uv.lock
|
modelmux-0.4.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 pure-maple
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
modelmux-0.4.0/PKG-INFO
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: modelmux
|
|
3
|
+
Version: 0.4.0
|
|
4
|
+
Summary: Model multiplexer — unified MCP server for cross-platform multi-model AI collaboration
|
|
5
|
+
Project-URL: Homepage, https://github.com/pure-maple/modelmux
|
|
6
|
+
Project-URL: Repository, https://github.com/pure-maple/modelmux
|
|
7
|
+
Project-URL: Issues, https://github.com/pure-maple/modelmux/issues
|
|
8
|
+
Project-URL: Documentation, https://github.com/pure-maple/modelmux#readme
|
|
9
|
+
Author: pure-maple
|
|
10
|
+
License-Expression: MIT
|
|
11
|
+
License-File: LICENSE
|
|
12
|
+
Keywords: ai,claude,cli,codex,collaboration,gemini,mcp,modelmux,multi-model,multiplexer
|
|
13
|
+
Classifier: Development Status :: 4 - Beta
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
23
|
+
Requires-Python: >=3.10
|
|
24
|
+
Requires-Dist: mcp[cli]>=1.20.0
|
|
25
|
+
Requires-Dist: pydantic>=2.0
|
|
26
|
+
Description-Content-Type: text/markdown
|
|
27
|
+
|
|
28
|
+
# modelmux
|
|
29
|
+
|
|
30
|
+
Model multiplexer — unified MCP server for cross-platform multi-model AI collaboration.
|
|
31
|
+
|
|
32
|
+
Route tasks to **Codex CLI**, **Gemini CLI**, and **Claude Code CLI** through a single MCP interface with smart routing and caller auto-detection.
|
|
33
|
+
|
|
34
|
+
## Install
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
# One-command install for Claude Code
|
|
38
|
+
claude mcp add modelmux -s user -- uvx modelmux
|
|
39
|
+
|
|
40
|
+
# Codex CLI (~/.codex/config.toml)
|
|
41
|
+
# [mcp_servers.modelmux]
|
|
42
|
+
# command = "uvx"
|
|
43
|
+
# args = ["modelmux"]
|
|
44
|
+
|
|
45
|
+
# Gemini CLI (~/.gemini/settings.json)
|
|
46
|
+
# {"mcpServers": {"modelmux": {"command": "uvx", "args": ["modelmux"]}}}
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## Tools
|
|
50
|
+
|
|
51
|
+
- **`collab_dispatch`** — Send a task to a model and get structured results
|
|
52
|
+
- `provider`: `"auto"` / `"codex"` / `"gemini"` / `"claude"`
|
|
53
|
+
- `task`: The prompt to send
|
|
54
|
+
- `workdir`, `sandbox`, `session_id`, `timeout`, `model`, `profile`, `reasoning_effort`
|
|
55
|
+
- **`collab_check`** — Check which CLIs are available, show detected caller and config
|
|
56
|
+
|
|
57
|
+
## Smart Routing
|
|
58
|
+
|
|
59
|
+
`provider="auto"` routes tasks by keyword analysis and auto-excludes the calling platform:
|
|
60
|
+
|
|
61
|
+
```
|
|
62
|
+
From Claude Code → routes to Codex or Gemini (never back to Claude)
|
|
63
|
+
From Codex CLI → routes to Claude or Gemini (never back to Codex)
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## Audit & Policy
|
|
67
|
+
|
|
68
|
+
Every dispatch call is logged to `~/.config/modelmux/audit.jsonl` for debugging and cost tracking.
|
|
69
|
+
|
|
70
|
+
Policy enforcement via `~/.config/modelmux/policy.json`:
|
|
71
|
+
|
|
72
|
+
```json
|
|
73
|
+
{
|
|
74
|
+
"blocked_providers": ["gemini"],
|
|
75
|
+
"blocked_sandboxes": ["full"],
|
|
76
|
+
"max_timeout": 600,
|
|
77
|
+
"max_calls_per_hour": 30,
|
|
78
|
+
"max_calls_per_day": 200
|
|
79
|
+
}
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
`collab_check()` now shows policy summary and audit stats.
|
|
83
|
+
|
|
84
|
+
## User Configuration
|
|
85
|
+
|
|
86
|
+
Create `.modelmux/profiles.toml` or `~/.config/modelmux/profiles.toml`:
|
|
87
|
+
|
|
88
|
+
```toml
|
|
89
|
+
[routing]
|
|
90
|
+
default_provider = "codex"
|
|
91
|
+
|
|
92
|
+
[[routing.rules]]
|
|
93
|
+
provider = "gemini"
|
|
94
|
+
[routing.rules.match]
|
|
95
|
+
keywords = ["frontend", "react", "css"]
|
|
96
|
+
|
|
97
|
+
[profiles.budget]
|
|
98
|
+
[profiles.budget.providers.codex]
|
|
99
|
+
model = "gpt-4.1-mini"
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
## Links
|
|
103
|
+
|
|
104
|
+
- [Full Documentation](https://github.com/pure-maple/modelmux)
|
|
105
|
+
- [中文文档](https://github.com/pure-maple/modelmux/blob/main/docs/README_CN.md)
|
|
106
|
+
|
|
107
|
+
## License
|
|
108
|
+
|
|
109
|
+
MIT
|
modelmux-0.4.0/README.md
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
# modelmux
|
|
2
|
+
|
|
3
|
+
Model multiplexer — unified MCP server for cross-platform multi-model AI collaboration.
|
|
4
|
+
|
|
5
|
+
Route tasks to **Codex CLI**, **Gemini CLI**, and **Claude Code CLI** through a single MCP interface with smart routing and caller auto-detection.
|
|
6
|
+
|
|
7
|
+
## Install
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# One-command install for Claude Code
|
|
11
|
+
claude mcp add modelmux -s user -- uvx modelmux
|
|
12
|
+
|
|
13
|
+
# Codex CLI (~/.codex/config.toml)
|
|
14
|
+
# [mcp_servers.modelmux]
|
|
15
|
+
# command = "uvx"
|
|
16
|
+
# args = ["modelmux"]
|
|
17
|
+
|
|
18
|
+
# Gemini CLI (~/.gemini/settings.json)
|
|
19
|
+
# {"mcpServers": {"modelmux": {"command": "uvx", "args": ["modelmux"]}}}
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Tools
|
|
23
|
+
|
|
24
|
+
- **`collab_dispatch`** — Send a task to a model and get structured results
|
|
25
|
+
- `provider`: `"auto"` / `"codex"` / `"gemini"` / `"claude"`
|
|
26
|
+
- `task`: The prompt to send
|
|
27
|
+
- `workdir`, `sandbox`, `session_id`, `timeout`, `model`, `profile`, `reasoning_effort`
|
|
28
|
+
- **`collab_check`** — Check which CLIs are available, show detected caller and config
|
|
29
|
+
|
|
30
|
+
## Smart Routing
|
|
31
|
+
|
|
32
|
+
`provider="auto"` routes tasks by keyword analysis and auto-excludes the calling platform:
|
|
33
|
+
|
|
34
|
+
```
|
|
35
|
+
From Claude Code → routes to Codex or Gemini (never back to Claude)
|
|
36
|
+
From Codex CLI → routes to Claude or Gemini (never back to Codex)
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## Audit & Policy
|
|
40
|
+
|
|
41
|
+
Every dispatch call is logged to `~/.config/modelmux/audit.jsonl` for debugging and cost tracking.
|
|
42
|
+
|
|
43
|
+
Policy enforcement via `~/.config/modelmux/policy.json`:
|
|
44
|
+
|
|
45
|
+
```json
|
|
46
|
+
{
|
|
47
|
+
"blocked_providers": ["gemini"],
|
|
48
|
+
"blocked_sandboxes": ["full"],
|
|
49
|
+
"max_timeout": 600,
|
|
50
|
+
"max_calls_per_hour": 30,
|
|
51
|
+
"max_calls_per_day": 200
|
|
52
|
+
}
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
`collab_check()` now shows policy summary and audit stats.
|
|
56
|
+
|
|
57
|
+
## User Configuration
|
|
58
|
+
|
|
59
|
+
Create `.modelmux/profiles.toml` or `~/.config/modelmux/profiles.toml`:
|
|
60
|
+
|
|
61
|
+
```toml
|
|
62
|
+
[routing]
|
|
63
|
+
default_provider = "codex"
|
|
64
|
+
|
|
65
|
+
[[routing.rules]]
|
|
66
|
+
provider = "gemini"
|
|
67
|
+
[routing.rules.match]
|
|
68
|
+
keywords = ["frontend", "react", "css"]
|
|
69
|
+
|
|
70
|
+
[profiles.budget]
|
|
71
|
+
[profiles.budget.providers.codex]
|
|
72
|
+
model = "gpt-4.1-mini"
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
## Links
|
|
76
|
+
|
|
77
|
+
- [Full Documentation](https://github.com/pure-maple/modelmux)
|
|
78
|
+
- [中文文档](https://github.com/pure-maple/modelmux/blob/main/docs/README_CN.md)
|
|
79
|
+
|
|
80
|
+
## License
|
|
81
|
+
|
|
82
|
+
MIT
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "modelmux"
|
|
7
|
+
version = "0.4.0"
|
|
8
|
+
description = "Model multiplexer — unified MCP server for cross-platform multi-model AI collaboration"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.10"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "pure-maple" },
|
|
14
|
+
]
|
|
15
|
+
keywords = [
|
|
16
|
+
"mcp",
|
|
17
|
+
"ai",
|
|
18
|
+
"collaboration",
|
|
19
|
+
"multi-model",
|
|
20
|
+
"modelmux",
|
|
21
|
+
"multiplexer",
|
|
22
|
+
"codex",
|
|
23
|
+
"gemini",
|
|
24
|
+
"claude",
|
|
25
|
+
"cli",
|
|
26
|
+
]
|
|
27
|
+
classifiers = [
|
|
28
|
+
"Development Status :: 4 - Beta",
|
|
29
|
+
"Intended Audience :: Developers",
|
|
30
|
+
"License :: OSI Approved :: MIT License",
|
|
31
|
+
"Programming Language :: Python :: 3",
|
|
32
|
+
"Programming Language :: Python :: 3.10",
|
|
33
|
+
"Programming Language :: Python :: 3.11",
|
|
34
|
+
"Programming Language :: Python :: 3.12",
|
|
35
|
+
"Programming Language :: Python :: 3.13",
|
|
36
|
+
"Topic :: Software Development :: Libraries",
|
|
37
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
38
|
+
]
|
|
39
|
+
dependencies = [
|
|
40
|
+
"mcp[cli]>=1.20.0",
|
|
41
|
+
"pydantic>=2.0",
|
|
42
|
+
]
|
|
43
|
+
|
|
44
|
+
[dependency-groups]
|
|
45
|
+
dev = [
|
|
46
|
+
"ruff>=0.9.0",
|
|
47
|
+
"build>=1.0.0",
|
|
48
|
+
"twine>=6.0.0",
|
|
49
|
+
]
|
|
50
|
+
|
|
51
|
+
[project.urls]
|
|
52
|
+
Homepage = "https://github.com/pure-maple/modelmux"
|
|
53
|
+
Repository = "https://github.com/pure-maple/modelmux"
|
|
54
|
+
Issues = "https://github.com/pure-maple/modelmux/issues"
|
|
55
|
+
Documentation = "https://github.com/pure-maple/modelmux#readme"
|
|
56
|
+
|
|
57
|
+
[project.scripts]
|
|
58
|
+
modelmux = "modelmux.cli:main"
|
|
59
|
+
|
|
60
|
+
[tool.hatch.build.targets.wheel]
|
|
61
|
+
packages = ["src/modelmux"]
|
|
62
|
+
|
|
63
|
+
[tool.ruff]
|
|
64
|
+
target-version = "py310"
|
|
65
|
+
src = ["src"]
|
|
66
|
+
|
|
67
|
+
[tool.ruff.lint]
|
|
68
|
+
select = ["E", "F", "I", "W"]
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""Model CLI adapters for modelmux."""
|
|
2
|
+
|
|
3
|
+
from modelmux.adapters.base import AdapterResult, BaseAdapter
|
|
4
|
+
from modelmux.adapters.claude import ClaudeAdapter
|
|
5
|
+
from modelmux.adapters.codex import CodexAdapter
|
|
6
|
+
from modelmux.adapters.gemini import GeminiAdapter
|
|
7
|
+
|
|
8
|
+
# Provider registry: maps the provider identifier (as accepted by
# collab_dispatch's `provider` argument) to its adapter class.
# Keys must stay in sync with the provider names documented in README.md.
ADAPTERS: dict[str, type[BaseAdapter]] = {
    "codex": CodexAdapter,
    "gemini": GeminiAdapter,
    "claude": ClaudeAdapter,
}

# Public API of the adapters package.
__all__ = [
    "BaseAdapter",
    "AdapterResult",
    "CodexAdapter",
    "GeminiAdapter",
    "ClaudeAdapter",
    "ADAPTERS",
]
|
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
"""Base adapter for CLI model bridges."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
import queue
|
|
8
|
+
import shutil
|
|
9
|
+
import subprocess
|
|
10
|
+
import time
|
|
11
|
+
import uuid
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
from threading import Thread
|
|
14
|
+
from typing import Generator
|
|
15
|
+
|
|
16
|
+
# Seconds to sleep after a "turn.completed" event before terminating the
# child CLI, giving it a chance to flush any remaining output.
GRACEFUL_SHUTDOWN_DELAY = 0.3
# Poll interval (seconds) when reading from the reader-thread queue, so the
# main loop can periodically re-check the overall timeout.
QUEUE_READ_TIMEOUT = 0.5
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class AdapterResult:
    """Canonical result envelope shared by every model adapter.

    All adapters normalize their CLI output into this shape so the server
    can treat providers uniformly.
    """

    run_id: str = ""
    provider: str = ""
    # One of: "success" | "error" | "timeout".
    status: str = "error"
    summary: str = ""
    output: str = ""
    session_id: str = ""
    duration_seconds: float = 0.0
    error: str | None = None

    def to_dict(self) -> dict:
        """Serialize to a plain dict.

        Duration is rounded to one decimal; the "error" key is included
        only when an error message is set.
        """
        payload = dict(
            run_id=self.run_id,
            provider=self.provider,
            status=self.status,
            summary=self.summary,
            output=self.output,
            session_id=self.session_id,
            duration_seconds=round(self.duration_seconds, 1),
        )
        if self.error:
            payload["error"] = self.error
        return payload
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def is_turn_completed(line: str) -> bool:
    """Return True when *line* is a JSON event of type "turn.completed".

    Any non-JSON line, or JSON whose top-level value is not an object,
    yields False.
    """
    try:
        event = json.loads(line)
    except (json.JSONDecodeError, TypeError):
        return False
    return isinstance(event, dict) and event.get("type") == "turn.completed"
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def stream_subprocess(
    cmd: list[str],
    cwd: str | None = None,
    timeout: int = 300,
    env_overrides: dict[str, str] | None = None,
) -> Generator[str, None, int]:
    """Run a subprocess and yield non-empty stdout lines via a threaded queue.

    stderr is merged into stdout. The generator's return value (carried by
    ``StopIteration.value``) is the process exit code, with 124 used to
    signal a timeout.

    Args:
        cmd: argv list; ``cmd[0]`` is resolved against PATH.
        cwd: working directory for the child process.
        timeout: wall-clock budget in seconds before the child is stopped.
        env_overrides: extra environment variables layered over os.environ.

    Raises:
        FileNotFoundError: if ``cmd[0]`` cannot be found on PATH.
    """
    resolved = shutil.which(cmd[0])
    if not resolved:
        raise FileNotFoundError(f"Command not found: {cmd[0]}")
    # Bug fix: the original assigned cmd[0] = resolved, mutating the
    # caller's list as a side effect. Build a fresh argv instead.
    cmd = [resolved, *cmd[1:]]

    proc_env = None
    if env_overrides:
        proc_env = os.environ.copy()
        proc_env.update(env_overrides)

    process = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        cwd=cwd,
        env=proc_env,
        encoding="utf-8",
        errors="replace",
    )

    output_queue: queue.Queue[str | None] = queue.Queue()

    def read_output() -> None:
        # Reader thread: forward non-empty lines; once the CLI reports turn
        # completion, give it a moment to flush, then terminate it.
        assert process.stdout is not None
        for line in iter(process.stdout.readline, ""):
            stripped = line.strip()
            if stripped:
                output_queue.put(stripped)
                if is_turn_completed(stripped):
                    time.sleep(GRACEFUL_SHUTDOWN_DELAY)
                    process.terminate()
                    break
        process.stdout.close()
        output_queue.put(None)  # sentinel: end of output

    reader = Thread(target=read_output, daemon=True)
    reader.start()

    def shutdown(grace: float) -> None:
        # Terminate the child, escalating to SIGKILL if it ignores SIGTERM.
        # Bug fix: the original called process.wait(timeout=...) bare, so a
        # stubborn child raised subprocess.TimeoutExpired out of the
        # generator and could be left running.
        process.terminate()
        try:
            process.wait(timeout=grace)
        except subprocess.TimeoutExpired:
            process.kill()
            process.wait()

    start_time = time.monotonic()
    while True:
        if time.monotonic() - start_time > timeout:
            shutdown(5)
            return 124  # conventional timeout exit code (as used by GNU timeout)

        try:
            line = output_queue.get(timeout=QUEUE_READ_TIMEOUT)
        except queue.Empty:
            if process.poll() is not None:
                break  # process exited without the sentinel arriving
            continue

        if line is None:
            break
        yield line

    try:
        process.wait(timeout=10)
    except subprocess.TimeoutExpired:
        # Same escalation as above: never propagate, never leave a zombie.
        process.kill()
        process.wait()
    return process.returncode or 0
|
128
|
+
|
|
129
|
+
|
|
130
|
+
class BaseAdapter:
    """Base class for model CLI adapters.

    Subclasses supply the binary name, command construction, and output
    parsing; `run` handles process execution, timeout detection, and
    wrapping everything in the canonical AdapterResult envelope.
    """

    provider_name: str = "unknown"

    def check_available(self) -> bool:
        """Check if the CLI binary is available on PATH."""
        return shutil.which(self._binary_name()) is not None

    def _binary_name(self) -> str:
        """Return the executable name of this provider's CLI. Must be overridden."""
        raise NotImplementedError

    def build_command(
        self,
        prompt: str,
        workdir: str,
        sandbox: str = "read-only",
        session_id: str = "",
        extra_args: dict | None = None,
    ) -> list[str]:
        """Build the argv list for a single CLI invocation. Must be overridden."""
        raise NotImplementedError

    def parse_output(self, lines: list[str]) -> tuple[str, str, str]:
        """Parse collected output lines. Must be overridden.

        Returns (agent_text, session_id, error_text).
        """
        raise NotImplementedError

    async def run(
        self,
        prompt: str,
        workdir: str,
        sandbox: str = "read-only",
        session_id: str = "",
        timeout: int = 300,
        extra_args: dict | None = None,
        env_overrides: dict[str, str] | None = None,
    ) -> AdapterResult:
        """Execute a task and return the canonical result.

        NOTE(review): despite being declared async, this drives the child
        process synchronously and blocks the event loop for the duration of
        the call — confirm that is acceptable for the stdio MCP server.
        """
        run_id = str(uuid.uuid4())[:8]
        start = time.monotonic()

        if not self.check_available():
            return AdapterResult(
                run_id=run_id,
                provider=self.provider_name,
                status="error",
                error=f"{self._binary_name()} not found on PATH",
            )

        try:
            cmd = self.build_command(prompt, workdir, sandbox, session_id, extra_args)
        except Exception as e:
            return AdapterResult(
                run_id=run_id,
                provider=self.provider_name,
                status="error",
                error=f"Failed to build command: {e}",
            )

        lines: list[str] = []
        exit_code = 0
        try:
            gen = stream_subprocess(
                cmd,
                cwd=workdir,
                timeout=timeout,
                env_overrides=env_overrides,
            )
            # Bug fix: the original exhausted the generator with a `for`
            # loop and then called next(gen) to fetch the return value.
            # But the for loop had already consumed the StopIteration
            # carrying the exit code; the extra next() raised a fresh
            # StopIteration whose .value is always None, so exit_code was
            # always 0 and the 124 timeout marker was never observed
            # (timeouts were reported as regular results). Drive the
            # generator manually so the real exit code is captured.
            try:
                while True:
                    lines.append(next(gen))
            except StopIteration as stop:
                exit_code = stop.value if stop.value is not None else 0
        except FileNotFoundError as e:
            return AdapterResult(
                run_id=run_id,
                provider=self.provider_name,
                status="error",
                error=str(e),
                duration_seconds=time.monotonic() - start,
            )
        except Exception as e:
            return AdapterResult(
                run_id=run_id,
                provider=self.provider_name,
                status="error",
                error=f"Subprocess error: {e}",
                duration_seconds=time.monotonic() - start,
            )

        duration = time.monotonic() - start

        if exit_code == 124:
            return AdapterResult(
                run_id=run_id,
                provider=self.provider_name,
                status="timeout",
                output="\n".join(lines[-50:]),
                duration_seconds=duration,
                error=f"Timed out after {timeout}s",
            )

        agent_text, new_session_id, error_text = self.parse_output(lines)

        # Summary: first 200 chars of the agent text with newlines flattened.
        summary = agent_text[:200].replace("\n", " ") if agent_text else ""

        status = "success" if agent_text and not error_text else "error"

        return AdapterResult(
            run_id=run_id,
            provider=self.provider_name,
            status=status,
            summary=summary,
            output=agent_text or error_text or "\n".join(lines),
            session_id=new_session_id or session_id,
            duration_seconds=duration,
            error=error_text if error_text else None,
        )
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"""Claude Code CLI adapter.
|
|
2
|
+
|
|
3
|
+
Wraps `claude -p` (print/non-interactive mode).
|
|
4
|
+
Claude outputs plain text, no structured JSON events.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from modelmux.adapters.base import BaseAdapter
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ClaudeAdapter(BaseAdapter):
    """Adapter wrapping the Claude Code CLI in print mode (`claude -p`).

    The CLI emits plain text rather than structured JSON events; a leading
    "Session:" line, when present, carries the session identifier.
    """

    provider_name = "claude"

    def _binary_name(self) -> str:
        return "claude"

    def build_command(
        self,
        prompt: str,
        workdir: str,
        sandbox: str = "read-only",
        session_id: str = "",
        extra_args: dict | None = None,
    ) -> list[str]:
        """Assemble the argv for one non-interactive `claude -p` call."""
        argv = ["claude", "-p", prompt]

        opts = extra_args or {}
        model = opts.get("model")
        if model:
            argv += ["--model", model]
        for tool in opts.get("allowed_tools") or []:
            argv += ["--allowedTools", tool]

        if session_id:
            argv += ["--resume", session_id]

        return argv

    def parse_output(self, lines: list[str]) -> tuple[str, str, str]:
        """Parse Claude's plain-text output.

        Lines beginning with "Session:"/"session:" are treated as session
        metadata and excluded from the agent text; everything else is
        joined verbatim. Returns (agent_text, session_id, error_text) with
        error_text always empty.
        """
        resumed_session = ""
        body: list[str] = []

        for raw in lines:
            if raw.startswith(("Session:", "session:")):
                # Keep only the value after the first colon.
                _, _, rest = raw.partition(":")
                resumed_session = rest.strip()
            else:
                body.append(raw)

        return "\n".join(body), resumed_session, ""