router-maestro 0.1.6__tar.gz → 0.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. {router_maestro-0.1.6 → router_maestro-0.1.7}/CLAUDE.md +2 -2
  2. {router_maestro-0.1.6 → router_maestro-0.1.7}/PKG-INFO +24 -3
  3. {router_maestro-0.1.6 → router_maestro-0.1.7}/README.md +22 -2
  4. {router_maestro-0.1.6 → router_maestro-0.1.7}/pyproject.toml +4 -1
  5. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/__init__.py +1 -1
  6. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/config.py +126 -0
  7. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/providers/__init__.py +8 -0
  8. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/providers/base.py +80 -0
  9. router_maestro-0.1.7/src/router_maestro/providers/copilot.py +667 -0
  10. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/routing/router.py +158 -2
  11. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/app.py +8 -1
  12. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/routes/__init__.py +8 -1
  13. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/routes/anthropic.py +79 -0
  14. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/routes/chat.py +1 -2
  15. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/routes/models.py +1 -2
  16. router_maestro-0.1.7/src/router_maestro/server/routes/responses.py +517 -0
  17. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/schemas/__init__.py +33 -0
  18. router_maestro-0.1.7/src/router_maestro/server/schemas/responses.py +214 -0
  19. {router_maestro-0.1.6 → router_maestro-0.1.7}/uv.lock +12 -1
  20. router_maestro-0.1.6/src/router_maestro/providers/copilot.py +0 -346
  21. {router_maestro-0.1.6 → router_maestro-0.1.7}/.env.example +0 -0
  22. {router_maestro-0.1.6 → router_maestro-0.1.7}/.github/workflows/ci.yml +0 -0
  23. {router_maestro-0.1.6 → router_maestro-0.1.7}/.github/workflows/release.yml +0 -0
  24. {router_maestro-0.1.6 → router_maestro-0.1.7}/.gitignore +0 -0
  25. {router_maestro-0.1.6 → router_maestro-0.1.7}/.markdownlint.json +0 -0
  26. {router_maestro-0.1.6 → router_maestro-0.1.7}/Dockerfile +0 -0
  27. {router_maestro-0.1.6 → router_maestro-0.1.7}/LICENSE +0 -0
  28. {router_maestro-0.1.6 → router_maestro-0.1.7}/Makefile +0 -0
  29. {router_maestro-0.1.6 → router_maestro-0.1.7}/docker-compose.yml +0 -0
  30. {router_maestro-0.1.6 → router_maestro-0.1.7}/docs/deployment.md +0 -0
  31. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/__main__.py +0 -0
  32. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/auth/__init__.py +0 -0
  33. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/auth/github_oauth.py +0 -0
  34. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/auth/manager.py +0 -0
  35. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/auth/storage.py +0 -0
  36. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/__init__.py +0 -0
  37. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/auth.py +0 -0
  38. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/client.py +0 -0
  39. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/context.py +0 -0
  40. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/main.py +0 -0
  41. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/model.py +0 -0
  42. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/cli/server.py +0 -0
  43. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/__init__.py +0 -0
  44. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/contexts.py +0 -0
  45. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/paths.py +0 -0
  46. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/priorities.py +0 -0
  47. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/providers.py +0 -0
  48. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/server.py +0 -0
  49. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/config/settings.py +0 -0
  50. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/providers/anthropic.py +0 -0
  51. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/providers/openai.py +0 -0
  52. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/providers/openai_compat.py +0 -0
  53. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/routing/__init__.py +0 -0
  54. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/__init__.py +0 -0
  55. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/middleware/__init__.py +0 -0
  56. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/middleware/auth.py +0 -0
  57. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/oauth_sessions.py +0 -0
  58. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/routes/admin.py +0 -0
  59. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/schemas/admin.py +0 -0
  60. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/schemas/anthropic.py +0 -0
  61. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/schemas/openai.py +0 -0
  62. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/server/translation.py +0 -0
  63. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/utils/__init__.py +0 -0
  64. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/utils/logging.py +0 -0
  65. {router_maestro-0.1.6 → router_maestro-0.1.7}/src/router_maestro/utils/tokens.py +0 -0
  66. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/__init__.py +0 -0
  67. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_anthropic_models.py +0 -0
  68. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_auth.py +0 -0
  69. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_config.py +0 -0
  70. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_providers.py +0 -0
  71. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_router.py +0 -0
  72. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_translation.py +0 -0
  73. {router_maestro-0.1.6 → router_maestro-0.1.7}/tests/test_utils.py +0 -0
@@ -51,8 +51,8 @@ Router-Maestro is a multi-model routing system that exposes both OpenAI-compatib
51
51
 
52
52
  **Server (`src/router_maestro/server/`)**
53
53
  - FastAPI application with two API flavors:
54
- - OpenAI-compatible: `/v1/chat/completions`, `/v1/models`
55
- - Anthropic-compatible: `/v1/messages`, `/api/anthropic/v1/messages`
54
+ - OpenAI-compatible: `/api/openai/v1/chat/completions`, `/api/openai/v1/models`
55
+ - Anthropic-compatible: `/api/anthropic/v1/messages`
56
56
  - `translation.py` - Converts between Anthropic and OpenAI request/response formats
57
57
  - `schemas/` - Pydantic models for both API formats
58
58
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: router-maestro
3
- Version: 0.1.6
3
+ Version: 0.1.7
4
4
  Summary: Multi-model routing and load balancing system with OpenAI-compatible API
5
5
  Author-email: Kanwen Li <likanwen@icloud.com>
6
6
  License-Expression: MIT
@@ -26,6 +26,7 @@ Requires-Dist: pydantic>=2.5.0
26
26
  Requires-Dist: python-dotenv>=1.0.0
27
27
  Requires-Dist: rich>=13.7.0
28
28
  Requires-Dist: tiktoken>=0.5.0
29
+ Requires-Dist: tomlkit>=0.12.0
29
30
  Requires-Dist: typer>=0.12.0
30
31
  Requires-Dist: uvicorn>=0.27.0
31
32
  Provides-Extra: dev
@@ -118,14 +119,33 @@ router-maestro auth login github-copilot
118
119
  # 3. Authorize "GitHub Copilot Chat"
119
120
  ```
120
121
 
121
- ### 4. Configure Claude Code
122
+ ### 4. Configure Your CLI Tool
123
+
124
+ #### Claude Code
122
125
 
123
126
  ```bash
124
127
  router-maestro config claude-code
125
128
  # Follow the wizard to select models
126
129
  ```
127
130
 
128
- **Done!** Now run `claude` and your requests will route through Router-Maestro.
131
+ #### OpenAI Codex (CLI, Extension, App)
132
+
133
+ ```bash
134
+ router-maestro config codex
135
+ # Follow the wizard to select models
136
+ ```
137
+
138
+ After configuration, set the API key environment variable:
139
+
140
+ ```bash
141
+ # Get your API key
142
+ router-maestro server show-key
143
+
144
+ # Set the environment variable (add to your shell profile)
145
+ export ROUTER_MAESTRO_API_KEY="your-api-key-here"
146
+ ```
147
+
148
+ **Done!** Now run `claude` or `codex` and your requests will route through Router-Maestro.
129
149
 
130
150
  > **For production deployment**, see the [Deployment](#deployment) section.
131
151
 
@@ -258,6 +278,7 @@ router-maestro model list
258
278
  | Command | Description |
259
279
  | -------------------- | ----------------------------- |
260
280
  | `config claude-code` | Generate Claude Code settings |
281
+ | `config codex` | Generate Codex config (CLI/Extension/App) |
261
282
 
262
283
  ## API Reference
263
284
 
@@ -81,14 +81,33 @@ router-maestro auth login github-copilot
81
81
  # 3. Authorize "GitHub Copilot Chat"
82
82
  ```
83
83
 
84
- ### 4. Configure Claude Code
84
+ ### 4. Configure Your CLI Tool
85
+
86
+ #### Claude Code
85
87
 
86
88
  ```bash
87
89
  router-maestro config claude-code
88
90
  # Follow the wizard to select models
89
91
  ```
90
92
 
91
- **Done!** Now run `claude` and your requests will route through Router-Maestro.
93
+ #### OpenAI Codex (CLI, Extension, App)
94
+
95
+ ```bash
96
+ router-maestro config codex
97
+ # Follow the wizard to select models
98
+ ```
99
+
100
+ After configuration, set the API key environment variable:
101
+
102
+ ```bash
103
+ # Get your API key
104
+ router-maestro server show-key
105
+
106
+ # Set the environment variable (add to your shell profile)
107
+ export ROUTER_MAESTRO_API_KEY="your-api-key-here"
108
+ ```
109
+
110
+ **Done!** Now run `claude` or `codex` and your requests will route through Router-Maestro.
92
111
 
93
112
  > **For production deployment**, see the [Deployment](#deployment) section.
94
113
 
@@ -221,6 +240,7 @@ router-maestro model list
221
240
  | Command | Description |
222
241
  | -------------------- | ----------------------------- |
223
242
  | `config claude-code` | Generate Claude Code settings |
243
+ | `config codex` | Generate Codex config (CLI/Extension/App) |
224
244
 
225
245
  ## API Reference
226
246
 
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "router-maestro"
3
- version = "0.1.6"
3
+ version = "0.1.7"
4
4
  description = "Multi-model routing and load balancing system with OpenAI-compatible API"
5
5
  readme = "README.md"
6
6
  license = "MIT"
@@ -49,6 +49,9 @@ dependencies = [
49
49
  # Utils
50
50
  "aiosqlite>=0.19.0",
51
51
  "python-dotenv>=1.0.0",
52
+
53
+ # TOML writing (for Codex config generation)
54
+ "tomlkit>=0.12.0",
52
55
  ]
53
56
 
54
57
  [project.optional-dependencies]
@@ -1,3 +1,3 @@
1
1
  """Router-Maestro: Multi-model routing and load balancing system."""
2
2
 
3
- __version__ = "0.1.6"
3
+ __version__ = "0.1.7"
@@ -3,9 +3,11 @@
3
3
  import asyncio
4
4
  import json
5
5
  import shutil
6
+ import tomllib
6
7
  from datetime import datetime
7
8
  from pathlib import Path
8
9
 
10
+ import tomlkit
9
11
  import typer
10
12
  from rich.console import Console
11
13
  from rich.panel import Panel
@@ -24,6 +26,10 @@ CLI_TOOLS = {
24
26
  "name": "Claude Code",
25
27
  "description": "Generate settings.json for Claude Code CLI",
26
28
  },
29
+ "codex": {
30
+ "name": "OpenAI Codex",
31
+ "description": "Generate config.toml for OpenAI Codex CLI",
32
+ },
27
33
  }
28
34
 
29
35
 
@@ -35,6 +41,14 @@ def get_claude_code_paths() -> dict[str, Path]:
35
41
  }
36
42
 
37
43
 
44
def get_codex_paths() -> dict[str, Path]:
    """Return the Codex ``config.toml`` locations, keyed by config level.

    ``user`` is the per-user config under the home directory; ``project``
    is relative to the current working directory.
    """
    relative = Path(".codex") / "config.toml"
    return {
        "user": Path.home() / relative,
        "project": Path.cwd() / relative,
    }
50
+
51
+
38
52
  @app.callback(invoke_without_command=True)
39
53
  def config_callback(ctx: typer.Context) -> None:
40
54
  """Generate configuration for CLI tools (interactive selection if not specified)."""
@@ -60,6 +74,8 @@ def config_callback(ctx: typer.Context) -> None:
60
74
  # Dispatch to the appropriate command
61
75
  if tool_key == "claude-code":
62
76
  claude_code_config()
77
+ elif tool_key == "codex":
78
+ codex_config()
63
79
 
64
80
 
65
81
  @app.command(name="claude-code")
@@ -175,3 +191,113 @@ def claude_code_config() -> None:
175
191
  border_style="green",
176
192
  )
177
193
  )
194
+
195
+
196
@app.command(name="codex")
def codex_config() -> None:
    """Generate OpenAI Codex CLI config.toml for router-maestro.

    Interactive wizard that:
      1. asks for the config level (user vs. project),
      2. offers to back up an existing config.toml,
      3. fetches the model list from the running router-maestro server,
      4. merges router-maestro provider settings into the TOML file while
         preserving any unrelated sections the user already has.

    Raises:
        typer.Exit: If the server is unreachable or no models are available.
    """
    # Step 1: Select level
    console.print("\n[bold]Step 1: Select configuration level[/bold]")
    console.print(" 1. User-level (~/.codex/config.toml)")
    console.print(" 2. Project-level (./.codex/config.toml)")
    choice = Prompt.ask("Select", choices=["1", "2"], default="1")

    paths = get_codex_paths()
    level = "user" if choice == "1" else "project"
    config_path = paths[level]

    # Step 2: Backup if exists (timestamped name, so repeated runs never clobber)
    if config_path.exists():
        console.print(f"\n[yellow]config.toml already exists at {config_path}[/yellow]")
        if Confirm.ask("Backup existing file?", default=True):
            backup_path = config_path.with_suffix(
                f".toml.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            )
            shutil.copy(config_path, backup_path)
            console.print(f"[green]Backed up to {backup_path}[/green]")

    # Step 3: Get models from server
    try:
        client = get_admin_client()
        models = asyncio.run(client.list_models())
    except ServerNotRunningError as e:
        console.print(f"[red]{e}[/red]")
        console.print("[dim]Tip: Start router-maestro server first.[/dim]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)

    if not models:
        console.print("[red]No models available. Please authenticate first.[/red]")
        raise typer.Exit(1)

    # Display models
    console.print("\n[bold]Available models:[/bold]")
    table = Table()
    table.add_column("#", style="dim")
    table.add_column("Model Key", style="green")
    table.add_column("Name", style="white")
    for i, model in enumerate(models, 1):
        table.add_row(str(i), f"{model['provider']}/{model['id']}", model["name"])
    console.print(table)

    # Select model ("0", non-numeric, or out-of-range input keeps auto-routing)
    console.print("\n[bold]Step 2: Select model[/bold]")
    model_choice = Prompt.ask("Enter number (or 0 for auto-routing)", default="0")
    selected_model = "router-maestro"
    if model_choice != "0" and model_choice.isdigit():
        idx = int(model_choice) - 1
        if 0 <= idx < len(models):
            m = models[idx]
            selected_model = f"{m['provider']}/{m['id']}"

    # Step 4: Generate config — reuse the admin client created in Step 3
    # instead of constructing a second one.
    base_url = (
        client.endpoint.rstrip("/") if hasattr(client, "endpoint") else "http://localhost:8080"
    )
    openai_url = f"{base_url}/api/openai/v1"

    # Load existing config to preserve other sections
    existing_config: tomlkit.TOMLDocument = tomlkit.document()
    if config_path.exists():
        try:
            with open(config_path, "rb") as f:
                existing_config = tomlkit.load(f)
        # tomlkit raises its own ParseError on malformed TOML — it is NOT a
        # subclass of tomllib.TOMLDecodeError, so catching only the latter
        # would crash the wizard on a corrupted file. Keep TOMLDecodeError in
        # the tuple for safety should the parsing backend ever change.
        except (tomlkit.exceptions.ParseError, tomllib.TOMLDecodeError, OSError):
            pass  # If file is corrupted, start fresh

    # Update configuration
    existing_config["model"] = selected_model
    existing_config["model_provider"] = "router-maestro"

    # Create or update model_providers section
    if "model_providers" not in existing_config:
        existing_config["model_providers"] = tomlkit.table()

    provider_config = tomlkit.table()
    provider_config["name"] = "Router Maestro"
    provider_config["base_url"] = openai_url
    provider_config["env_key"] = "ROUTER_MAESTRO_API_KEY"
    provider_config["wire_api"] = "responses"
    existing_config["model_providers"]["router-maestro"] = provider_config

    # Write config
    config_path.parent.mkdir(parents=True, exist_ok=True)
    with open(config_path, "w", encoding="utf-8") as f:
        f.write(tomlkit.dumps(existing_config))

    console.print(
        Panel(
            f"[green]Created {config_path}[/green]\n\n"
            f"Model: {selected_model}\n\n"
            f"Endpoint: {openai_url}\n\n"
            "[dim]Start router-maestro server before using Codex:[/dim]\n"
            " router-maestro server start\n\n"
            "[dim]Set API key environment variable (optional):[/dim]\n"
            " export ROUTER_MAESTRO_API_KEY=your-key",
            title="Success",
            border_style="green",
        )
    )
@@ -9,6 +9,10 @@ from router_maestro.providers.base import (
9
9
  Message,
10
10
  ModelInfo,
11
11
  ProviderError,
12
+ ResponsesRequest,
13
+ ResponsesResponse,
14
+ ResponsesStreamChunk,
15
+ ResponsesToolCall,
12
16
  )
13
17
  from router_maestro.providers.copilot import CopilotProvider
14
18
  from router_maestro.providers.openai import OpenAIProvider
@@ -23,6 +27,10 @@ __all__ = [
23
27
  "ChatResponse",
24
28
  "ChatStreamChunk",
25
29
  "ModelInfo",
30
+ "ResponsesRequest",
31
+ "ResponsesResponse",
32
+ "ResponsesStreamChunk",
33
+ "ResponsesToolCall",
26
34
  # Providers
27
35
  "CopilotProvider",
28
36
  "OpenAIProvider",
@@ -59,6 +59,53 @@ class ModelInfo:
59
59
  provider: str
60
60
 
61
61
 
62
@dataclass
class ResponsesToolCall:
    """A tool/function call from the Responses API."""

    call_id: str  # Identifier correlating this call with its eventual output
    name: str  # Name of the tool/function being invoked
    arguments: str  # Raw arguments string as provided by the API (not parsed here)
69
+
70
+
71
@dataclass
class ResponsesRequest:
    """Request for the Responses API (used by Codex models)."""

    model: str  # Target model identifier
    input: str | list  # Can be string or list of message dicts
    stream: bool = False  # Whether the caller wants a streamed response
    instructions: str | None = None  # System/developer instructions, if any
    temperature: float = 1.0  # Sampling temperature
    max_output_tokens: int | None = None  # None means no explicit cap is sent
    # Tool support (passed through as raw dicts, not validated here)
    tools: list[dict] | None = None
    tool_choice: str | dict | None = None
    parallel_tool_calls: bool | None = None  # None leaves the provider default
85
+
86
+
87
@dataclass
class ResponsesResponse:
    """Response from the Responses API."""

    content: str  # Assembled text output of the completion
    model: str  # Model that produced the response
    usage: dict | None = None  # Token-usage accounting, when the provider reports it
    tool_calls: list[ResponsesToolCall] | None = None  # Requested tool invocations, if any
95
+
96
+
97
@dataclass
class ResponsesStreamChunk:
    """A chunk from streaming Responses API completion."""

    content: str  # Text delta carried by this chunk (may be empty)
    finish_reason: str | None = None  # Set when the stream is ending
    usage: dict | None = None  # Usage stats, when the provider reports them
    # Tool call support: either a fully-assembled call or a streamed fragment
    tool_call: ResponsesToolCall | None = None  # A complete tool call
    tool_call_delta: dict | None = None  # Partial tool call for streaming
107
+
108
+
62
109
  class ProviderError(Exception):
63
110
  """Error from a provider."""
64
111
 
@@ -121,3 +168,36 @@ class BaseProvider(ABC):
121
168
  Override this for providers that need token refresh.
122
169
  """
123
170
  pass
171
+
172
    async def responses_completion(self, request: ResponsesRequest) -> ResponsesResponse:
        """Generate a Responses API completion (for Codex models).

        Base implementation is a stub; providers that support the
        Responses API override it.

        Args:
            request: Responses completion request

        Returns:
            Responses completion response

        Raises:
            NotImplementedError: If provider does not support Responses API
        """
        raise NotImplementedError("Provider does not support Responses API")
185
+
186
    async def responses_completion_stream(
        self, request: ResponsesRequest
    ) -> AsyncIterator[ResponsesStreamChunk]:
        """Generate a streaming Responses API completion (for Codex models).

        Base implementation is a stub; providers that support the
        Responses API override it.

        Args:
            request: Responses completion request

        Yields:
            Responses completion chunks

        Raises:
            NotImplementedError: If provider does not support Responses API
        """
        # The ``yield`` below makes this an async generator, so calling it
        # returns a generator and the NotImplementedError surfaces on first
        # iteration rather than at call time.
        raise NotImplementedError("Provider does not support Responses API")
        # Make this a generator (required for type checking)
        if False:
            yield ResponsesStreamChunk(content="")