router-maestro 0.1.5.tar.gz → 0.1.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. router_maestro-0.1.7/.github/workflows/ci.yml +55 -0
  2. router_maestro-0.1.7/.github/workflows/release.yml +167 -0
  3. {router_maestro-0.1.5 → router_maestro-0.1.7}/.gitignore +11 -0
  4. {router_maestro-0.1.5 → router_maestro-0.1.7}/CLAUDE.md +2 -2
  5. {router_maestro-0.1.5 → router_maestro-0.1.7}/PKG-INFO +27 -3
  6. {router_maestro-0.1.5 → router_maestro-0.1.7}/README.md +25 -2
  7. {router_maestro-0.1.5 → router_maestro-0.1.7}/pyproject.toml +4 -1
  8. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/__init__.py +1 -1
  9. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/config.py +126 -0
  10. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/providers/__init__.py +8 -0
  11. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/providers/base.py +80 -0
  12. router_maestro-0.1.7/src/router_maestro/providers/copilot.py +667 -0
  13. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/routing/router.py +158 -2
  14. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/app.py +8 -1
  15. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/routes/__init__.py +8 -1
  16. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/routes/anthropic.py +172 -0
  17. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/routes/chat.py +1 -2
  18. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/routes/models.py +1 -2
  19. router_maestro-0.1.7/src/router_maestro/server/routes/responses.py +517 -0
  20. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/schemas/__init__.py +33 -0
  21. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/schemas/anthropic.py +21 -0
  22. router_maestro-0.1.7/src/router_maestro/server/schemas/responses.py +214 -0
  23. router_maestro-0.1.7/tests/test_anthropic_models.py +220 -0
  24. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/test_auth.py +8 -7
  25. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/test_providers.py +0 -2
  26. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/test_router.py +1 -3
  27. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/test_translation.py +5 -7
  28. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/test_utils.py +0 -2
  29. {router_maestro-0.1.5 → router_maestro-0.1.7}/uv.lock +12 -1
  30. router_maestro-0.1.5/src/router_maestro/providers/copilot.py +0 -346
  31. {router_maestro-0.1.5 → router_maestro-0.1.7}/.env.example +0 -0
  32. {router_maestro-0.1.5 → router_maestro-0.1.7}/.markdownlint.json +0 -0
  33. {router_maestro-0.1.5 → router_maestro-0.1.7}/Dockerfile +0 -0
  34. {router_maestro-0.1.5 → router_maestro-0.1.7}/LICENSE +0 -0
  35. {router_maestro-0.1.5 → router_maestro-0.1.7}/Makefile +0 -0
  36. {router_maestro-0.1.5 → router_maestro-0.1.7}/docker-compose.yml +0 -0
  37. {router_maestro-0.1.5 → router_maestro-0.1.7}/docs/deployment.md +0 -0
  38. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/__main__.py +0 -0
  39. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/auth/__init__.py +0 -0
  40. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/auth/github_oauth.py +0 -0
  41. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/auth/manager.py +0 -0
  42. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/auth/storage.py +0 -0
  43. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/__init__.py +0 -0
  44. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/auth.py +0 -0
  45. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/client.py +0 -0
  46. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/context.py +0 -0
  47. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/main.py +0 -0
  48. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/model.py +0 -0
  49. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/cli/server.py +0 -0
  50. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/__init__.py +0 -0
  51. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/contexts.py +0 -0
  52. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/paths.py +0 -0
  53. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/priorities.py +0 -0
  54. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/providers.py +0 -0
  55. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/server.py +0 -0
  56. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/config/settings.py +0 -0
  57. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/providers/anthropic.py +0 -0
  58. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/providers/openai.py +0 -0
  59. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/providers/openai_compat.py +0 -0
  60. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/routing/__init__.py +0 -0
  61. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/__init__.py +0 -0
  62. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/middleware/__init__.py +0 -0
  63. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/middleware/auth.py +0 -0
  64. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/oauth_sessions.py +0 -0
  65. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/routes/admin.py +0 -0
  66. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/schemas/admin.py +0 -0
  67. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/schemas/openai.py +0 -0
  68. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/server/translation.py +0 -0
  69. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/utils/__init__.py +0 -0
  70. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/utils/logging.py +0 -0
  71. {router_maestro-0.1.5 → router_maestro-0.1.7}/src/router_maestro/utils/tokens.py +0 -0
  72. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/__init__.py +0 -0
  73. {router_maestro-0.1.5 → router_maestro-0.1.7}/tests/test_config.py +0 -0
@@ -0,0 +1,55 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [master, main]
6
+ pull_request:
7
+ branches: [master, main]
8
+
9
+ concurrency:
10
+ group: ${{ github.workflow }}-${{ github.ref }}
11
+ cancel-in-progress: true
12
+
13
+ jobs:
14
+ lint:
15
+ name: Lint
16
+ runs-on: ubuntu-latest
17
+ steps:
18
+ - uses: actions/checkout@v4
19
+
20
+ - name: Install uv
21
+ uses: astral-sh/setup-uv@v5
22
+
23
+ - name: Set up Python
24
+ run: uv python install 3.12
25
+
26
+ - name: Install dependencies
27
+ run: uv sync --extra dev
28
+
29
+ - name: Run ruff check
30
+ run: uv run ruff check src/ tests/
31
+
32
+ - name: Run ruff format check
33
+ run: uv run ruff format --check src/ tests/
34
+
35
+ test:
36
+ name: Test (Python ${{ matrix.python-version }})
37
+ runs-on: ubuntu-latest
38
+ strategy:
39
+ fail-fast: false
40
+ matrix:
41
+ python-version: ["3.11", "3.12"]
42
+ steps:
43
+ - uses: actions/checkout@v4
44
+
45
+ - name: Install uv
46
+ uses: astral-sh/setup-uv@v5
47
+
48
+ - name: Set up Python ${{ matrix.python-version }}
49
+ run: uv python install ${{ matrix.python-version }}
50
+
51
+ - name: Install dependencies
52
+ run: uv sync --extra dev
53
+
54
+ - name: Run tests
55
+ run: uv run pytest tests/ -v
@@ -0,0 +1,167 @@
1
+ name: Release
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - "v*.*.*"
7
+ workflow_dispatch:
8
+ inputs:
9
+ version:
10
+ description: "Version to release (e.g., 0.1.6). Leave empty to use pyproject.toml version."
11
+ required: false
12
+ type: string
13
+ skip_pypi:
14
+ description: "Skip PyPI publishing"
15
+ required: false
16
+ type: boolean
17
+ default: false
18
+ skip_docker:
19
+ description: "Skip Docker publishing"
20
+ required: false
21
+ type: boolean
22
+ default: false
23
+
24
+ concurrency:
25
+ group: release
26
+ cancel-in-progress: false
27
+
28
+ jobs:
29
+ test:
30
+ name: Test
31
+ runs-on: ubuntu-latest
32
+ steps:
33
+ - uses: actions/checkout@v4
34
+
35
+ - name: Install uv
36
+ uses: astral-sh/setup-uv@v5
37
+
38
+ - name: Set up Python
39
+ run: uv python install 3.12
40
+
41
+ - name: Install dependencies
42
+ run: uv sync --extra dev
43
+
44
+ - name: Run ruff check
45
+ run: uv run ruff check src/ tests/
46
+
47
+ - name: Run tests
48
+ run: uv run pytest tests/ -v
49
+
50
+ prepare:
51
+ name: Prepare Release
52
+ runs-on: ubuntu-latest
53
+ needs: test
54
+ outputs:
55
+ version: ${{ steps.version.outputs.version }}
56
+ docker_tags: ${{ steps.docker.outputs.tags }}
57
+ steps:
58
+ - uses: actions/checkout@v4
59
+
60
+ - name: Determine version
61
+ id: version
62
+ run: |
63
+ if [[ "${{ github.event_name }}" == "push" && "${{ github.ref_type }}" == "tag" ]]; then
64
+ # Extract version from tag (v1.2.3 -> 1.2.3)
65
+ VERSION="${GITHUB_REF_NAME#v}"
66
+ elif [[ -n "${{ inputs.version }}" ]]; then
67
+ # Use workflow_dispatch input
68
+ VERSION="${{ inputs.version }}"
69
+ else
70
+ # Read from pyproject.toml
71
+ VERSION=$(grep -Po '(?<=^version = ")[^"]*' pyproject.toml)
72
+ fi
73
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
74
+ echo "Determined version: $VERSION"
75
+
76
+ - name: Prepare Docker tags
77
+ id: docker
78
+ run: |
79
+ VERSION="${{ steps.version.outputs.version }}"
80
+ TAGS="likanwen/router-maestro:${VERSION},likanwen/router-maestro:latest"
81
+ echo "tags=$TAGS" >> $GITHUB_OUTPUT
82
+ echo "Docker tags: $TAGS"
83
+
84
+ publish-pypi:
85
+ name: Publish to PyPI
86
+ runs-on: ubuntu-latest
87
+ needs: prepare
88
+ if: ${{ !inputs.skip_pypi }}
89
+ environment:
90
+ name: pypi
91
+ url: https://pypi.org/project/router-maestro/
92
+ permissions:
93
+ id-token: write
94
+ steps:
95
+ - uses: actions/checkout@v4
96
+
97
+ - name: Install uv
98
+ uses: astral-sh/setup-uv@v5
99
+
100
+ - name: Set up Python
101
+ run: uv python install 3.12
102
+
103
+ - name: Build package
104
+ run: uv build
105
+
106
+ - name: Verify build version
107
+ run: |
108
+ EXPECTED_VERSION="${{ needs.prepare.outputs.version }}"
109
+ BUILT_VERSION=$(ls dist/*.tar.gz | grep -Po 'router_maestro-\K[0-9]+\.[0-9]+\.[0-9]+')
110
+ if [[ "$BUILT_VERSION" != "$EXPECTED_VERSION" ]]; then
111
+ echo "Version mismatch! Expected: $EXPECTED_VERSION, Built: $BUILT_VERSION"
112
+ exit 1
113
+ fi
114
+ echo "Version verified: $BUILT_VERSION"
115
+
116
+ - name: Publish to PyPI
117
+ uses: pypa/gh-action-pypi-publish@release/v1
118
+ with:
119
+ verbose: true
120
+
121
+ publish-docker:
122
+ name: Publish Docker Image
123
+ runs-on: ubuntu-latest
124
+ needs: prepare
125
+ if: ${{ !inputs.skip_docker }}
126
+ steps:
127
+ - uses: actions/checkout@v4
128
+
129
+ - name: Set up QEMU
130
+ uses: docker/setup-qemu-action@v3
131
+
132
+ - name: Set up Docker Buildx
133
+ uses: docker/setup-buildx-action@v3
134
+
135
+ - name: Login to Docker Hub
136
+ uses: docker/login-action@v3
137
+ with:
138
+ username: ${{ secrets.DOCKER_USERNAME }}
139
+ password: ${{ secrets.DOCKER_TOKEN }}
140
+
141
+ - name: Build and push
142
+ uses: docker/build-push-action@v6
143
+ with:
144
+ context: .
145
+ platforms: linux/amd64,linux/arm64
146
+ push: true
147
+ tags: ${{ needs.prepare.outputs.docker_tags }}
148
+ cache-from: type=gha
149
+ cache-to: type=gha,mode=max
150
+
151
+ create-release:
152
+ name: Create GitHub Release
153
+ runs-on: ubuntu-latest
154
+ needs: [prepare, publish-pypi, publish-docker]
155
+ if: ${{ github.event_name == 'push' && github.ref_type == 'tag' }}
156
+ permissions:
157
+ contents: write
158
+ steps:
159
+ - uses: actions/checkout@v4
160
+
161
+ - name: Create GitHub Release
162
+ uses: softprops/action-gh-release@v2
163
+ with:
164
+ name: v${{ needs.prepare.outputs.version }}
165
+ generate_release_notes: true
166
+ draft: false
167
+ prerelease: false
@@ -101,3 +101,14 @@ dmypy.json
101
101
  *.tmp
102
102
  *.bak
103
103
  *~
104
+
105
+ # Auto Claude data directory
106
+ .auto-claude/
107
+
108
+ # Auto Claude generated files
109
+ .auto-claude-security.json
110
+ .auto-claude-status
111
+ .claude_settings.json
112
+ .worktrees/
113
+ .security-key
114
+ logs/security/
@@ -51,8 +51,8 @@ Router-Maestro is a multi-model routing system that exposes both OpenAI-compatib
51
51
 
52
52
  **Server (`src/router_maestro/server/`)**
53
53
  - FastAPI application with two API flavors:
54
- - OpenAI-compatible: `/v1/chat/completions`, `/v1/models`
55
- - Anthropic-compatible: `/v1/messages`, `/api/anthropic/v1/messages`
54
+ - OpenAI-compatible: `/api/openai/v1/chat/completions`, `/api/openai/v1/models`
55
+ - Anthropic-compatible: `/api/anthropic/v1/messages`
56
56
  - `translation.py` - Converts between Anthropic and OpenAI request/response formats
57
57
  - `schemas/` - Pydantic models for both API formats
58
58
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: router-maestro
3
- Version: 0.1.5
3
+ Version: 0.1.7
4
4
  Summary: Multi-model routing and load balancing system with OpenAI-compatible API
5
5
  Author-email: Kanwen Li <likanwen@icloud.com>
6
6
  License-Expression: MIT
@@ -26,6 +26,7 @@ Requires-Dist: pydantic>=2.5.0
26
26
  Requires-Dist: python-dotenv>=1.0.0
27
27
  Requires-Dist: rich>=13.7.0
28
28
  Requires-Dist: tiktoken>=0.5.0
29
+ Requires-Dist: tomlkit>=0.12.0
29
30
  Requires-Dist: typer>=0.12.0
30
31
  Requires-Dist: uvicorn>=0.27.0
31
32
  Provides-Extra: dev
@@ -37,6 +38,9 @@ Description-Content-Type: text/markdown
37
38
 
38
39
  # Router-Maestro
39
40
 
41
+ [![CI](https://github.com/MadSkittles/Router-Maestro/actions/workflows/ci.yml/badge.svg)](https://github.com/MadSkittles/Router-Maestro/actions/workflows/ci.yml)
42
+ [![Release](https://github.com/MadSkittles/Router-Maestro/actions/workflows/release.yml/badge.svg)](https://github.com/MadSkittles/Router-Maestro/actions/workflows/release.yml)
43
+
40
44
  Multi-model routing router with OpenAI-compatible and Anthropic-compatible APIs. Route LLM requests across GitHub Copilot, OpenAI, Anthropic, and custom providers with intelligent fallback and priority-based selection.
41
45
 
42
46
  ## TL;DR
@@ -115,14 +119,33 @@ router-maestro auth login github-copilot
115
119
  # 3. Authorize "GitHub Copilot Chat"
116
120
  ```
117
121
 
118
- ### 4. Configure Claude Code
122
+ ### 4. Configure Your CLI Tool
123
+
124
+ #### Claude Code
119
125
 
120
126
  ```bash
121
127
  router-maestro config claude-code
122
128
  # Follow the wizard to select models
123
129
  ```
124
130
 
125
- **Done!** Now run `claude` and your requests will route through Router-Maestro.
131
+ #### OpenAI Codex (CLI, Extension, App)
132
+
133
+ ```bash
134
+ router-maestro config codex
135
+ # Follow the wizard to select models
136
+ ```
137
+
138
+ After configuration, set the API key environment variable:
139
+
140
+ ```bash
141
+ # Get your API key
142
+ router-maestro server show-key
143
+
144
+ # Set the environment variable (add to your shell profile)
145
+ export ROUTER_MAESTRO_API_KEY="your-api-key-here"
146
+ ```
147
+
148
+ **Done!** Now run `claude` or `codex` and your requests will route through Router-Maestro.
126
149
 
127
150
  > **For production deployment**, see the [Deployment](#deployment) section.
128
151
 
@@ -255,6 +278,7 @@ router-maestro model list
255
278
  | Command | Description |
256
279
  | -------------------- | ----------------------------- |
257
280
  | `config claude-code` | Generate Claude Code settings |
281
+ | `config codex` | Generate Codex config (CLI/Extension/App) |
258
282
 
259
283
  ## API Reference
260
284
 
@@ -1,5 +1,8 @@
1
1
  # Router-Maestro
2
2
 
3
+ [![CI](https://github.com/MadSkittles/Router-Maestro/actions/workflows/ci.yml/badge.svg)](https://github.com/MadSkittles/Router-Maestro/actions/workflows/ci.yml)
4
+ [![Release](https://github.com/MadSkittles/Router-Maestro/actions/workflows/release.yml/badge.svg)](https://github.com/MadSkittles/Router-Maestro/actions/workflows/release.yml)
5
+
3
6
  Multi-model routing router with OpenAI-compatible and Anthropic-compatible APIs. Route LLM requests across GitHub Copilot, OpenAI, Anthropic, and custom providers with intelligent fallback and priority-based selection.
4
7
 
5
8
  ## TL;DR
@@ -78,14 +81,33 @@ router-maestro auth login github-copilot
78
81
  # 3. Authorize "GitHub Copilot Chat"
79
82
  ```
80
83
 
81
- ### 4. Configure Claude Code
84
+ ### 4. Configure Your CLI Tool
85
+
86
+ #### Claude Code
82
87
 
83
88
  ```bash
84
89
  router-maestro config claude-code
85
90
  # Follow the wizard to select models
86
91
  ```
87
92
 
88
- **Done!** Now run `claude` and your requests will route through Router-Maestro.
93
+ #### OpenAI Codex (CLI, Extension, App)
94
+
95
+ ```bash
96
+ router-maestro config codex
97
+ # Follow the wizard to select models
98
+ ```
99
+
100
+ After configuration, set the API key environment variable:
101
+
102
+ ```bash
103
+ # Get your API key
104
+ router-maestro server show-key
105
+
106
+ # Set the environment variable (add to your shell profile)
107
+ export ROUTER_MAESTRO_API_KEY="your-api-key-here"
108
+ ```
109
+
110
+ **Done!** Now run `claude` or `codex` and your requests will route through Router-Maestro.
89
111
 
90
112
  > **For production deployment**, see the [Deployment](#deployment) section.
91
113
 
@@ -218,6 +240,7 @@ router-maestro model list
218
240
  | Command | Description |
219
241
  | -------------------- | ----------------------------- |
220
242
  | `config claude-code` | Generate Claude Code settings |
243
+ | `config codex` | Generate Codex config (CLI/Extension/App) |
221
244
 
222
245
  ## API Reference
223
246
 
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "router-maestro"
3
- version = "0.1.5"
3
+ version = "0.1.7"
4
4
  description = "Multi-model routing and load balancing system with OpenAI-compatible API"
5
5
  readme = "README.md"
6
6
  license = "MIT"
@@ -49,6 +49,9 @@ dependencies = [
49
49
  # Utils
50
50
  "aiosqlite>=0.19.0",
51
51
  "python-dotenv>=1.0.0",
52
+
53
+ # TOML writing (for Codex config generation)
54
+ "tomlkit>=0.12.0",
52
55
  ]
53
56
 
54
57
  [project.optional-dependencies]
@@ -1,3 +1,3 @@
1
1
  """Router-Maestro: Multi-model routing and load balancing system."""
2
2
 
3
- __version__ = "0.1.5"
3
+ __version__ = "0.1.7"
@@ -3,9 +3,11 @@
3
3
  import asyncio
4
4
  import json
5
5
  import shutil
6
+ import tomllib
6
7
  from datetime import datetime
7
8
  from pathlib import Path
8
9
 
10
+ import tomlkit
9
11
  import typer
10
12
  from rich.console import Console
11
13
  from rich.panel import Panel
@@ -24,6 +26,10 @@ CLI_TOOLS = {
24
26
  "name": "Claude Code",
25
27
  "description": "Generate settings.json for Claude Code CLI",
26
28
  },
29
+ "codex": {
30
+ "name": "OpenAI Codex",
31
+ "description": "Generate config.toml for OpenAI Codex CLI",
32
+ },
27
33
  }
28
34
 
29
35
 
@@ -35,6 +41,14 @@ def get_claude_code_paths() -> dict[str, Path]:
35
41
  }
36
42
 
37
43
 
44
+ def get_codex_paths() -> dict[str, Path]:
45
+ """Get Codex config paths."""
46
+ return {
47
+ "user": Path.home() / ".codex" / "config.toml",
48
+ "project": Path.cwd() / ".codex" / "config.toml",
49
+ }
50
+
51
+
38
52
  @app.callback(invoke_without_command=True)
39
53
  def config_callback(ctx: typer.Context) -> None:
40
54
  """Generate configuration for CLI tools (interactive selection if not specified)."""
@@ -60,6 +74,8 @@ def config_callback(ctx: typer.Context) -> None:
60
74
  # Dispatch to the appropriate command
61
75
  if tool_key == "claude-code":
62
76
  claude_code_config()
77
+ elif tool_key == "codex":
78
+ codex_config()
63
79
 
64
80
 
65
81
  @app.command(name="claude-code")
@@ -175,3 +191,113 @@ def claude_code_config() -> None:
175
191
  border_style="green",
176
192
  )
177
193
  )
194
+
195
+
196
+ @app.command(name="codex")
197
+ def codex_config() -> None:
198
+ """Generate OpenAI Codex CLI config.toml for router-maestro."""
199
+ # Step 1: Select level
200
+ console.print("\n[bold]Step 1: Select configuration level[/bold]")
201
+ console.print(" 1. User-level (~/.codex/config.toml)")
202
+ console.print(" 2. Project-level (./.codex/config.toml)")
203
+ choice = Prompt.ask("Select", choices=["1", "2"], default="1")
204
+
205
+ paths = get_codex_paths()
206
+ level = "user" if choice == "1" else "project"
207
+ config_path = paths[level]
208
+
209
+ # Step 2: Backup if exists
210
+ if config_path.exists():
211
+ console.print(f"\n[yellow]config.toml already exists at {config_path}[/yellow]")
212
+ if Confirm.ask("Backup existing file?", default=True):
213
+ backup_path = config_path.with_suffix(
214
+ f".toml.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}"
215
+ )
216
+ shutil.copy(config_path, backup_path)
217
+ console.print(f"[green]Backed up to {backup_path}[/green]")
218
+
219
+ # Step 3: Get models from server
220
+ try:
221
+ client = get_admin_client()
222
+ models = asyncio.run(client.list_models())
223
+ except ServerNotRunningError as e:
224
+ console.print(f"[red]{e}[/red]")
225
+ console.print("[dim]Tip: Start router-maestro server first.[/dim]")
226
+ raise typer.Exit(1)
227
+ except Exception as e:
228
+ console.print(f"[red]Error: {e}[/red]")
229
+ raise typer.Exit(1)
230
+
231
+ if not models:
232
+ console.print("[red]No models available. Please authenticate first.[/red]")
233
+ raise typer.Exit(1)
234
+
235
+ # Display models
236
+ console.print("\n[bold]Available models:[/bold]")
237
+ table = Table()
238
+ table.add_column("#", style="dim")
239
+ table.add_column("Model Key", style="green")
240
+ table.add_column("Name", style="white")
241
+ for i, model in enumerate(models, 1):
242
+ table.add_row(str(i), f"{model['provider']}/{model['id']}", model["name"])
243
+ console.print(table)
244
+
245
+ # Select model
246
+ console.print("\n[bold]Step 2: Select model[/bold]")
247
+ model_choice = Prompt.ask("Enter number (or 0 for auto-routing)", default="0")
248
+ selected_model = "router-maestro"
249
+ if model_choice != "0" and model_choice.isdigit():
250
+ idx = int(model_choice) - 1
251
+ if 0 <= idx < len(models):
252
+ m = models[idx]
253
+ selected_model = f"{m['provider']}/{m['id']}"
254
+
255
+ # Step 4: Generate config
256
+ client = get_admin_client()
257
+ base_url = (
258
+ client.endpoint.rstrip("/") if hasattr(client, "endpoint") else "http://localhost:8080"
259
+ )
260
+ openai_url = f"{base_url}/api/openai/v1"
261
+
262
+ # Load existing config to preserve other sections
263
+ existing_config: tomlkit.TOMLDocument = tomlkit.document()
264
+ if config_path.exists():
265
+ try:
266
+ with open(config_path, "rb") as f:
267
+ existing_config = tomlkit.load(f)
268
+ except (tomllib.TOMLDecodeError, OSError):
269
+ pass # If file is corrupted, start fresh
270
+
271
+ # Update configuration
272
+ existing_config["model"] = selected_model
273
+ existing_config["model_provider"] = "router-maestro"
274
+
275
+ # Create or update model_providers section
276
+ if "model_providers" not in existing_config:
277
+ existing_config["model_providers"] = tomlkit.table()
278
+
279
+ provider_config = tomlkit.table()
280
+ provider_config["name"] = "Router Maestro"
281
+ provider_config["base_url"] = openai_url
282
+ provider_config["env_key"] = "ROUTER_MAESTRO_API_KEY"
283
+ provider_config["wire_api"] = "responses"
284
+ existing_config["model_providers"]["router-maestro"] = provider_config
285
+
286
+ # Write config
287
+ config_path.parent.mkdir(parents=True, exist_ok=True)
288
+ with open(config_path, "w", encoding="utf-8") as f:
289
+ f.write(tomlkit.dumps(existing_config))
290
+
291
+ console.print(
292
+ Panel(
293
+ f"[green]Created {config_path}[/green]\n\n"
294
+ f"Model: {selected_model}\n\n"
295
+ f"Endpoint: {openai_url}\n\n"
296
+ "[dim]Start router-maestro server before using Codex:[/dim]\n"
297
+ " router-maestro server start\n\n"
298
+ "[dim]Set API key environment variable (optional):[/dim]\n"
299
+ " export ROUTER_MAESTRO_API_KEY=your-key",
300
+ title="Success",
301
+ border_style="green",
302
+ )
303
+ )
@@ -9,6 +9,10 @@ from router_maestro.providers.base import (
9
9
  Message,
10
10
  ModelInfo,
11
11
  ProviderError,
12
+ ResponsesRequest,
13
+ ResponsesResponse,
14
+ ResponsesStreamChunk,
15
+ ResponsesToolCall,
12
16
  )
13
17
  from router_maestro.providers.copilot import CopilotProvider
14
18
  from router_maestro.providers.openai import OpenAIProvider
@@ -23,6 +27,10 @@ __all__ = [
23
27
  "ChatResponse",
24
28
  "ChatStreamChunk",
25
29
  "ModelInfo",
30
+ "ResponsesRequest",
31
+ "ResponsesResponse",
32
+ "ResponsesStreamChunk",
33
+ "ResponsesToolCall",
26
34
  # Providers
27
35
  "CopilotProvider",
28
36
  "OpenAIProvider",
@@ -59,6 +59,53 @@ class ModelInfo:
59
59
  provider: str
60
60
 
61
61
 
62
+ @dataclass
63
+ class ResponsesToolCall:
64
+ """A tool/function call from the Responses API."""
65
+
66
+ call_id: str
67
+ name: str
68
+ arguments: str
69
+
70
+
71
+ @dataclass
72
+ class ResponsesRequest:
73
+ """Request for the Responses API (used by Codex models)."""
74
+
75
+ model: str
76
+ input: str | list # Can be string or list of message dicts
77
+ stream: bool = False
78
+ instructions: str | None = None
79
+ temperature: float = 1.0
80
+ max_output_tokens: int | None = None
81
+ # Tool support
82
+ tools: list[dict] | None = None
83
+ tool_choice: str | dict | None = None
84
+ parallel_tool_calls: bool | None = None
85
+
86
+
87
+ @dataclass
88
+ class ResponsesResponse:
89
+ """Response from the Responses API."""
90
+
91
+ content: str
92
+ model: str
93
+ usage: dict | None = None
94
+ tool_calls: list[ResponsesToolCall] | None = None
95
+
96
+
97
+ @dataclass
98
+ class ResponsesStreamChunk:
99
+ """A chunk from streaming Responses API completion."""
100
+
101
+ content: str
102
+ finish_reason: str | None = None
103
+ usage: dict | None = None
104
+ # Tool call support
105
+ tool_call: ResponsesToolCall | None = None # A complete tool call
106
+ tool_call_delta: dict | None = None # Partial tool call for streaming
107
+
108
+
62
109
  class ProviderError(Exception):
63
110
  """Error from a provider."""
64
111
 
@@ -121,3 +168,36 @@ class BaseProvider(ABC):
121
168
  Override this for providers that need token refresh.
122
169
  """
123
170
  pass
171
+
172
+ async def responses_completion(self, request: ResponsesRequest) -> ResponsesResponse:
173
+ """Generate a Responses API completion (for Codex models).
174
+
175
+ Args:
176
+ request: Responses completion request
177
+
178
+ Returns:
179
+ Responses completion response
180
+
181
+ Raises:
182
+ NotImplementedError: If provider does not support Responses API
183
+ """
184
+ raise NotImplementedError("Provider does not support Responses API")
185
+
186
+ async def responses_completion_stream(
187
+ self, request: ResponsesRequest
188
+ ) -> AsyncIterator[ResponsesStreamChunk]:
189
+ """Generate a streaming Responses API completion (for Codex models).
190
+
191
+ Args:
192
+ request: Responses completion request
193
+
194
+ Yields:
195
+ Responses completion chunks
196
+
197
+ Raises:
198
+ NotImplementedError: If provider does not support Responses API
199
+ """
200
+ raise NotImplementedError("Provider does not support Responses API")
201
+ # Make this a generator (required for type checking)
202
+ if False:
203
+ yield ResponsesStreamChunk(content="")