router-maestro 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- router_maestro/__init__.py +1 -1
- router_maestro/cli/config.py +19 -9
- router_maestro/server/routes/anthropic.py +99 -5
- router_maestro/server/schemas/anthropic.py +21 -0
- router_maestro/utils/tokens.py +10 -2
- {router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/METADATA +4 -1
- {router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/RECORD +10 -10
- {router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/WHEEL +0 -0
- {router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/entry_points.txt +0 -0
- {router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/licenses/LICENSE +0 -0
router_maestro/__init__.py
CHANGED
router_maestro/cli/config.py
CHANGED
@@ -139,19 +139,29 @@ def claude_code_config() -> None:
     )
     anthropic_url = f"{base_url}/api/anthropic"
 
-    … (old lines 142-147: content not captured in this diff view)
-            "CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC": "1",
-        }
+    env_config = {
+        "ANTHROPIC_BASE_URL": anthropic_url,
+        "ANTHROPIC_AUTH_TOKEN": auth_token,
+        "ANTHROPIC_MODEL": main_model,
+        "ANTHROPIC_SMALL_FAST_MODEL": fast_model,
+        "CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC": "1",
     }
 
+    # Load existing settings to preserve other sections (e.g., MCP servers)
+    existing_config: dict = {}
+    if settings_path.exists():
+        try:
+            with open(settings_path, encoding="utf-8") as f:
+                existing_config = json.load(f)
+        except (json.JSONDecodeError, OSError):
+            pass  # If file is corrupted, start fresh
+
+    # Merge: update env section while preserving other sections
+    existing_config["env"] = env_config
+
     settings_path.parent.mkdir(parents=True, exist_ok=True)
     with open(settings_path, "w", encoding="utf-8") as f:
-        json.dump(
+        json.dump(existing_config, f, indent=2)
 
     console.print(
         Panel(
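The effect of this hunk: regenerating the Claude Code configuration now merges the `env` block into the existing settings file instead of overwriting it, so unrelated sections (such as MCP server definitions) survive a re-run. A minimal sketch of the same read-merge-write pattern, assuming a `~/.claude/settings.json` location and placeholder values (neither is taken from this diff):

```python
import json
from pathlib import Path

# Assumed location and placeholder values; the real ones come from the CLI flow.
settings_path = Path.home() / ".claude" / "settings.json"
env_config = {
    "ANTHROPIC_BASE_URL": "http://localhost:8000/api/anthropic",
    "ANTHROPIC_AUTH_TOKEN": "placeholder-token",
}

# Read whatever is already there, replace only the "env" section, write it back.
existing: dict = {}
if settings_path.exists():
    try:
        existing = json.loads(settings_path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        existing = {}  # corrupted or unreadable file: start fresh

existing["env"] = env_config  # other top-level keys are left untouched
settings_path.parent.mkdir(parents=True, exist_ok=True)
settings_path.write_text(json.dumps(existing, indent=2), encoding="utf-8")
```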
router_maestro/server/routes/anthropic.py
CHANGED

@@ -3,6 +3,7 @@
 import json
 import uuid
 from collections.abc import AsyncGenerator
+from datetime import UTC, datetime
 
 from fastapi import APIRouter, HTTPException
 from fastapi.responses import StreamingResponse

@@ -13,6 +14,8 @@ from router_maestro.server.schemas.anthropic import (
     AnthropicCountTokensRequest,
     AnthropicMessagesRequest,
     AnthropicMessagesResponse,
+    AnthropicModelInfo,
+    AnthropicModelList,
     AnthropicStreamState,
     AnthropicTextBlock,
     AnthropicUsage,

@@ -26,6 +29,7 @@ from router_maestro.utils import (
     get_logger,
     map_openai_stop_reason_to_anthropic,
 )
+from router_maestro.utils.tokens import AnthropicStopReason
 
 logger = get_logger("server.routes.anthropic")
 

@@ -106,7 +110,7 @@ async def count_tokens(request: AnthropicCountTokensRequest):
 
     # Count messages
     for msg in request.messages:
-        content = msg.content
+        content = msg.content
         if isinstance(content, str):
             total_chars += len(content)
         elif isinstance(content, list):

@@ -115,12 +119,12 @@ async def count_tokens(request: AnthropicCountTokensRequest):
                     if block.get("type") == "text":
                         total_chars += len(block.get("text", ""))
                 elif hasattr(block, "text"):
-                    total_chars += len(block.text)
+                    total_chars += len(block.text)  # type: ignore[union-attr]
 
     return {"input_tokens": estimate_tokens_from_char_count(total_chars)}
 
 
-def _map_finish_reason(reason: str | None) ->
+def _map_finish_reason(reason: str | None) -> AnthropicStopReason | None:
     """Map OpenAI finish reason to Anthropic stop reason."""
     return map_openai_stop_reason_to_anthropic(reason)
 

@@ -144,7 +148,7 @@ def _estimate_input_tokens(request: AnthropicMessagesRequest) -> int:
 
     # Count messages
     for msg in request.messages:
-        content = msg.content
+        content = msg.content
         if isinstance(content, str):
             total_chars += len(content)
         elif isinstance(content, list):

@@ -161,7 +165,7 @@ def _estimate_input_tokens(request: AnthropicMessagesRequest) -> int:
                     if isinstance(tc, dict) and tc.get("type") == "text":
                         total_chars += len(tc.get("text", ""))
                 elif hasattr(block, "text"):
-                    total_chars += len(block.text)
+                    total_chars += len(block.text)  # type: ignore[union-attr]
 
     # Count tools definitions if present
     if request.tools:

@@ -226,3 +230,93 @@ async def stream_response(
             },
         }
         yield f"event: error\ndata: {json.dumps(error_event)}\n\n"
+
+
+def _generate_display_name(model_id: str) -> str:
+    """Generate a human-readable display name from model ID.
+
+    Transforms model IDs like 'github-copilot/claude-sonnet-4' into
+    'Claude Sonnet 4 (github-copilot)'.
+    """
+    if "/" in model_id:
+        provider, model_name = model_id.split("/", 1)
+    else:
+        provider = ""
+        model_name = model_id
+
+    # Capitalize words and handle common patterns
+    words = model_name.replace("-", " ").replace("_", " ").split()
+    display_words = []
+    for word in words:
+        # Keep version numbers as-is
+        if word.replace(".", "").isdigit():
+            display_words.append(word)
+        else:
+            display_words.append(word.capitalize())
+
+    display_name = " ".join(display_words)
+    if provider:
+        display_name = f"{display_name} ({provider})"
+
+    return display_name
+
+
+@router.get("/api/anthropic/v1/models")
+async def list_models(
+    limit: int = 20,
+    after_id: str | None = None,
+    before_id: str | None = None,
+) -> AnthropicModelList:
+    """List available models in Anthropic format.
+
+    Args:
+        limit: Maximum number of models to return (default 20)
+        after_id: Return models after this ID (for forward pagination)
+        before_id: Return models before this ID (for backward pagination)
+    """
+    model_router = get_router()
+    models = await model_router.list_models()
+
+    # Generate ISO 8601 timestamp for created_at
+    # Using current time since actual creation dates aren't tracked
+    created_at = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    # Convert to Anthropic format
+    anthropic_models = [
+        AnthropicModelInfo(
+            id=model.id,
+            created_at=created_at,
+            display_name=_generate_display_name(model.id),
+            type="model",
+        )
+        for model in models
+    ]
+
+    # Handle pagination
+    start_idx = 0
+    if after_id:
+        for i, model in enumerate(anthropic_models):
+            if model.id == after_id:
+                start_idx = i + 1
+                break
+
+    end_idx = len(anthropic_models)
+    if before_id:
+        for i, model in enumerate(anthropic_models):
+            if model.id == before_id:
+                end_idx = i
+                break
+
+    # Apply limit
+    paginated = anthropic_models[start_idx : min(start_idx + limit, end_idx)]
+
+    first_id = paginated[0].id if paginated else None
+    last_id = paginated[-1].id if paginated else None
+    has_more = (start_idx + limit) < end_idx
+
+    return AnthropicModelList(
+        data=paginated,
+        first_id=first_id,
+        last_id=last_id,
+        has_more=has_more,
+    )
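Most of the added lines implement a new `GET /api/anthropic/v1/models` endpoint that lists the router's models in Anthropic's paginated format, with display names derived from the model ID. A quick sketch of how a client might page through it (host, port, and any auth header are assumptions, not shown in this diff):

```python
import httpx

BASE = "http://localhost:8000"  # assumed; use your router-maestro server address

page = httpx.get(f"{BASE}/api/anthropic/v1/models", params={"limit": 5}).json()

for model in page["data"]:
    # e.g. "github-copilot/claude-sonnet-4" -> "Claude Sonnet 4 (github-copilot)"
    print(model["id"], "->", model["display_name"])

# Cursor-style pagination: pass last_id back as after_id to fetch the next page.
if page["has_more"]:
    next_page = httpx.get(
        f"{BASE}/api/anthropic/v1/models",
        params={"limit": 5, "after_id": page["last_id"]},
    ).json()
```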
router_maestro/server/schemas/anthropic.py
CHANGED

@@ -244,3 +244,24 @@ class AnthropicStreamState(BaseModel):
     estimated_input_tokens: int = 0  # Estimated input tokens from request
     last_usage: dict | None = None  # Track the latest usage from stream chunks
     message_complete: bool = False  # Track if message_stop was sent
+
+
+# Models API types
+
+
+class AnthropicModelInfo(BaseModel):
+    """Anthropic model object."""
+
+    id: str
+    created_at: str  # ISO 8601 datetime
+    display_name: str
+    type: Literal["model"] = "model"
+
+
+class AnthropicModelList(BaseModel):
+    """Anthropic models list response with pagination."""
+
+    data: list[AnthropicModelInfo]
+    first_id: str | None = None
+    last_id: str | None = None
+    has_more: bool = False
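These two pydantic models define the shape of the `/v1/models` response above. A self-contained sketch of what a one-entry page serializes to, with the schemas re-declared locally and pydantic v2 assumed (all values are placeholders):

```python
from typing import Literal

from pydantic import BaseModel


class AnthropicModelInfo(BaseModel):
    id: str
    created_at: str  # ISO 8601 datetime
    display_name: str
    type: Literal["model"] = "model"


class AnthropicModelList(BaseModel):
    data: list[AnthropicModelInfo]
    first_id: str | None = None
    last_id: str | None = None
    has_more: bool = False


page = AnthropicModelList(
    data=[
        AnthropicModelInfo(
            id="github-copilot/claude-sonnet-4",
            created_at="2025-01-01T00:00:00Z",
            display_name="Claude Sonnet 4 (github-copilot)",
        )
    ],
    first_id="github-copilot/claude-sonnet-4",
    last_id="github-copilot/claude-sonnet-4",
)
print(page.model_dump_json(indent=2))  # pydantic v2 serialization API
```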
router_maestro/utils/tokens.py
CHANGED
@@ -1,8 +1,14 @@
 """Token estimation utilities."""
 
+from typing import Literal
+
 # Approximate characters per token for English text
 CHARS_PER_TOKEN = 4
 
+AnthropicStopReason = Literal[
+    "end_turn", "max_tokens", "stop_sequence", "tool_use", "pause_turn", "refusal"
+]
+
 
 def estimate_tokens(text: str) -> int:
     """Estimate token count from text.

@@ -31,7 +37,9 @@ def estimate_tokens_from_char_count(char_count: int) -> int:
     return char_count // CHARS_PER_TOKEN
 
 
-def map_openai_stop_reason_to_anthropic(
+def map_openai_stop_reason_to_anthropic(
+    openai_reason: str | None,
+) -> AnthropicStopReason | None:
     """Map OpenAI finish reason to Anthropic stop reason.
 
     Args:

@@ -42,7 +50,7 @@ def map_openai_stop_reason_to_anthropic(openai_reason: str | None) -> str | None
     """
     if openai_reason is None:
         return None
-    mapping = {
+    mapping: dict[str, AnthropicStopReason] = {
         "stop": "end_turn",
         "length": "max_tokens",
         "tool_calls": "tool_use",
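The stop-reason mapper now advertises the `AnthropicStopReason` Literal instead of a bare `str`, so type checkers can flag call sites that produce or compare invalid Anthropic stop reasons. A small self-contained sketch of the idea; only the mapping entries visible in this hunk are included, and the fallback for unknown reasons is an assumption:

```python
from typing import Literal

AnthropicStopReason = Literal[
    "end_turn", "max_tokens", "stop_sequence", "tool_use", "pause_turn", "refusal"
]


def map_stop_reason(openai_reason: str | None) -> AnthropicStopReason | None:
    """Illustrative mapper; the packaged implementation may contain more entries."""
    if openai_reason is None:
        return None
    mapping: dict[str, AnthropicStopReason] = {
        "stop": "end_turn",
        "length": "max_tokens",
        "tool_calls": "tool_use",
    }
    # Assumed fallback for unrecognized finish reasons (not shown in the hunk above).
    return mapping.get(openai_reason, "end_turn")


assert map_stop_reason("tool_calls") == "tool_use"
assert map_stop_reason(None) is None
```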
{router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: router-maestro
-Version: 0.1.4
+Version: 0.1.6
 Summary: Multi-model routing and load balancing system with OpenAI-compatible API
 Author-email: Kanwen Li <likanwen@icloud.com>
 License-Expression: MIT

@@ -37,6 +37,9 @@ Description-Content-Type: text/markdown
 
 # Router-Maestro
 
+[](https://github.com/MadSkittles/Router-Maestro/actions/workflows/ci.yml)
+[](https://github.com/MadSkittles/Router-Maestro/actions/workflows/release.yml)
+
 Multi-model routing router with OpenAI-compatible and Anthropic-compatible APIs. Route LLM requests across GitHub Copilot, OpenAI, Anthropic, and custom providers with intelligent fallback and priority-based selection.
 
 ## TL;DR

{router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-router_maestro/__init__.py,sha256=
+router_maestro/__init__.py,sha256=HJQ2eE5cH3zVHfskAha2jYgacsEZHU9FmRfQTzraiAE,92
 router_maestro/__main__.py,sha256=cUHr8B7JBiv5HhnN6l2iayDkGSBpI5Kf4I3jv9I_I3o,121
 router_maestro/auth/__init__.py,sha256=0JgD1w2gtGSkj809kgSKQanYYkncg6eF-hHoz-jQPgo,353
 router_maestro/auth/github_oauth.py,sha256=acQlAA2Zh6c8KQYdzXbC4ww0EJ41AgvbI5ixpFuNoRg,5060

@@ -7,7 +7,7 @@ router_maestro/auth/storage.py,sha256=TCLxgQ1lWcWD4xJXJzx5OMpvuAun_LSRItK0zhR6H0
 router_maestro/cli/__init__.py,sha256=yIAshaHpLL0WrDFmRpoMRM2EUe75x0wmM5NlGW3C89s,37
 router_maestro/cli/auth.py,sha256=eq5LBUohbMnHS4dZeyvq4OQAjzdrJ-StP2FGuUhkKa0,5940
 router_maestro/cli/client.py,sha256=mRzpsA_Dxn-Xq7W1_t6EiyddMI0a3cvuTL6-2JuV4mE,9383
-router_maestro/cli/config.py,sha256=
+router_maestro/cli/config.py,sha256=SGrWdd59eX1F8KzhHMJ_u2AkYfFuYlyLl6Nfz6u_rNk,6100
 router_maestro/cli/context.py,sha256=EPbT7fReIW17veU76CSAcv8QjzMsCIPm1QDBlGsV8fQ,4549
 router_maestro/cli/main.py,sha256=5yiK4Q149goSB2KKzgMuF5EpcC8FBzOUCkEt8wY5NAU,1314
 router_maestro/cli/model.py,sha256=2IG3IpQWh8Ejdv5Htcgr90O2v2UAa80TU15oOniPdvk,9054

@@ -35,18 +35,18 @@ router_maestro/server/middleware/__init__.py,sha256=PhtP2E04wApnOUBLE76mrOa0sSHp
 router_maestro/server/middleware/auth.py,sha256=Ak3k5cC8m4qPGUIheuOB--QiFvs6GIAcTRJqtCGCjAA,2018
 router_maestro/server/routes/__init__.py,sha256=eGEpNCnSRVQC1pFL7_evDmZfkMrviuI-n1okAS-YnhM,397
 router_maestro/server/routes/admin.py,sha256=oub4hDrYaytuorXkJzmz0YZ4Z2rcyNuwKcK_4IGvcDY,8942
-router_maestro/server/routes/anthropic.py,sha256=
+router_maestro/server/routes/anthropic.py,sha256=r9GKXdb5yg0G6BFvbKx2-hIGlCM736NgEIESXbSomgE,10794
 router_maestro/server/routes/chat.py,sha256=vyYX1ILhgAb9HYD87h1U3c5btpplqkTaejA81pWg4Oo,4752
 router_maestro/server/routes/models.py,sha256=PTSXojNFN9j90Bke74ZO6sEsfIc8u_4A69eW1QzFIbc,716
 router_maestro/server/schemas/__init__.py,sha256=VmJZoTMLb-bF33m79urhbejVdLfjDGMqCJP5QvWbHsU,1176
 router_maestro/server/schemas/admin.py,sha256=DuUojkCcq9n8pDhWG6L0SpzQooh91lmHjCRzgZ4AMwk,2369
-router_maestro/server/schemas/anthropic.py,sha256=
+router_maestro/server/schemas/anthropic.py,sha256=S5TFYDd8Iw7Oxjki6ng84DGVB90G0-mOza5D5r3rwOY,6566
 router_maestro/server/schemas/openai.py,sha256=s2487RYIn1h-CIaUpLue9BScDaTsafbVg5yc-kKhfME,2141
 router_maestro/utils/__init__.py,sha256=oSQyV--FueMPggRfjWWVnAKtjkcZWFOm9hCTymu0oZU,409
 router_maestro/utils/logging.py,sha256=gJWoRYibAxCWn4VmTmnrwpBRzQ7Uu5YIEk5zDiF9X_k,2393
-router_maestro/utils/tokens.py,sha256=
-router_maestro-0.1.
-router_maestro-0.1.
-router_maestro-0.1.
-router_maestro-0.1.
-router_maestro-0.1.
+router_maestro/utils/tokens.py,sha256=U5PXJv_6ba5xgMBG0c5qB96Yu6uLscSUjMWYTdNests,1530
+router_maestro-0.1.6.dist-info/METADATA,sha256=5NS_Z6XU27HDauDX1_ZIdPTD_RrXLnFCgTPVTQ-FSAM,12501
+router_maestro-0.1.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+router_maestro-0.1.6.dist-info/entry_points.txt,sha256=zoFUxxvNcFe0nTgpRbIdygIDEOla3KbvW6HbOCOlgv4,63
+router_maestro-0.1.6.dist-info/licenses/LICENSE,sha256=Ea86BSGu7_tpLAuzif_JmM9zjMoKQEf95VVF9sZw3Jo,1084
+router_maestro-0.1.6.dist-info/RECORD,,
{router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/WHEEL
File without changes

{router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/entry_points.txt
File without changes

{router_maestro-0.1.4.dist-info → router_maestro-0.1.6.dist-info}/licenses/LICENSE
File without changes