router-maestro 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. router_maestro/__init__.py +3 -0
  2. router_maestro/__main__.py +6 -0
  3. router_maestro/auth/__init__.py +18 -0
  4. router_maestro/auth/github_oauth.py +181 -0
  5. router_maestro/auth/manager.py +136 -0
  6. router_maestro/auth/storage.py +91 -0
  7. router_maestro/cli/__init__.py +1 -0
  8. router_maestro/cli/auth.py +167 -0
  9. router_maestro/cli/client.py +322 -0
  10. router_maestro/cli/config.py +132 -0
  11. router_maestro/cli/context.py +146 -0
  12. router_maestro/cli/main.py +42 -0
  13. router_maestro/cli/model.py +288 -0
  14. router_maestro/cli/server.py +117 -0
  15. router_maestro/cli/stats.py +76 -0
  16. router_maestro/config/__init__.py +72 -0
  17. router_maestro/config/contexts.py +29 -0
  18. router_maestro/config/paths.py +50 -0
  19. router_maestro/config/priorities.py +93 -0
  20. router_maestro/config/providers.py +34 -0
  21. router_maestro/config/server.py +115 -0
  22. router_maestro/config/settings.py +76 -0
  23. router_maestro/providers/__init__.py +31 -0
  24. router_maestro/providers/anthropic.py +203 -0
  25. router_maestro/providers/base.py +123 -0
  26. router_maestro/providers/copilot.py +346 -0
  27. router_maestro/providers/openai.py +188 -0
  28. router_maestro/providers/openai_compat.py +175 -0
  29. router_maestro/routing/__init__.py +5 -0
  30. router_maestro/routing/router.py +526 -0
  31. router_maestro/server/__init__.py +5 -0
  32. router_maestro/server/app.py +87 -0
  33. router_maestro/server/middleware/__init__.py +11 -0
  34. router_maestro/server/middleware/auth.py +66 -0
  35. router_maestro/server/oauth_sessions.py +159 -0
  36. router_maestro/server/routes/__init__.py +8 -0
  37. router_maestro/server/routes/admin.py +358 -0
  38. router_maestro/server/routes/anthropic.py +228 -0
  39. router_maestro/server/routes/chat.py +142 -0
  40. router_maestro/server/routes/models.py +34 -0
  41. router_maestro/server/schemas/__init__.py +57 -0
  42. router_maestro/server/schemas/admin.py +87 -0
  43. router_maestro/server/schemas/anthropic.py +246 -0
  44. router_maestro/server/schemas/openai.py +107 -0
  45. router_maestro/server/translation.py +636 -0
  46. router_maestro/stats/__init__.py +14 -0
  47. router_maestro/stats/heatmap.py +154 -0
  48. router_maestro/stats/storage.py +228 -0
  49. router_maestro/stats/tracker.py +73 -0
  50. router_maestro/utils/__init__.py +16 -0
  51. router_maestro/utils/logging.py +81 -0
  52. router_maestro/utils/tokens.py +51 -0
  53. router_maestro-0.1.2.dist-info/METADATA +383 -0
  54. router_maestro-0.1.2.dist-info/RECORD +57 -0
  55. router_maestro-0.1.2.dist-info/WHEEL +4 -0
  56. router_maestro-0.1.2.dist-info/entry_points.txt +2 -0
  57. router_maestro-0.1.2.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,246 @@
1
+ """Anthropic API-compatible schemas."""
2
+
3
+ from typing import Literal
4
+
5
+ from pydantic import BaseModel, Field
6
+
7
+ # Request types
8
+
9
+
10
class AnthropicTextBlock(BaseModel):
    """Plain-text content block (``type: "text"``)."""

    type: Literal["text"] = "text"  # fixed discriminator for this block type
    text: str  # the text payload
15
+
16
+
17
class AnthropicImageSource(BaseModel):
    """Source descriptor for a base64-encoded inline image."""

    type: Literal["base64"] = "base64"  # only base64 sources are modeled here
    media_type: Literal["image/jpeg", "image/png", "image/gif", "image/webp"]  # MIME type of the encoded image
    data: str  # base64-encoded image bytes
23
+
24
+
25
class AnthropicImageBlock(BaseModel):
    """Image content block (``type: "image"``)."""

    type: Literal["image"] = "image"  # fixed discriminator for this block type
    source: AnthropicImageSource  # where/how the image bytes are supplied
30
+
31
+
32
class AnthropicToolResultContentBlock(BaseModel):
    """Content block nested inside a tool result (text or image).

    Exactly one of ``text`` / ``source`` is expected to be populated,
    matching ``type`` — the schema does not enforce this invariant.
    """

    type: Literal["text", "image"]  # which of the two optional payloads applies
    text: str | None = None  # set when type == "text"
    source: AnthropicImageSource | None = None  # set when type == "image"
38
+
39
+
40
class AnthropicToolResultBlock(BaseModel):
    """Tool result content block sent back by the user after a tool call."""

    type: Literal["tool_result"] = "tool_result"  # fixed discriminator
    tool_use_id: str  # id of the AnthropicToolUseBlock this result answers
    content: str | list[AnthropicToolResultContentBlock]  # plain string or structured blocks
    is_error: bool | None = None  # True when the tool invocation failed
47
+
48
+
49
class AnthropicToolUseBlock(BaseModel):
    """Tool use content block emitted by the assistant."""

    type: Literal["tool_use"] = "tool_use"  # fixed discriminator
    id: str  # unique id, referenced by the matching tool_result block
    name: str  # name of the tool being invoked
    input: dict  # tool arguments; shape defined by the tool's input_schema
56
+
57
+
58
class AnthropicThinkingBlock(BaseModel):
    """Extended-thinking content block (``type: "thinking"``).

    The Anthropic API attaches a ``signature`` to thinking blocks it
    returns; clients must send it back verbatim when replaying assistant
    turns in multi-turn conversations with thinking enabled. Without this
    field, pydantic would silently drop client-supplied signatures on
    parse (extra fields are ignored by default), breaking replay.
    """

    type: Literal["thinking"] = "thinking"  # fixed discriminator
    thinking: str  # the model's reasoning text
    # Optional so locally-constructed blocks keep working; populated when
    # parsing blocks that round-trip through the Anthropic API.
    signature: str | None = None
63
+
64
+
65
# Unions of the block types each role is allowed to send; pydantic tries
# members left-to-right when validating an untagged block.
AnthropicUserContentBlock = AnthropicTextBlock | AnthropicImageBlock | AnthropicToolResultBlock
AnthropicAssistantContentBlock = AnthropicTextBlock | AnthropicToolUseBlock | AnthropicThinkingBlock
67
+
68
+
69
class AnthropicUserMessage(BaseModel):
    """Message authored by the user (or carrying tool results)."""

    role: Literal["user"] = "user"  # fixed role discriminator
    content: str | list[AnthropicUserContentBlock]  # shorthand string or explicit blocks
74
+
75
+
76
class AnthropicAssistantMessage(BaseModel):
    """Message authored by the assistant (prior turns in the conversation)."""

    role: Literal["assistant"] = "assistant"  # fixed role discriminator
    content: str | list[AnthropicAssistantContentBlock]  # shorthand string or explicit blocks
81
+
82
+
83
# A conversation turn: discriminated by the literal `role` field.
AnthropicMessage = AnthropicUserMessage | AnthropicAssistantMessage
84
+
85
+
86
class AnthropicTool(BaseModel):
    """Definition of a tool the model may call."""

    name: str  # tool name referenced by tool_use/tool_choice
    description: str | None = None  # natural-language description shown to the model
    input_schema: dict  # JSON Schema describing the tool's arguments
92
+
93
+
94
class AnthropicToolChoice(BaseModel):
    """How the model should choose among the provided tools."""

    type: Literal["auto", "any", "tool", "none"]  # "tool" forces a specific tool
    name: str | None = None  # required by the API when type == "tool"
99
+
100
+
101
class AnthropicThinkingConfig(BaseModel):
    """Request-level configuration enabling extended thinking."""

    type: Literal["enabled"] = "enabled"  # only the enabled form is modeled
    budget_tokens: int | None = None  # max tokens the model may spend thinking
106
+
107
+
108
class AnthropicMessagesRequest(BaseModel):
    """Anthropic Messages API request body (``POST /v1/messages``)."""

    model: str  # model identifier to route to
    messages: list[AnthropicMessage]  # alternating conversation turns
    max_tokens: int  # required by the Messages API, unlike OpenAI's optional field
    system: str | list[AnthropicTextBlock] | None = None  # system prompt, string or blocks
    metadata: dict | None = None  # opaque request metadata (e.g. user_id)
    stop_sequences: list[str] | None = None  # custom stop strings
    stream: bool = False  # when True, respond with SSE stream events
    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    tools: list[AnthropicTool] | None = None  # tool definitions available this turn
    tool_choice: AnthropicToolChoice | None = None  # tool-selection policy
    thinking: AnthropicThinkingConfig | None = None  # extended-thinking settings
    service_tier: Literal["auto", "standard_only"] | None = None  # capacity tier request
125
+
126
+
127
class AnthropicCountTokensRequest(BaseModel):
    """Anthropic ``count_tokens`` API request.

    Same shape as a Messages request minus the sampling fields;
    ``max_tokens`` is deliberately not required here.
    """

    model: str
    messages: list[AnthropicMessage]
    system: str | list[AnthropicTextBlock] | None = None
    tools: list[AnthropicTool] | None = None  # tools count toward the token total
134
+
135
+
136
+ # Response types
137
+
138
+
139
class AnthropicUsage(BaseModel):
    """Token usage accounting attached to a response."""

    input_tokens: int  # tokens consumed by the prompt
    output_tokens: int  # tokens generated in the response
    cache_creation_input_tokens: int | None = None  # tokens written to the prompt cache
    cache_read_input_tokens: int | None = None  # tokens served from the prompt cache
    service_tier: Literal["standard", "priority", "batch"] | None = None  # tier that served the request
147
+
148
+
149
class AnthropicMessagesResponse(BaseModel):
    """Anthropic Messages API (non-streaming) response body."""

    id: str  # server-assigned message id
    type: Literal["message"] = "message"  # fixed object type
    role: Literal["assistant"] = "assistant"  # responses are always assistant-authored
    content: list[AnthropicAssistantContentBlock]  # generated content blocks
    model: str  # model that actually produced the response
    # Required field (no default) but may be explicitly None mid-stream.
    stop_reason: (
        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use", "pause_turn", "refusal"]
        | None
    )
    stop_sequence: str | None = None  # which custom stop sequence fired, if any
    usage: AnthropicUsage  # token accounting for this exchange
163
+
164
+
165
+ # Streaming event types
166
+
167
+
168
class AnthropicMessageStartEvent(BaseModel):
    """SSE ``message_start`` event opening a streamed response."""

    type: Literal["message_start"] = "message_start"
    message: dict  # partial AnthropicMessagesResponse; kept untyped for flexibility
173
+
174
+
175
class AnthropicContentBlockStartEvent(BaseModel):
    """SSE ``content_block_start`` event opening block ``index``."""

    type: Literal["content_block_start"] = "content_block_start"
    index: int  # position of the block within the message's content list
    content_block: dict  # initial (possibly empty) block payload, untyped
181
+
182
+
183
class AnthropicContentBlockDeltaEvent(BaseModel):
    """SSE ``content_block_delta`` event appending to block ``index``."""

    type: Literal["content_block_delta"] = "content_block_delta"
    index: int  # which open content block this delta extends
    delta: dict  # e.g. text_delta / input_json_delta payload, untyped
189
+
190
+
191
class AnthropicContentBlockStopEvent(BaseModel):
    """SSE ``content_block_stop`` event closing block ``index``."""

    type: Literal["content_block_stop"] = "content_block_stop"
    index: int  # which content block is now complete
196
+
197
+
198
class AnthropicMessageDeltaEvent(BaseModel):
    """SSE ``message_delta`` event with top-level message updates."""

    type: Literal["message_delta"] = "message_delta"
    delta: dict  # e.g. final stop_reason / stop_sequence, untyped
    usage: dict | None = None  # cumulative usage snapshot, when provided
204
+
205
+
206
class AnthropicMessageStopEvent(BaseModel):
    """SSE ``message_stop`` event terminating the stream."""

    type: Literal["message_stop"] = "message_stop"
210
+
211
+
212
class AnthropicPingEvent(BaseModel):
    """SSE ``ping`` keep-alive event; carries no payload."""

    type: Literal["ping"] = "ping"
216
+
217
+
218
class AnthropicErrorEvent(BaseModel):
    """SSE ``error`` event reporting a mid-stream failure."""

    type: Literal["error"] = "error"
    error: dict  # error object (type/message), kept untyped
223
+
224
+
225
# Union of every SSE event that can appear on a Messages stream,
# discriminated by the literal `type` field on each member.
AnthropicStreamEvent = (
    AnthropicMessageStartEvent
    | AnthropicContentBlockStartEvent
    | AnthropicContentBlockDeltaEvent
    | AnthropicContentBlockStopEvent
    | AnthropicMessageDeltaEvent
    | AnthropicMessageStopEvent
    | AnthropicPingEvent
    | AnthropicErrorEvent
)
235
+
236
+
237
class AnthropicStreamState(BaseModel):
    """Mutable bookkeeping used while translating a provider stream
    into Anthropic SSE events (one instance per in-flight response)."""

    message_start_sent: bool = False  # whether message_start has been emitted yet
    content_block_index: int = 0  # index assigned to the next content block
    content_block_open: bool = False  # True while a content_block_start awaits its stop
    tool_calls: dict[int, dict] = Field(default_factory=dict)  # in-progress tool calls keyed by index
    estimated_input_tokens: int = 0  # input-token estimate derived from the request
    last_usage: dict | None = None  # most recent usage payload seen in stream chunks
    message_complete: bool = False  # True once message_stop has been emitted
@@ -0,0 +1,107 @@
1
+ """OpenAI-compatible API schemas."""
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+
6
class ChatMessage(BaseModel):
    """A single chat message (OpenAI wire format)."""

    role: str  # e.g. "system" / "user" / "assistant"; deliberately unconstrained
    content: str  # message text; only string content is modeled here
11
+
12
+
13
class ChatCompletionRequest(BaseModel):
    """OpenAI-compatible chat completion request body."""

    model: str  # model identifier to route to
    messages: list[ChatMessage]  # conversation history
    temperature: float = Field(default=1.0, ge=0, le=2)  # OpenAI's documented range
    max_tokens: int | None = None  # optional, unlike the Anthropic API
    stream: bool = False  # when True, respond with SSE chunks
    top_p: float | None = None
    frequency_penalty: float | None = None
    presence_penalty: float | None = None
    stop: list[str] | str | None = None  # single stop string or a list of them
    user: str | None = None  # end-user identifier for abuse tracking
26
+
27
+
28
class ChatCompletionChoice(BaseModel):
    """One completion choice in a non-streaming response."""

    index: int  # position within the choices list
    message: ChatMessage  # the generated assistant message
    finish_reason: str | None  # required field, but may be explicitly None
34
+
35
+
36
class ChatCompletionUsage(BaseModel):
    """Token usage accounting (OpenAI naming)."""

    prompt_tokens: int  # tokens consumed by the input
    completion_tokens: int  # tokens generated
    total_tokens: int  # prompt + completion
42
+
43
+
44
class ChatCompletionResponse(BaseModel):
    """OpenAI-compatible non-streaming chat completion response."""

    id: str  # server-assigned completion id
    object: str = "chat.completion"  # fixed object tag per the OpenAI format
    created: int  # Unix timestamp (seconds)
    model: str  # model that produced the completion
    choices: list[ChatCompletionChoice]
    usage: ChatCompletionUsage | None = None  # omitted by some upstreams
53
+
54
+
55
class ChatCompletionChunkDelta(BaseModel):
    """Incremental message fragment inside a streaming chunk."""

    role: str | None = None  # sent once on the first chunk of a choice
    content: str | None = None  # text fragment to append, if any
60
+
61
+
62
class ChatCompletionChunkChoice(BaseModel):
    """One choice entry within a streaming chunk."""

    index: int  # position within the choices list
    delta: ChatCompletionChunkDelta  # the incremental update
    finish_reason: str | None = None  # set only on the final chunk of the choice
68
+
69
+
70
class ChatCompletionChunk(BaseModel):
    """A single SSE chunk of a streaming chat completion."""

    id: str  # same id across all chunks of one completion
    object: str = "chat.completion.chunk"  # fixed object tag per the OpenAI format
    created: int  # Unix timestamp (seconds)
    model: str
    choices: list[ChatCompletionChunkChoice]
78
+
79
+
80
class ModelObject(BaseModel):
    """One entry in the ``/v1/models`` listing."""

    id: str  # model identifier clients pass as `model`
    object: str = "model"  # fixed object tag
    created: int = 0  # Unix timestamp; 0 when the creation time is unknown
    owned_by: str  # owning organization / provider label
87
+
88
+
89
class ModelList(BaseModel):
    """``/v1/models`` response envelope."""

    object: str = "list"  # fixed object tag
    data: list[ModelObject]  # the available models
94
+
95
+
96
class ErrorDetail(BaseModel):
    """Error payload in the OpenAI error format."""

    message: str  # human-readable description
    type: str  # error category, e.g. "invalid_request_error"
    code: str | None = None  # machine-readable code, when available
102
+
103
+
104
class ErrorResponse(BaseModel):
    """Top-level error envelope: ``{"error": {...}}``."""

    error: ErrorDetail