sdkrouter 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. sdkrouter/__init__.py +110 -0
  2. sdkrouter/_api/__init__.py +28 -0
  3. sdkrouter/_api/client.py +204 -0
  4. sdkrouter/_api/generated/__init__.py +21 -0
  5. sdkrouter/_api/generated/cdn/__init__.py +209 -0
  6. sdkrouter/_api/generated/cdn/cdn__api__cdn/__init__.py +7 -0
  7. sdkrouter/_api/generated/cdn/cdn__api__cdn/client.py +133 -0
  8. sdkrouter/_api/generated/cdn/cdn__api__cdn/models.py +163 -0
  9. sdkrouter/_api/generated/cdn/cdn__api__cdn/sync_client.py +132 -0
  10. sdkrouter/_api/generated/cdn/client.py +75 -0
  11. sdkrouter/_api/generated/cdn/logger.py +256 -0
  12. sdkrouter/_api/generated/cdn/pyproject.toml +55 -0
  13. sdkrouter/_api/generated/cdn/retry.py +272 -0
  14. sdkrouter/_api/generated/cdn/sync_client.py +58 -0
  15. sdkrouter/_api/generated/cleaner/__init__.py +212 -0
  16. sdkrouter/_api/generated/cleaner/cleaner__api__cleaner/__init__.py +7 -0
  17. sdkrouter/_api/generated/cleaner/cleaner__api__cleaner/client.py +83 -0
  18. sdkrouter/_api/generated/cleaner/cleaner__api__cleaner/models.py +117 -0
  19. sdkrouter/_api/generated/cleaner/cleaner__api__cleaner/sync_client.py +82 -0
  20. sdkrouter/_api/generated/cleaner/client.py +75 -0
  21. sdkrouter/_api/generated/cleaner/enums.py +55 -0
  22. sdkrouter/_api/generated/cleaner/logger.py +256 -0
  23. sdkrouter/_api/generated/cleaner/pyproject.toml +55 -0
  24. sdkrouter/_api/generated/cleaner/retry.py +272 -0
  25. sdkrouter/_api/generated/cleaner/sync_client.py +58 -0
  26. sdkrouter/_api/generated/keys/__init__.py +212 -0
  27. sdkrouter/_api/generated/keys/client.py +75 -0
  28. sdkrouter/_api/generated/keys/enums.py +64 -0
  29. sdkrouter/_api/generated/keys/keys__api__keys/__init__.py +7 -0
  30. sdkrouter/_api/generated/keys/keys__api__keys/client.py +150 -0
  31. sdkrouter/_api/generated/keys/keys__api__keys/models.py +152 -0
  32. sdkrouter/_api/generated/keys/keys__api__keys/sync_client.py +149 -0
  33. sdkrouter/_api/generated/keys/logger.py +256 -0
  34. sdkrouter/_api/generated/keys/pyproject.toml +55 -0
  35. sdkrouter/_api/generated/keys/retry.py +272 -0
  36. sdkrouter/_api/generated/keys/sync_client.py +58 -0
  37. sdkrouter/_api/generated/models/__init__.py +209 -0
  38. sdkrouter/_api/generated/models/client.py +75 -0
  39. sdkrouter/_api/generated/models/logger.py +256 -0
  40. sdkrouter/_api/generated/models/models__api__llm_models/__init__.py +7 -0
  41. sdkrouter/_api/generated/models/models__api__llm_models/client.py +99 -0
  42. sdkrouter/_api/generated/models/models__api__llm_models/models.py +206 -0
  43. sdkrouter/_api/generated/models/models__api__llm_models/sync_client.py +99 -0
  44. sdkrouter/_api/generated/models/pyproject.toml +55 -0
  45. sdkrouter/_api/generated/models/retry.py +272 -0
  46. sdkrouter/_api/generated/models/sync_client.py +58 -0
  47. sdkrouter/_api/generated/shortlinks/__init__.py +209 -0
  48. sdkrouter/_api/generated/shortlinks/client.py +75 -0
  49. sdkrouter/_api/generated/shortlinks/logger.py +256 -0
  50. sdkrouter/_api/generated/shortlinks/pyproject.toml +55 -0
  51. sdkrouter/_api/generated/shortlinks/retry.py +272 -0
  52. sdkrouter/_api/generated/shortlinks/shortlinks__api__shortlinks/__init__.py +7 -0
  53. sdkrouter/_api/generated/shortlinks/shortlinks__api__shortlinks/client.py +137 -0
  54. sdkrouter/_api/generated/shortlinks/shortlinks__api__shortlinks/models.py +153 -0
  55. sdkrouter/_api/generated/shortlinks/shortlinks__api__shortlinks/sync_client.py +136 -0
  56. sdkrouter/_api/generated/shortlinks/sync_client.py +58 -0
  57. sdkrouter/_api/generated/vision/__init__.py +212 -0
  58. sdkrouter/_api/generated/vision/client.py +75 -0
  59. sdkrouter/_api/generated/vision/enums.py +40 -0
  60. sdkrouter/_api/generated/vision/logger.py +256 -0
  61. sdkrouter/_api/generated/vision/pyproject.toml +55 -0
  62. sdkrouter/_api/generated/vision/retry.py +272 -0
  63. sdkrouter/_api/generated/vision/sync_client.py +58 -0
  64. sdkrouter/_api/generated/vision/vision__api__vision/__init__.py +7 -0
  65. sdkrouter/_api/generated/vision/vision__api__vision/client.py +65 -0
  66. sdkrouter/_api/generated/vision/vision__api__vision/models.py +138 -0
  67. sdkrouter/_api/generated/vision/vision__api__vision/sync_client.py +65 -0
  68. sdkrouter/_client.py +432 -0
  69. sdkrouter/_config.py +74 -0
  70. sdkrouter/_constants.py +21 -0
  71. sdkrouter/_internal/__init__.py +1 -0
  72. sdkrouter/_types/__init__.py +30 -0
  73. sdkrouter/_types/cdn.py +27 -0
  74. sdkrouter/_types/models.py +26 -0
  75. sdkrouter/_types/ocr.py +24 -0
  76. sdkrouter/_types/parsed.py +101 -0
  77. sdkrouter/_types/shortlinks.py +27 -0
  78. sdkrouter/_types/vision.py +29 -0
  79. sdkrouter/_version.py +3 -0
  80. sdkrouter/helpers/__init__.py +13 -0
  81. sdkrouter/helpers/formatting.py +15 -0
  82. sdkrouter/helpers/html.py +100 -0
  83. sdkrouter/helpers/json_cleaner.py +53 -0
  84. sdkrouter/tools/__init__.py +129 -0
  85. sdkrouter/tools/cdn.py +285 -0
  86. sdkrouter/tools/cleaner.py +186 -0
  87. sdkrouter/tools/keys.py +215 -0
  88. sdkrouter/tools/models.py +196 -0
  89. sdkrouter/tools/shortlinks.py +165 -0
  90. sdkrouter/tools/vision.py +173 -0
  91. sdkrouter/utils/__init__.py +27 -0
  92. sdkrouter/utils/parsing.py +109 -0
  93. sdkrouter/utils/tokens.py +375 -0
  94. sdkrouter-0.1.1.dist-info/METADATA +411 -0
  95. sdkrouter-0.1.1.dist-info/RECORD +96 -0
  96. sdkrouter-0.1.1.dist-info/WHEEL +4 -0
@@ -0,0 +1,256 @@
1
+ # Auto-generated by DjangoCFG - see CLAUDE.md
2
+ """
3
+ API Logger with Rich
4
+ Beautiful console logging for API requests and responses
5
+
6
+ Installation:
7
+ pip install rich
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import time
13
+ from dataclasses import dataclass, field
14
+ from typing import Any, Dict, Optional
15
+
16
+ from rich.console import Console
17
+ from rich.panel import Panel
18
+ from rich.table import Table
19
+ from rich.text import Text
20
+
21
+
22
@dataclass
class RequestLog:
    """Request log data."""

    method: str  # HTTP verb, e.g. "GET"
    url: str  # request URL as passed to the client
    headers: Optional[Dict[str, str]] = None  # raw headers; sensitive ones masked at print time
    body: Optional[Any] = None  # request payload; printed only when log_bodies is on
    timestamp: float = field(default_factory=time.time)  # capture time, epoch seconds
31
+
32
+
33
@dataclass
class ResponseLog:
    """Response log data."""

    status: int  # HTTP status code; drives the color in log_response
    status_text: str  # reason phrase, e.g. "OK"
    data: Optional[Any] = None  # response payload; printed only when log_bodies is on
    duration: float = 0.0  # elapsed time; rendered as milliseconds ("...ms")
    timestamp: float = field(default_factory=time.time)  # capture time, epoch seconds
42
+
43
+
44
@dataclass
class ErrorLog:
    """Error log data."""

    message: str  # human-readable error description
    status_code: Optional[int] = None  # HTTP status; None means a network-level error
    field_errors: Optional[Dict[str, list[str]]] = None  # field name -> list of validation messages
    duration: float = 0.0  # elapsed time; rendered as milliseconds ("...ms")
    timestamp: float = field(default_factory=time.time)  # capture time, epoch seconds
53
+
54
+
55
@dataclass
class LoggerConfig:
    """Logger configuration."""

    enabled: bool = True  # master switch; every APILogger method no-ops when False
    log_requests: bool = True  # emit outgoing-request lines
    log_responses: bool = True  # emit response lines
    log_errors: bool = True  # emit error lines
    log_bodies: bool = True  # include request/response payloads
    log_headers: bool = False  # include (masked) headers; off by default to reduce noise
    console: Optional[Console] = None  # custom Rich console; a default one is created if None
66
+
67
+
68
# Header names (lowercase) whose values are masked as "***" in logs.
# A frozenset gives O(1) membership checks in _filter_headers and makes
# the constant immutable; `in` semantics are unchanged from the list form.
SENSITIVE_HEADERS = frozenset({
    "authorization",
    "cookie",
    "set-cookie",
    "x-api-key",
    "x-csrf-token",
})
76
+
77
+
78
class APILogger:
    """Console logger for API traffic, rendered with Rich.

    All output honors the flags on :class:`LoggerConfig`; every logging
    method is a no-op when ``config.enabled`` is False.
    """

    def __init__(self, config: Optional[LoggerConfig] = None):
        """Initialize logger with an optional config; defaults are used otherwise."""
        self.config = config or LoggerConfig()
        self.console = self.config.console or Console()

    def enable(self) -> None:
        """Enable logging."""
        self.config.enabled = True

    def disable(self) -> None:
        """Disable logging."""
        self.config.enabled = False

    def set_config(self, **kwargs: Any) -> None:
        """Update configuration attributes; unknown keys are silently ignored."""
        for key, value in kwargs.items():
            if hasattr(self.config, key):
                setattr(self.config, key, value)

    def _filter_headers(self, headers: Optional[Dict[str, str]]) -> Dict[str, str]:
        """Return a copy of *headers* with sensitive values replaced by ``***``."""
        if not headers:
            return {}

        filtered = {}
        for key, value in headers.items():
            if key.lower() in SENSITIVE_HEADERS:
                filtered[key] = "***"
            else:
                filtered[key] = value

        return filtered

    def log_request(self, request: RequestLog) -> None:
        """Log an outgoing request (method, URL, optional headers/body)."""
        if not self.config.enabled or not self.config.log_requests:
            return

        # Create request info
        text = Text()
        text.append("→ ", style="bold blue")
        text.append(request.method, style="bold yellow")
        text.append(" ", style="")
        text.append(request.url, style="cyan")

        self.console.print(text)

        if self.config.log_headers and request.headers:
            headers = self._filter_headers(request.headers)
            self.console.print(" Headers:", style="dim")
            for key, value in headers.items():
                self.console.print(f" {key}: {value}", style="dim")

        # NOTE: truthiness check means falsy bodies (e.g. empty dict/"") are
        # skipped, matching the original behavior.
        if self.config.log_bodies and request.body:
            self.console.print(" Body:", style="dim")
            self.console.print(request.body, style="dim")

    def log_response(self, request: RequestLog, response: ResponseLog) -> None:
        """Log a response, colored by status class, with duration in ms."""
        if not self.config.enabled or not self.config.log_responses:
            return

        # Determine color based on status: 5xx red, 4xx yellow, 3xx cyan, else green
        if response.status >= 500:
            status_style = "bold red"
        elif response.status >= 400:
            status_style = "bold yellow"
        elif response.status >= 300:
            status_style = "bold cyan"
        else:
            status_style = "bold green"

        # Create response info
        text = Text()
        text.append("← ", style="bold green")
        text.append(request.method, style="bold yellow")
        text.append(" ", style="")
        text.append(request.url, style="cyan")
        text.append(" ", style="")
        text.append(str(response.status), style=status_style)
        text.append(" ", style="")
        text.append(response.status_text, style=status_style)
        text.append(f" ({response.duration:.0f}ms)", style="dim")

        self.console.print(text)

        if self.config.log_bodies and response.data:
            self.console.print(" Response:", style="dim")
            self.console.print(response.data, style="dim")

    def log_error(self, request: RequestLog, error: ErrorLog) -> None:
        """Log a failed request: status (or "Network"), message, field errors."""
        if not self.config.enabled or not self.config.log_errors:
            return

        # Create error header
        text = Text()
        text.append("✗ ", style="bold red")
        text.append(request.method, style="bold yellow")
        text.append(" ", style="")
        text.append(request.url, style="cyan")
        text.append(" ", style="")
        text.append(
            str(error.status_code) if error.status_code else "Network",
            style="bold red",
        )
        text.append(" Error", style="bold red")
        text.append(f" ({error.duration:.0f}ms)", style="dim")

        self.console.print(text)
        self.console.print(f" Message: {error.message}", style="red")

        if error.field_errors:
            self.console.print(" Field Errors:", style="red")
            # renamed from `field` to avoid shadowing dataclasses.field imported above
            for field_name, errors in error.field_errors.items():
                for err in errors:
                    self.console.print(f" • {field_name}: {err}", style="red dim")

    def info(self, message: str, **kwargs: Any) -> None:
        """Log info message."""
        if not self.config.enabled:
            return
        self.console.print(f"ℹ {message}", style="blue", **kwargs)

    def warn(self, message: str, **kwargs: Any) -> None:
        """Log warning message."""
        if not self.config.enabled:
            return
        self.console.print(f"⚠ {message}", style="yellow", **kwargs)

    def error(self, message: str, **kwargs: Any) -> None:
        """Log error message."""
        if not self.config.enabled:
            return
        self.console.print(f"✗ {message}", style="red", **kwargs)

    def success(self, message: str, **kwargs: Any) -> None:
        """Log success message."""
        if not self.config.enabled:
            return
        self.console.print(f"✓ {message}", style="green", **kwargs)

    def debug(self, message: str, **kwargs: Any) -> None:
        """Log debug message."""
        if not self.config.enabled:
            return
        self.console.print(f"🔍 {message}", style="dim", **kwargs)

    def panel(self, content: Any, title: str, style: str = "blue") -> None:
        """Log *content* inside a Rich panel with the given title and border style."""
        if not self.config.enabled:
            return
        self.console.print(Panel(content, title=title, border_style=style))

    def table(
        self,
        headers: list[str],
        rows: list[list[Any]],
        title: Optional[str] = None,
    ) -> None:
        """Log tabular data; every cell is str()-converted before rendering."""
        if not self.config.enabled:
            return

        table = Table(title=title)
        for header in headers:
            table.add_column(header, style="cyan")

        for row in rows:
            table.add_row(*[str(cell) for cell in row])

        self.console.print(table)
253
+
254
+
255
# Default logger instance: module-level singleton shared by the generated
# clients; import and reuse it rather than constructing a new APILogger.
default_logger = APILogger()
@@ -0,0 +1,7 @@
1
+ # Auto-generated by DjangoCFG - see CLAUDE.md
2
+ from .client import ModelsLlmModelsAPI
3
+ from .models import *
4
+
5
+ __all__ = [
6
+ "ModelsLlmModelsAPI",
7
+ ]
@@ -0,0 +1,99 @@
1
+ from __future__ import annotations
2
+
3
+ import httpx
4
+
5
+ from .models import *
6
+
7
+
8
class ModelsLlmModelsAPI:
    """API endpoints for Llm Models.

    Thin async wrapper over a shared ``httpx.AsyncClient``; each method
    performs one request, raises ``httpx.HTTPStatusError`` (with the decoded
    body in the message) on non-2xx responses, and validates the JSON payload
    into the corresponding pydantic model.
    """

    def __init__(self, client: httpx.AsyncClient):
        """Initialize sub-client with shared httpx client."""
        self._client = client

    @staticmethod
    def _raise_for_status(response: httpx.Response) -> None:
        """Raise HTTPStatusError for non-2xx responses, embedding the body.

        Prefers the JSON body in the message; falls back to raw text when the
        body is not valid JSON.
        """
        if response.is_success:
            return
        try:
            error_body = response.json()
        except Exception:
            error_body = response.text
        raise httpx.HTTPStatusError(
            f"{response.status_code}: {error_body}",
            request=response.request,
            response=response,
        )

    # Return annotation fixed: the endpoint returns ONE paginated envelope,
    # not a list of them (the body always validated a single object).
    async def list(self, page: int | None = None, page_size: int | None = None) -> PaginatedLLMModelListList:
        """
        List models

        Get list of available LLM models with filtering.
        """
        url = "/api/llm_models/"
        # httpx drops None-valued params, so omitted arguments are not sent.
        response = await self._client.get(url, params={"page": page, "page_size": page_size})
        self._raise_for_status(response)
        return PaginatedLLMModelListList.model_validate(response.json())

    async def retrieve(self, model_id: str) -> LLMModelDetail:
        """
        Get model details

        Get detailed information about a specific model.
        """
        url = f"/api/llm_models/{model_id}/"
        response = await self._client.get(url)
        self._raise_for_status(response)
        return LLMModelDetail.model_validate(response.json())

    async def calculate_cost_create(self, model_id: str, data: CostCalculationRequestRequest) -> CostCalculationResponse:
        """
        Calculate cost

        Calculate cost for a request.
        """
        url = f"/api/llm_models/{model_id}/calculate-cost/"
        response = await self._client.post(url, json=data.model_dump(exclude_unset=True))
        self._raise_for_status(response)
        return CostCalculationResponse.model_validate(response.json())

    async def providers_retrieve(self) -> ProvidersResponse:
        """
        List providers

        Get list of available providers.
        """
        url = "/api/llm_models/providers/"
        response = await self._client.get(url)
        self._raise_for_status(response)
        return ProvidersResponse.model_validate(response.json())

    async def stats_retrieve(self) -> StatsResponse:
        """
        Get statistics

        Get model statistics.
        """
        url = "/api/llm_models/stats/"
        response = await self._client.get(url)
        self._raise_for_status(response)
        return StatsResponse.model_validate(response.json())
98
+
99
+
@@ -0,0 +1,206 @@
1
+ # Auto-generated by DjangoCFG - see CLAUDE.md
2
+ from __future__ import annotations
3
+
4
+ from datetime import datetime
5
+ from typing import Any
6
+
7
+ from pydantic import BaseModel, ConfigDict, Field
8
+
9
+
10
class LLMModelList(BaseModel):
    """
    Lightweight serializer for model listing.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    model_id: str = Field(description="Model identifier (e.g., 'openai/gpt-4o')", max_length=100)
    name: str = Field(description='Human-readable model name', max_length=200)
    owned_by: str = Field(description="Provider/owner name (e.g., 'OpenAI')", max_length=100)
    # bounds mirror a signed 32-bit integer column on the server
    context_length: int = Field(description='Maximum context length in tokens', ge=-2147483648, le=2147483647)
    supports_vision: bool | None = Field(None, description='Whether model supports image inputs')
    supports_tools: bool | None = Field(None, description='Whether model supports tool/function calling')
    pricing: dict[str, Any] = Field(description='Get simplified pricing info.')
30
+
31
+
32
+
33
class PaginatedLLMModelListList(BaseModel):
    """
    Paginated envelope for LLMModelList items.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    count: int = Field(description='Total number of items across all pages')
    page: int = Field(description='Current page number (1-based)')
    pages: int = Field(description='Total number of pages')
    page_size: int = Field(description='Number of items per page')
    has_next: bool = Field(description='Whether there is a next page')
    has_previous: bool = Field(description='Whether there is a previous page')
    next_page: int | None = Field(None, description='Next page number (null if no next page)')
    previous_page: int | None = Field(None, description='Previous page number (null if no previous page)')
    results: list[LLMModelList] = Field(description='Array of items for current page')
54
+
55
+
56
+
57
class LLMModelPricing(BaseModel):
    """
    Pricing information serializer.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    # Prices are decimal strings (up to 4 integer / 6 fractional digits per the
    # pattern), not floats — preserves exactness over the wire.
    prompt: str = Field(description='Price per million prompt tokens (USD)', pattern='^-?\\d{0,4}(?:\\.\\d{0,6})?$')
    completion: str = Field(description='Price per million completion tokens (USD)', pattern='^-?\\d{0,4}(?:\\.\\d{0,6})?$')
    image: str | None = Field(None, description='Price per image (USD)', pattern='^-?\\d{0,4}(?:\\.\\d{0,6})?$')
73
+
74
+
75
+
76
class LLMModelDetail(BaseModel):
    """
    Detailed serializer for single model.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    # `= ...` (Ellipsis) marks a required field with no type constraint (Any)
    id: Any = ...
    model_id: str = Field(description="Model identifier (e.g., 'openai/gpt-4o')", max_length=100)
    name: str = Field(description='Human-readable model name', max_length=200)
    owned_by: str = Field(description="Provider/owner name (e.g., 'OpenAI')", max_length=100)
    provider: Any = ...
    description: str | None = Field(None, description='Model description')
    context_length: int = Field(description='Maximum context length in tokens', ge=-2147483648, le=2147483647)
    max_output_tokens: int | None = Field(None, description='Maximum output tokens (if limited)', ge=-2147483648, le=2147483647)
    supports_vision: bool | None = Field(None, description='Whether model supports image inputs')
    supports_tools: bool | None = Field(None, description='Whether model supports tool/function calling')
    supports_streaming: bool | None = Field(None, description='Whether model supports streaming responses')
    supports_json_mode: bool | None = Field(None, description='Whether model supports JSON output mode')
    pricing: LLMModelPricing = ...
    architecture: dict[str, Any] | None = Field(None, description='Architecture details (modality, tokenizer, etc.)')
    top_provider: dict[str, Any] | None = Field(None, description='Top provider information from OpenRouter')
    is_active: bool | None = Field(None, description='Whether the model is available for use')
    # presumably an ISO datetime string from the server — TODO confirm wire format
    last_synced_at: Any = Field(description='Last time this model was synced from OpenRouter')
    created_at: Any = ...
    updated_at: Any = ...
108
+
109
+
110
+
111
class CostCalculationRequestRequest(BaseModel):
    """
    Request serializer for cost calculation.

    Request model (no read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate extra keys without failing validation
        frozen=False,
    )

    input_tokens: int = Field(description='Number of input tokens', ge=0)
    output_tokens: int = Field(description='Number of output tokens', ge=0)
126
+
127
+
128
+
129
class CostCalculationResponse(BaseModel):
    """
    Response serializer for cost calculation.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    model_id: str = Field(description='Model ID')
    input_cost_usd: float = Field(description='Cost of input tokens in USD')
    output_cost_usd: float = Field(description='Cost of output tokens in USD')
    total_cost_usd: float = Field(description='Total cost in USD')
    input_tokens: int = Field(description='Number of input tokens')
    output_tokens: int = Field(description='Number of output tokens')
148
+
149
+
150
+
151
class ProviderInfo(BaseModel):
    """
    Provider info serializer.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    name: str = Field(description='Provider name')
    model_count: int = Field(description='Number of models')
166
+
167
+
168
+
169
class ProvidersResponse(BaseModel):
    """
    Response serializer for providers list.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    providers: list[ProviderInfo] = Field(description='List of providers')
183
+
184
+
185
+
186
class StatsResponse(BaseModel):
    """
    Response serializer for model statistics.

    Response model (includes read-only fields).
    """

    model_config = ConfigDict(
        validate_assignment=True,  # re-validate fields on attribute assignment
        extra="allow",  # tolerate new server-side fields without failing validation
        frozen=False,
    )

    total_models: int = Field(description='Total number of models')
    active_models: int = Field(description='Number of active models')
    vision_models: int = Field(description='Number of vision-capable models')
    tool_models: int = Field(description='Number of tool-capable models')
    provider_count: int = Field(description='Number of providers')
204
+
205
+
206
+
@@ -0,0 +1,99 @@
1
+ from __future__ import annotations
2
+
3
+ import httpx
4
+
5
+ from .models import *
6
+
7
+
8
class SyncModelsLlmModelsAPI:
    """Synchronous API endpoints for Llm Models.

    Thin wrapper over a shared ``httpx.Client``; each method performs one
    request, raises ``httpx.HTTPStatusError`` (with the decoded body in the
    message) on non-2xx responses, and validates the JSON payload into the
    corresponding pydantic model.
    """

    def __init__(self, client: httpx.Client):
        """Initialize sync sub-client with shared httpx client."""
        self._client = client

    @staticmethod
    def _raise_for_status(response: httpx.Response) -> None:
        """Raise HTTPStatusError for non-2xx responses, embedding the body.

        Prefers the JSON body in the message; falls back to raw text when the
        body is not valid JSON.
        """
        if response.is_success:
            return
        try:
            error_body = response.json()
        except Exception:
            error_body = response.text
        raise httpx.HTTPStatusError(
            f"{response.status_code}: {error_body}",
            request=response.request,
            response=response,
        )

    # Return annotation fixed: the endpoint returns ONE paginated envelope,
    # not a list of them (the body always validated a single object).
    def list(self, page: int | None = None, page_size: int | None = None) -> PaginatedLLMModelListList:
        """
        List models

        Get list of available LLM models with filtering.
        """
        url = "/api/llm_models/"
        # httpx drops None-valued params, so omitted arguments are not sent.
        response = self._client.get(url, params={"page": page, "page_size": page_size})
        self._raise_for_status(response)
        return PaginatedLLMModelListList.model_validate(response.json())

    def retrieve(self, model_id: str) -> LLMModelDetail:
        """
        Get model details

        Get detailed information about a specific model.
        """
        url = f"/api/llm_models/{model_id}/"
        response = self._client.get(url)
        self._raise_for_status(response)
        return LLMModelDetail.model_validate(response.json())

    def calculate_cost_create(self, model_id: str, data: CostCalculationRequestRequest) -> CostCalculationResponse:
        """
        Calculate cost

        Calculate cost for a request.
        """
        url = f"/api/llm_models/{model_id}/calculate-cost/"
        response = self._client.post(url, json=data.model_dump(exclude_unset=True))
        self._raise_for_status(response)
        return CostCalculationResponse.model_validate(response.json())

    def providers_retrieve(self) -> ProvidersResponse:
        """
        List providers

        Get list of available providers.
        """
        url = "/api/llm_models/providers/"
        response = self._client.get(url)
        self._raise_for_status(response)
        return ProvidersResponse.model_validate(response.json())

    def stats_retrieve(self) -> StatsResponse:
        """
        Get statistics

        Get model statistics.
        """
        url = "/api/llm_models/stats/"
        response = self._client.get(url)
        self._raise_for_status(response)
        return StatsResponse.model_validate(response.json())
98
+
99
+