indoxrouter-0.1.2-py3-none-any.whl → indoxrouter-0.1.3-py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- indoxrouter-0.1.3.dist-info/METADATA +188 -0
- indoxrouter-0.1.3.dist-info/RECORD +4 -0
- indoxrouter-0.1.3.dist-info/top_level.txt +1 -0
- indoxRouter/__init__.py +0 -83
- indoxRouter/client.py +0 -632
- indoxRouter/client_resourses/__init__.py +0 -20
- indoxRouter/client_resourses/base.py +0 -67
- indoxRouter/client_resourses/chat.py +0 -144
- indoxRouter/client_resourses/completion.py +0 -138
- indoxRouter/client_resourses/embedding.py +0 -83
- indoxRouter/client_resourses/image.py +0 -116
- indoxRouter/client_resourses/models.py +0 -114
- indoxRouter/config.py +0 -151
- indoxRouter/constants/__init__.py +0 -81
- indoxRouter/exceptions/__init__.py +0 -70
- indoxRouter/models/__init__.py +0 -111
- indoxRouter/providers/__init__.py +0 -108
- indoxRouter/providers/ai21labs.json +0 -128
- indoxRouter/providers/base_provider.py +0 -101
- indoxRouter/providers/claude.json +0 -164
- indoxRouter/providers/cohere.json +0 -116
- indoxRouter/providers/databricks.json +0 -110
- indoxRouter/providers/deepseek.json +0 -110
- indoxRouter/providers/google.json +0 -128
- indoxRouter/providers/meta.json +0 -128
- indoxRouter/providers/mistral.json +0 -146
- indoxRouter/providers/nvidia.json +0 -110
- indoxRouter/providers/openai.json +0 -308
- indoxRouter/providers/openai.py +0 -521
- indoxRouter/providers/qwen.json +0 -110
- indoxRouter/utils/__init__.py +0 -240
- indoxrouter-0.1.2.dist-info/LICENSE +0 -21
- indoxrouter-0.1.2.dist-info/METADATA +0 -259
- indoxrouter-0.1.2.dist-info/RECORD +0 -33
- indoxrouter-0.1.2.dist-info/top_level.txt +0 -1
- {indoxrouter-0.1.2.dist-info → indoxrouter-0.1.3.dist-info}/WHEEL +0 -0
indoxRouter/client.py
DELETED
@@ -1,632 +0,0 @@
"""
IndoxRouter Client Module

This module provides a client for interacting with the IndoxRouter API, which serves as a unified
interface to multiple AI providers and models. The client handles authentication, rate limiting,
error handling, and provides a standardized response format across different AI services.

The Client class offers methods for:
- Authentication and session management
- Making API requests with automatic token refresh
- Accessing AI capabilities: chat completions, text completions, embeddings, and image generation
- Retrieving information about available providers and models
- Monitoring usage statistics

Usage example:
```python
from indoxRouter import Client

# Initialize client with API key
client = Client(api_key="your_api_key")

# Get available models
models = client.models()

# Generate a chat completion
response = client.chat([
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Tell me a joke."}
], provider="openai", model="gpt-3.5-turbo")

# Generate text embeddings
embeddings = client.embeddings("This is a sample text")

# Clean up resources when done
client.close()
```

The client can also be used as a context manager:
```python
with Client(api_key="your_api_key") as client:
    response = client.chat([{"role": "user", "content": "Hello!"}])
```
"""

import os
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Union
import requests
import jwt
from uuid import uuid4

from .exceptions import (
    AuthenticationError,
    NetworkError,
    RateLimitError,
    ProviderError,
    ModelNotFoundError,
    ProviderNotFoundError,
    InvalidParametersError
)
from .models import (
    ChatMessage,
    ChatResponse,
    CompletionResponse,
    EmbeddingResponse,
    ImageResponse,
    ModelInfo,
)
from .config import get_config
from .client_resourses import (
    Chat,
    Completions,
    Embeddings,
    Images,
    Models,
)
from .constants import (
    DEFAULT_API_VERSION,
    DEFAULT_TIMEOUT,
    DEFAULT_BASE_URL,
    ERROR_INVALID_API_KEY,
    ERROR_MODEL_NOT_FOUND,
    ERROR_PROVIDER_NOT_FOUND,
    ERROR_INVALID_PARAMETERS,
    ERROR_RATE_LIMIT,
    ERROR_REQUEST_FAILED,
)

logger = logging.getLogger(__name__)

class Client:
    """
    Client for the IndoxRouter API that provides a unified interface to multiple AI providers.

    The Client class handles:
    - Authentication and token management with automatic refresh
    - Rate limiting and quota tracking
    - Standardized error handling across providers
    - Consistent response formatting

    This client provides access to various AI capabilities including:
    - Chat completions (chat)
    - Text completions (completion)
    - Text embeddings (embeddings)
    - Image generation (image)

    It also offers methods to retrieve information about available providers and models,
    as well as usage statistics for the authenticated user.

    The client can be used directly or as a context manager with the 'with' statement.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: str = f"{DEFAULT_BASE_URL}/{DEFAULT_API_VERSION}",
        timeout: int = DEFAULT_TIMEOUT,
        auto_refresh: bool = True,
    ):
        """
        Initialize the client with authentication and session management

        Args:
            api_key: User's API key (default: INDOX_ROUTER_API_KEY env var)
            base_url: Base URL for the API server
            timeout: Request timeout in seconds
            auto_refresh: Enable automatic token refresh
        """
        # Authentication setup
        self.api_key = api_key or os.getenv("INDOX_ROUTER_API_KEY")
        if not self.api_key:
            raise AuthenticationError(ERROR_INVALID_API_KEY)

        self.base_url = base_url
        self.timeout = timeout
        self.auto_refresh = auto_refresh
        self.config = get_config()

        # Session state management
        self.session = requests.Session()
        self._auth_token = None
        self._token_expiry = None
        self.user_info = None
        self.rate_limits = {}

        # Initialize resources
        self._init_resources()
        self._authenticate()

    def _init_resources(self):
        """Initialize resource controllers"""
        self._chat = Chat(self)
        self._completions = Completions(self)
        self._embeddings = Embeddings(self)
        self._images = Images(self)
        self._models = Models(self)

        # Backward compatibility
        self.chat = self._chat
        self.completions = self._completions
        self.embeddings = self._embeddings
        self.images = self._images
        self.models = self._models

    def _authenticate(self):
        """Full authentication flow with the API server"""
        try:
            # Get authentication token
            auth_response = self.session.post(
                f"{self.base_url}/auth/token",
                json={"api_key": self.api_key},
                timeout=self.timeout
            )
            auth_response.raise_for_status()

            auth_data = auth_response.json()
            self._process_auth_data(auth_data)

            logger.info("Authenticated as user: %s", self.user_info['email'])

        except requests.exceptions.RequestException as e:
            raise NetworkError(f"Authentication failed: {str(e)}") from e
        except jwt.PyJWTError as e:
            raise AuthenticationError(f"Token validation failed: {str(e)}") from e
        except KeyError as e:
            raise AuthenticationError(f"Invalid auth response: {str(e)}") from e

    def _process_auth_data(self, auth_data: dict):
        """Process authentication response data"""
        # Validate and decode JWT
        decoded_token = jwt.decode(
            auth_data['access_token'],
            self.config.JWT_PUBLIC_KEY,
            algorithms=["RS256"],
            audience="indox-router-api"
        )

        # Update session state
        self._auth_token = auth_data['access_token']
        self._token_expiry = datetime.fromtimestamp(decoded_token['exp'])
        self.user_info = decoded_token['user']
        self.rate_limits = decoded_token.get('rate_limits', {})

        # Set default headers
        self.session.headers.update({
            "Authorization": f"Bearer {self._auth_token}",
            "Content-Type": "application/json"
        })

    def _check_auth(self):
        """Validate authentication state"""
        if datetime.now() >= self._token_expiry:
            if self.auto_refresh:
                self._refresh_token()
            else:
                raise AuthenticationError("Session expired")

    def _refresh_token(self):
        """Refresh access token using refresh token"""
        try:
            refresh_response = self.session.post(
                f"{self.base_url}/auth/refresh",
                timeout=self.timeout
            )
            refresh_response.raise_for_status()

            refresh_data = refresh_response.json()
            self._process_auth_data(refresh_data)

            logger.debug("Successfully refreshed authentication token")

        except requests.exceptions.RequestException as e:
            raise AuthenticationError(f"Token refresh failed: {str(e)}") from e

    def _check_rate_limit(self, endpoint: str):
        """Check rate limits for specific endpoint"""
        limits = self.rate_limits.get(endpoint, {})
        if limits.get('remaining', 1) <= 0:
            reset_time = datetime.fromtimestamp(limits.get('reset', datetime.now().timestamp()))
            raise RateLimitError(
                f"{ERROR_RATE_LIMIT} for {endpoint}. Resets at {reset_time}",
                reset_time=reset_time
            )

    def _make_request(self, method: str, endpoint: str, **kwargs) -> Dict[str, Any]:
        """
        Unified request handler with:
        - Authentication checks
        - Rate limiting
        - Error handling
        - Response standardization
        """
        self._check_auth()
        self._check_rate_limit(endpoint)

        request_id = uuid4().hex
        url = f"{self.base_url}/{endpoint}"
        start_time = datetime.now()

        try:
            response = self.session.request(
                method=method,
                url=url,
                timeout=self.timeout,
                **kwargs
            )
            duration = (datetime.now() - start_time).total_seconds()

            # Update rate limits from headers
            self._update_rate_limits(response.headers)

            response.raise_for_status()
            return self._format_success(response.json(), request_id, duration, endpoint)

        except requests.exceptions.RequestException as e:
            error_response = self._format_error(e, request_id, duration, endpoint)

            # Map HTTP errors to appropriate exception types
            if hasattr(e, 'response') and e.response is not None:
                status_code = e.response.status_code
                error_data = {}

                try:
                    error_data = e.response.json().get('error', {})
                except ValueError:
                    pass

                error_type = error_data.get('type', '')

                if status_code == 404:
                    if 'provider' in error_type.lower():
                        raise ProviderNotFoundError(f"{ERROR_PROVIDER_NOT_FOUND}: {error_data.get('message', str(e))}")
                    elif 'model' in error_type.lower():
                        raise ModelNotFoundError(f"{ERROR_MODEL_NOT_FOUND}: {error_data.get('message', str(e))}")
                elif status_code == 400:
                    raise InvalidParametersError(f"{ERROR_INVALID_PARAMETERS}: {error_data.get('message', str(e))}")
                elif status_code == 429:
                    reset_time = datetime.fromtimestamp(error_data.get('reset', datetime.now().timestamp() + 60))
                    raise RateLimitError(f"{ERROR_RATE_LIMIT}: {error_data.get('message', str(e))}", reset_time=reset_time)
                elif status_code >= 500:
                    raise ProviderError(f"{ERROR_REQUEST_FAILED}: {error_data.get('message', str(e))}")

            raise NetworkError(f"Request failed: {str(e)}")

    def _update_rate_limits(self, headers: Dict[str, str]):
        """Update rate limits from response headers"""
        for key, value in headers.items():
            if key.startswith('X-Ratelimit-'):
                endpoint = key.split('-')[-1]
                self.rate_limits[endpoint] = {
                    'limit': int(headers[f'X-Ratelimit-Limit-{endpoint}']),
                    'remaining': int(headers[f'X-Ratelimit-Remaining-{endpoint}']),
                    'reset': int(headers[f'X-Ratelimit-Reset-{endpoint}'])
                }

    def _format_success(self, data: dict, request_id: str, duration: float, endpoint: str) -> Dict[str, Any]:
        """Standard success response format"""
        return {
            "request_id": request_id,
            "data": data.get('result'),
            "metadata": {
                "endpoint": endpoint,
                "duration": duration,
                "timestamp": datetime.now().isoformat(),
                **data.get('metadata', {})
            },
            "error": None
        }

    def _format_error(self, error: Exception, request_id: str, duration: float, endpoint: str) -> Dict[str, Any]:
        """Standard error response format"""
        error_info = {
            "request_id": request_id,
            "data": None,
            "metadata": {
                "endpoint": endpoint,
                "duration": duration,
                "timestamp": datetime.now().isoformat()
            },
            "error": {
                "type": error.__class__.__name__,
                "message": str(error),
                "details": {}
            }
        }

        if isinstance(error, requests.exceptions.HTTPError):
            error_info["error"]["code"] = error.response.status_code
            error_info["error"]["details"] = error.response.json().get('error', {})

        logger.error("Request %s failed: %s", request_id, error)
        return error_info

    # Resource proxy methods
    def providers(self) -> List[Dict[str, Any]]:
        """
        Get a list of all available AI providers.

        Returns:
            List[Dict[str, Any]]: A list of provider information dictionaries, each containing
                details such as provider ID, name, description, and supported features.

        Example:
            ```python
            providers = client.providers()
            for provider in providers:
                print(f"{provider['id']}: {provider['name']}")
            ```
        """
        return self._make_request('GET', 'providers')['data']

    def models(self, provider: Optional[str] = None) -> Dict[str, List[Dict[str, Any]]]:
        """
        Get available models, optionally filtered by provider.

        Args:
            provider (Optional[str]): Provider ID to filter models by. If None, returns models from all providers.

        Returns:
            Dict[str, List[Dict[str, Any]]]: A dictionary mapping provider IDs to lists of model information.
                Each model information dictionary contains details such as model ID, name, capabilities, and pricing.

        Example:
            ```python
            # Get all models
            all_models = client.models()

            # Get models from a specific provider
            openai_models = client.models(provider="openai")
            ```
        """
        params = {"provider": provider} if provider else None
        return self._make_request('GET', 'models', params=params)['data']

    def model_info(self, provider: str, model: str) -> ModelInfo:
        """
        Get detailed information about a specific model.

        Args:
            provider (str): Provider ID (e.g., "openai", "anthropic")
            model (str): Model ID (e.g., "gpt-4", "claude-2")

        Returns:
            ModelInfo: Detailed information about the model, including capabilities,
                pricing, rate limits, and other provider-specific details.

        Raises:
            ModelNotFoundError: If the specified model doesn't exist or isn't available
            ProviderNotFoundError: If the specified provider doesn't exist

        Example:
            ```python
            model_details = client.model_info(provider="openai", model="gpt-4")
            print(f"Context window: {model_details.context_window} tokens")
            ```
        """
        try:
            return self._make_request('GET', f'models/{provider}/{model}')['data']
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                raise ModelNotFoundError(f"{ERROR_MODEL_NOT_FOUND}: '{model}' for provider '{provider}'")
            raise

    def chat(self, messages: List[Union[Dict[str, str], ChatMessage]], **kwargs) -> ChatResponse:
        """
        Generate a chat completion from a series of messages.

        Args:
            messages (List[Union[Dict[str, str], ChatMessage]]): A list of messages, where each message
                is either a dictionary with 'role' and 'content' keys, or a ChatMessage object.
            **kwargs: Additional parameters to pass to the provider, such as:
                - provider (str): Provider ID (e.g., "openai", "anthropic")
                - model (str): Model ID (e.g., "gpt-4", "claude-2")
                - temperature (float): Controls randomness (0.0-1.0)
                - max_tokens (int): Maximum number of tokens to generate
                - stream (bool): Whether to stream the response

        Returns:
            ChatResponse: The generated chat completion response, containing the assistant's message,
                usage statistics, and other metadata.

        Example:
            ```python
            response = client.chat([
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Tell me about AI."}
            ], provider="openai", model="gpt-4")

            print(response.message.content)
            ```
        """
        return self._chat(messages, **kwargs)

    def completion(self, prompt: str, **kwargs) -> CompletionResponse:
        """
        Generate a text completion from a prompt.

        Args:
            prompt (str): The text prompt to complete
            **kwargs: Additional parameters to pass to the provider, such as:
                - provider (str): Provider ID (e.g., "openai", "cohere")
                - model (str): Model ID (e.g., "text-davinci-003", "command")
                - temperature (float): Controls randomness (0.0-1.0)
                - max_tokens (int): Maximum number of tokens to generate
                - stream (bool): Whether to stream the response

        Returns:
            CompletionResponse: The generated completion response, containing the completed text,
                usage statistics, and other metadata.

        Example:
            ```python
            response = client.completion(
                "Once upon a time,",
                provider="openai",
                model="text-davinci-003"
            )

            print(response.text)
            ```
        """
        return self._completions(prompt, **kwargs)

    def embeddings(self, text: Union[str, List[str]], **kwargs) -> EmbeddingResponse:
        """
        Generate embeddings for text.

        Args:
            text (Union[str, List[str]]): Text or list of texts to generate embeddings for
            **kwargs: Additional parameters to pass to the provider, such as:
                - provider (str): Provider ID (e.g., "openai", "cohere")
                - model (str): Model ID (e.g., "text-embedding-ada-002")
                - dimensions (int): Desired dimensionality of the embeddings (if supported)

        Returns:
            EmbeddingResponse: The generated embeddings response, containing the vector representations,
                usage statistics, and other metadata.

        Example:
            ```python
            response = client.embeddings(
                ["Hello world", "AI is amazing"],
                provider="openai",
                model="text-embedding-ada-002"
            )

            for i, embedding in enumerate(response.embeddings):
                print(f"Embedding {i} has {len(embedding)} dimensions")
            ```
        """
        return self._embeddings(text, **kwargs)

    def image(self, prompt: str, **kwargs) -> ImageResponse:
        """
        Generate an image from a text prompt.

        Args:
            prompt (str): The text description of the image to generate
            **kwargs: Additional parameters to pass to the provider, such as:
                - provider (str): Provider ID (e.g., "openai", "stability")
                - model (str): Model ID (e.g., "dall-e-3", "stable-diffusion-xl")
                - size (str): Image size (e.g., "1024x1024")
                - quality (str): Image quality (e.g., "standard", "hd")
                - style (str): Image style (e.g., "vivid", "natural")
                - response_format (str): Format of the response (e.g., "url", "b64_json")

        Returns:
            ImageResponse: The generated image response, containing the image data or URLs,
                usage statistics, and other metadata.

        Example:
            ```python
            response = client.image(
                "A serene landscape with mountains and a lake",
                provider="openai",
                model="dall-e-3",
                size="1024x1024"
            )

            print(f"Image URL: {response.url}")
            ```
        """
        return self._images(prompt, **kwargs)

    # Additional features
    def get_usage(self) -> Dict[str, Any]:
        """
        Get current usage statistics for the authenticated user.

        Returns:
            Dict[str, Any]: Usage statistics including token counts, request counts,
                billing information, and quota limits.

        Example:
            ```python
            usage = client.get_usage()
            print(f"Total tokens used: {usage['total_tokens']}")
            print(f"Remaining quota: {usage['remaining_quota']}")
            ```
        """
        return self._make_request('GET', 'usage')['data']

    def get_user_info(self) -> Dict[str, Any]:
        """
        Get information about the authenticated user.

        Returns:
            Dict[str, Any]: User information including ID, name, email, account type,
                subscription details, and other user-specific data.

        Example:
            ```python
            user = client.get_user_info()
            print(f"User ID: {user['id']}")
            print(f"Account type: {user['account_type']}")
            ```
        """
        return self.user_info.copy()

    def close(self):
        """
        Clean up client resources and close the session.

        This method should be called when the client is no longer needed to ensure
        proper cleanup of resources, particularly the HTTP session.

        Example:
            ```python
            client = Client(api_key="your_api_key")
            # Use the client...
            client.close()  # Clean up when done
            ```
        """
        self.session.close()
        logger.info("Client session closed")

    def __enter__(self):
        """
        Enter the context manager.

        This method enables the client to be used as a context manager with the 'with' statement.

        Returns:
            Client: The client instance.

        Example:
            ```python
            with Client(api_key="your_api_key") as client:
                # Use the client within this block
                response = client.chat([{"role": "user", "content": "Hello!"}])
            # Client is automatically closed when exiting the block
            ```
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Exit the context manager.

        This method is called when exiting a 'with' block. It ensures the client
        is properly closed, even if an exception occurs within the block.

        Args:
            exc_type: The exception type, if an exception was raised in the with block, otherwise None
            exc_val: The exception value, if an exception was raised in the with block, otherwise None
            exc_tb: The traceback, if an exception was raised in the with block, otherwise None
        """
        self.close()

# Backward compatibility
IndoxRouter = Client
indoxRouter/client_resourses/__init__.py
DELETED
@@ -1,20 +0,0 @@
"""
Resources module for indoxRouter.
This module contains resource classes for different API endpoints.
"""

from .base import BaseResource
from .chat import Chat
from .completion import Completions
from .embedding import Embeddings
from .image import Images
from .models import Models

__all__ = [
    "BaseResource",
    "Chat",
    "Completions",
    "Embeddings",
    "Images",
    "Models",
]