indoxrouter 0.1.4__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,58 @@
1
+ """
2
+ IndoxRouter: A unified client for various AI providers.
3
+
4
+ This package provides a client for interacting with the IndoxRouter server,
5
+ which serves as a unified interface to multiple AI providers and models.
6
+
7
+ Example:
8
+ ```python
9
+ from indoxrouter import Client
10
+
11
+ # Initialize client with API key
12
+ client = Client(api_key="your_api_key")
13
+
14
+ # Generate a chat completion
15
+ response = client.chat([
16
+ {"role": "system", "content": "You are a helpful assistant."},
17
+ {"role": "user", "content": "Tell me a joke."}
18
+ ], model="openai/gpt-4o-mini")
19
+
20
+ print(response["data"])
21
+ ```
22
+
23
+ For custom server URLs:
24
+ ```python
25
+ # Connect to a specific server
26
+ client = Client(
27
+ api_key="your_api_key",
28
+ base_url="http://your-custom-server:8000"
29
+ )
30
+ ```
31
+ """
32
+
33
+ from .client import Client
34
+ from .exceptions import (
35
+ IndoxRouterError,
36
+ AuthenticationError,
37
+ NetworkError,
38
+ RateLimitError,
39
+ ProviderError,
40
+ ModelNotFoundError,
41
+ ProviderNotFoundError,
42
+ InvalidParametersError,
43
+ InsufficientCreditsError,
44
+ )
45
+
46
+ __version__ = "0.2.1"
47
+ __all__ = [
48
+ "Client",
49
+ "IndoxRouterError",
50
+ "AuthenticationError",
51
+ "NetworkError",
52
+ "RateLimitError",
53
+ "ProviderError",
54
+ "ModelNotFoundError",
55
+ "ProviderNotFoundError",
56
+ "InvalidParametersError",
57
+ "InsufficientCreditsError",
58
+ ]
indoxrouter/client.py ADDED
@@ -0,0 +1,672 @@
1
+ """
2
+ IndoxRouter Client Module
3
+
4
+ This module provides a client for interacting with the IndoxRouter API, which serves as a unified
5
+ interface to multiple AI providers and models. The client handles authentication, rate limiting,
6
+ error handling, and provides a standardized response format across different AI services.
7
+
8
+ The Client class offers methods for:
9
+ - Authentication and session management
10
+ - Making API requests with automatic token refresh
11
+ - Accessing AI capabilities: chat completions, text completions, embeddings, and image generation
12
+ - Retrieving information about available providers and models
13
+ - Monitoring usage statistics and credit consumption
14
+
15
+ Usage example:
16
+ ```python
17
+ from indoxrouter import Client
18
+
19
+ # Initialize client with API key
20
+ client = Client(api_key="your_api_key")
21
+
22
+ # Get available models
23
+ models = client.models()
24
+
25
+ # Generate a chat completion
26
+ response = client.chat([
27
+ {"role": "system", "content": "You are a helpful assistant."},
28
+ {"role": "user", "content": "Tell me a joke."}
29
+ ], model="openai/gpt-4o-mini")
30
+
31
+ # Generate text embeddings
32
+ embeddings = client.embeddings("This is a sample text", model="openai/text-embedding-ada-002")
33
+
34
+ # Clean up resources when done
35
+ client.close()
36
+ ```
37
+
38
+ The client can also be used as a context manager:
39
+ ```python
40
+ with Client(api_key="your_api_key") as client:
41
+ response = client.chat([{"role": "user", "content": "Hello!"}], model="openai/gpt-4o-mini")
42
+ ```
43
+ """
44
+
45
+ import os
46
+ import logging
47
+ from datetime import datetime, timedelta
48
+ from typing import Dict, List, Any, Optional, Union
49
+ import requests
50
+ import json
51
+
52
+ from .exceptions import (
53
+ AuthenticationError,
54
+ NetworkError,
55
+ ProviderNotFoundError,
56
+ ModelNotFoundError,
57
+ InvalidParametersError,
58
+ RateLimitError,
59
+ ProviderError,
60
+ InsufficientCreditsError,
61
+ )
62
+ from .constants import (
63
+ DEFAULT_BASE_URL,
64
+ DEFAULT_TIMEOUT,
65
+ DEFAULT_MODEL,
66
+ DEFAULT_EMBEDDING_MODEL,
67
+ DEFAULT_IMAGE_MODEL,
68
+ CHAT_ENDPOINT,
69
+ COMPLETION_ENDPOINT,
70
+ EMBEDDING_ENDPOINT,
71
+ IMAGE_ENDPOINT,
72
+ MODEL_ENDPOINT,
73
+ USAGE_ENDPOINT,
74
+ )
75
+
76
+ logger = logging.getLogger(__name__)
77
+
78
+
79
class Client:
    """
    Client for interacting with the IndoxRouter API.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        timeout: int = DEFAULT_TIMEOUT,
    ):
        """
        Initialize the client.

        Args:
            api_key: API key for authentication. If not provided, the client
                falls back to the INDOX_ROUTER_API_KEY environment variable.
            base_url: Custom base URL for the API; defaults to DEFAULT_BASE_URL.
            timeout: Request timeout in seconds.

        Raises:
            ValueError: If no API key could be resolved.
        """
        resolved_key = api_key or os.environ.get("INDOX_ROUTER_API_KEY")
        if not resolved_key:
            raise ValueError(
                "API key must be provided either as an argument or as the INDOX_ROUTER_API_KEY environment variable."
            )
        self.api_key = resolved_key

        self.base_url = base_url or DEFAULT_BASE_URL
        self.timeout = timeout
        # One persistent session carries the bearer token for every request.
        self.session = requests.Session()
        self.session.headers.update({"Authorization": f"Bearer {self.api_key}"})
109
+
110
+ def enable_debug(self, level=logging.DEBUG):
111
+ """
112
+ Enable debug logging for the client.
113
+
114
+ Args:
115
+ level: Logging level (default: logging.DEBUG)
116
+ """
117
+ handler = logging.StreamHandler()
118
+ handler.setFormatter(
119
+ logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
120
+ )
121
+ logger.addHandler(handler)
122
+ logger.setLevel(level)
123
+ logger.debug("Debug logging enabled")
124
+
125
+ def _request(
126
+ self,
127
+ method: str,
128
+ endpoint: str,
129
+ data: Optional[Dict[str, Any]] = None,
130
+ stream: bool = False,
131
+ ) -> Any:
132
+ """
133
+ Make a request to the API.
134
+
135
+ Args:
136
+ method: HTTP method (GET, POST, etc.)
137
+ endpoint: API endpoint
138
+ data: Request data
139
+ stream: Whether to stream the response
140
+
141
+ Returns:
142
+ Response data
143
+ """
144
+ # Add API version prefix if not already present
145
+ if not endpoint.startswith("api/v1/") and not endpoint.startswith("/api/v1/"):
146
+ endpoint = f"api/v1/{endpoint}"
147
+
148
+ # Remove any leading slash for consistent URL construction
149
+ if endpoint.startswith("/"):
150
+ endpoint = endpoint[1:]
151
+
152
+ url = f"{self.base_url}/{endpoint}"
153
+ headers = {"Content-Type": "application/json"}
154
+
155
+ # logger.debug(f"Making {method} request to {url}")
156
+ # if data:
157
+ # logger.debug(f"Request data: {json.dumps(data, indent=2)}")
158
+
159
+ # Diagnose potential issues with the request
160
+ if method == "POST" and data:
161
+ diagnosis = self.diagnose_request(endpoint, data)
162
+ if not diagnosis["is_valid"]:
163
+ issues_str = "\n".join([f"- {issue}" for issue in diagnosis["issues"]])
164
+ logger.warning(f"Request validation issues:\n{issues_str}")
165
+ # We'll still send the request, but log the issues
166
+
167
+ try:
168
+ response = self.session.request(
169
+ method,
170
+ url,
171
+ headers=headers,
172
+ json=data,
173
+ timeout=self.timeout,
174
+ stream=stream,
175
+ )
176
+
177
+ if stream:
178
+ return response
179
+
180
+ response.raise_for_status()
181
+ return response.json()
182
+ except requests.HTTPError as e:
183
+ error_data = {}
184
+ try:
185
+ error_data = e.response.json()
186
+ logger.error(f"HTTP error response: {json.dumps(error_data, indent=2)}")
187
+ except (ValueError, AttributeError):
188
+ error_data = {"detail": str(e)}
189
+ logger.error(f"HTTP error (no JSON response): {str(e)}")
190
+
191
+ status_code = getattr(e.response, "status_code", 500)
192
+ error_message = error_data.get("detail", str(e))
193
+
194
+ if status_code == 401:
195
+ raise AuthenticationError(f"Authentication failed: {error_message}")
196
+ elif status_code == 404:
197
+ if "provider" in error_message.lower():
198
+ raise ProviderNotFoundError(error_message)
199
+ elif "model" in error_message.lower():
200
+ raise ModelNotFoundError(error_message)
201
+ else:
202
+ raise NetworkError(
203
+ f"Resource not found: {error_message} (URL: {url})"
204
+ )
205
+ elif status_code == 429:
206
+ raise RateLimitError(f"Rate limit exceeded: {error_message}")
207
+ elif status_code == 400:
208
+ raise InvalidParametersError(f"Invalid parameters: {error_message}")
209
+ elif status_code == 402:
210
+ raise InsufficientCreditsError(f"Insufficient credits: {error_message}")
211
+ elif status_code == 500:
212
+ # Provide more detailed information for server errors
213
+ error_detail = error_data.get("detail", "No details provided")
214
+ # Include the request data in the error message for better debugging
215
+ request_data_str = json.dumps(data, indent=2) if data else "None"
216
+ raise ProviderError(
217
+ f"Server error (500): {error_detail}. URL: {url}.\n"
218
+ f"Request data: {request_data_str}\n"
219
+ f"This may indicate an issue with the server configuration or a problem with the provider service."
220
+ )
221
+ else:
222
+ raise ProviderError(f"Provider error ({status_code}): {error_message}")
223
+ except requests.RequestException as e:
224
+ logger.error(f"Request exception: {str(e)}")
225
+ raise NetworkError(f"Network error: {str(e)}")
226
+
227
+ def _format_model_string(self, model: str) -> str:
228
+ """
229
+ Format the model string in a way that the server expects.
230
+
231
+ The server might be expecting a different format than "provider/model".
232
+ This method handles different formatting requirements.
233
+
234
+ Args:
235
+ model: Model string in the format "provider/model"
236
+
237
+ Returns:
238
+ Formatted model string
239
+ """
240
+ if not model or "/" not in model:
241
+ return model
242
+
243
+ # The standard format is "provider/model"
244
+ # But the server might be expecting something different
245
+ provider, model_name = model.split("/", 1)
246
+
247
+ # For now, return the original format as it seems the server
248
+ # is having issues with JSON formatted model strings
249
+ return model
250
+
251
+ def chat(
252
+ self,
253
+ messages: List[Dict[str, str]],
254
+ model: str = DEFAULT_MODEL,
255
+ temperature: float = 0.7,
256
+ max_tokens: Optional[int] = None,
257
+ stream: bool = False,
258
+ **kwargs,
259
+ ) -> Dict[str, Any]:
260
+ """
261
+ Generate a chat completion.
262
+
263
+ Args:
264
+ messages: List of messages in the conversation
265
+ model: Model to use in the format "provider/model" (e.g., "openai/gpt-4o-mini")
266
+ temperature: Sampling temperature
267
+ max_tokens: Maximum number of tokens to generate
268
+ stream: Whether to stream the response
269
+ **kwargs: Additional parameters to pass to the API
270
+
271
+ Returns:
272
+ Response data
273
+ """
274
+ # Format the model string
275
+ formatted_model = self._format_model_string(model)
276
+
277
+ # Filter out problematic parameters
278
+ filtered_kwargs = {}
279
+ for key, value in kwargs.items():
280
+ if key not in ["return_generator"]: # List of parameters to exclude
281
+ filtered_kwargs[key] = value
282
+
283
+ data = {
284
+ "messages": messages,
285
+ "model": formatted_model,
286
+ "temperature": temperature,
287
+ "max_tokens": max_tokens,
288
+ "stream": stream,
289
+ "additional_params": filtered_kwargs,
290
+ }
291
+
292
+ if stream:
293
+ response = self._request("POST", CHAT_ENDPOINT, data, stream=True)
294
+ return self._handle_streaming_response(response)
295
+ else:
296
+ return self._request("POST", CHAT_ENDPOINT, data)
297
+
298
+ def completion(
299
+ self,
300
+ prompt: str,
301
+ model: str = DEFAULT_MODEL,
302
+ temperature: float = 0.7,
303
+ max_tokens: Optional[int] = None,
304
+ stream: bool = False,
305
+ **kwargs,
306
+ ) -> Dict[str, Any]:
307
+ """
308
+ Generate a text completion.
309
+
310
+ Args:
311
+ prompt: Text prompt
312
+ model: Model to use in the format "provider/model" (e.g., "openai/gpt-4o-mini")
313
+ temperature: Sampling temperature
314
+ max_tokens: Maximum number of tokens to generate
315
+ stream: Whether to stream the response
316
+ **kwargs: Additional parameters to pass to the API
317
+
318
+ Returns:
319
+ Response data
320
+ """
321
+ # Format the model string
322
+ formatted_model = self._format_model_string(model)
323
+
324
+ # Filter out problematic parameters
325
+ filtered_kwargs = {}
326
+ for key, value in kwargs.items():
327
+ if key not in ["return_generator"]: # List of parameters to exclude
328
+ filtered_kwargs[key] = value
329
+
330
+ data = {
331
+ "prompt": prompt,
332
+ "model": formatted_model,
333
+ "temperature": temperature,
334
+ "max_tokens": max_tokens,
335
+ "stream": stream,
336
+ "additional_params": filtered_kwargs,
337
+ }
338
+
339
+ if stream:
340
+ response = self._request("POST", COMPLETION_ENDPOINT, data, stream=True)
341
+ return self._handle_streaming_response(response)
342
+ else:
343
+ return self._request("POST", COMPLETION_ENDPOINT, data)
344
+
345
+ def embeddings(
346
+ self,
347
+ text: Union[str, List[str]],
348
+ model: str = DEFAULT_EMBEDDING_MODEL,
349
+ **kwargs,
350
+ ) -> Dict[str, Any]:
351
+ """
352
+ Generate embeddings for text.
353
+
354
+ Args:
355
+ text: Text to embed (string or list of strings)
356
+ model: Model to use in the format "provider/model" (e.g., "openai/text-embedding-ada-002")
357
+ **kwargs: Additional parameters to pass to the API
358
+
359
+ Returns:
360
+ Response data with embeddings
361
+ """
362
+ # Format the model string
363
+ formatted_model = self._format_model_string(model)
364
+
365
+ # Filter out problematic parameters
366
+ filtered_kwargs = {}
367
+ for key, value in kwargs.items():
368
+ if key not in ["return_generator"]: # List of parameters to exclude
369
+ filtered_kwargs[key] = value
370
+
371
+ data = {
372
+ "text": text if isinstance(text, list) else [text],
373
+ "model": formatted_model,
374
+ "additional_params": filtered_kwargs,
375
+ }
376
+
377
+ return self._request("POST", EMBEDDING_ENDPOINT, data)
378
+
379
+ def images(
380
+ self,
381
+ prompt: str,
382
+ model: str = DEFAULT_IMAGE_MODEL,
383
+ size: str = "1024x1024",
384
+ n: int = 1,
385
+ quality: str = "standard",
386
+ style: str = "vivid",
387
+ **kwargs,
388
+ ) -> Dict[str, Any]:
389
+ """
390
+ Generate images from a prompt.
391
+
392
+ Args:
393
+ prompt: Text prompt
394
+ model: Model to use in the format "provider/model" (e.g., "openai/dall-e-3")
395
+ size: Image size (e.g., "1024x1024")
396
+ n: Number of images to generate
397
+ quality: Image quality (e.g., "standard", "hd")
398
+ style: Image style (e.g., "vivid", "natural")
399
+ **kwargs: Additional parameters to pass to the API
400
+
401
+ Returns:
402
+ Response data with image URLs
403
+ """
404
+ # Format the model string
405
+ formatted_model = self._format_model_string(model)
406
+
407
+ # Filter out problematic parameters
408
+ filtered_kwargs = {}
409
+ for key, value in kwargs.items():
410
+ if key not in ["return_generator"]: # List of parameters to exclude
411
+ filtered_kwargs[key] = value
412
+
413
+ data = {
414
+ "prompt": prompt,
415
+ "model": formatted_model,
416
+ "n": n,
417
+ "size": size,
418
+ "quality": quality,
419
+ "style": style,
420
+ "additional_params": filtered_kwargs,
421
+ }
422
+
423
+ return self._request("POST", IMAGE_ENDPOINT, data)
424
+
425
+ def models(self, provider: Optional[str] = None) -> Dict[str, Any]:
426
+ """
427
+ Get available models.
428
+
429
+ Args:
430
+ provider: Provider to filter by
431
+
432
+ Returns:
433
+ List of available models with pricing information
434
+ """
435
+ endpoint = MODEL_ENDPOINT
436
+ if provider:
437
+ endpoint = f"{MODEL_ENDPOINT}/{provider}"
438
+
439
+ return self._request("GET", endpoint)
440
+
441
+ def get_model_info(self, provider: str, model: str) -> Dict[str, Any]:
442
+ """
443
+ Get information about a specific model.
444
+
445
+ Args:
446
+ provider: Provider ID
447
+ model: Model ID
448
+
449
+ Returns:
450
+ Model information including pricing
451
+ """
452
+ return self._request("GET", f"{MODEL_ENDPOINT}/{provider}/{model}")
453
+
454
+ def get_usage(self) -> Dict[str, Any]:
455
+ """
456
+ Get usage statistics for the current user.
457
+
458
+ Returns:
459
+ Usage statistics
460
+ """
461
+ return self._request("GET", USAGE_ENDPOINT)
462
+
463
+ def test_connection(self) -> Dict[str, Any]:
464
+ """
465
+ Test the connection to the server and return server status information.
466
+
467
+ This method can be used to diagnose connection issues and verify that
468
+ the server is accessible and properly configured.
469
+
470
+ Returns:
471
+ Dictionary containing server status information
472
+ """
473
+ try:
474
+ # Try to access the base URL
475
+ response = self.session.get(self.base_url, timeout=self.timeout)
476
+
477
+ # Try to get server info if available
478
+ server_info = {}
479
+ try:
480
+ if response.headers.get("Content-Type", "").startswith(
481
+ "application/json"
482
+ ):
483
+ server_info = response.json()
484
+ except:
485
+ pass
486
+
487
+ return {
488
+ "status": "connected",
489
+ "url": self.base_url,
490
+ "status_code": response.status_code,
491
+ "server_info": server_info,
492
+ "headers": dict(response.headers),
493
+ }
494
+ except requests.RequestException as e:
495
+ return {
496
+ "status": "error",
497
+ "url": self.base_url,
498
+ "error": str(e),
499
+ "error_type": type(e).__name__,
500
+ }
501
+
502
+ def diagnose_request(self, endpoint: str, data: Dict[str, Any]) -> Dict[str, Any]:
503
+ """
504
+ Diagnose potential issues with a request before sending it to the server.
505
+
506
+ This method checks for common issues like malformed model strings,
507
+ invalid message formats, or missing required parameters.
508
+
509
+ Args:
510
+ endpoint: API endpoint
511
+ data: Request data
512
+
513
+ Returns:
514
+ Dictionary with diagnosis results
515
+ """
516
+ issues = []
517
+ warnings = []
518
+
519
+ # Check if this is a chat request
520
+ if endpoint == CHAT_ENDPOINT:
521
+ # Check model format
522
+ if "model" in data:
523
+ model = data["model"]
524
+ # Check if the model is already formatted as JSON
525
+ if (
526
+ isinstance(model, str)
527
+ and model.startswith("{")
528
+ and model.endswith("}")
529
+ ):
530
+ try:
531
+ model_json = json.loads(model)
532
+ if (
533
+ not isinstance(model_json, dict)
534
+ or "provider" not in model_json
535
+ or "model" not in model_json
536
+ ):
537
+ issues.append(f"Invalid model JSON format: {model}")
538
+ except json.JSONDecodeError:
539
+ issues.append(f"Invalid model JSON format: {model}")
540
+ elif not isinstance(model, str):
541
+ issues.append(f"Model must be a string, got {type(model).__name__}")
542
+ elif "/" not in model:
543
+ issues.append(
544
+ f"Model '{model}' is missing provider prefix (should be 'provider/model')"
545
+ )
546
+ else:
547
+ provider, model_name = model.split("/", 1)
548
+ if not provider or not model_name:
549
+ issues.append(
550
+ f"Invalid model format: '{model}'. Should be 'provider/model'"
551
+ )
552
+ else:
553
+ warnings.append("No model specified, will use default model")
554
+
555
+ # Check messages format
556
+ if "messages" in data:
557
+ messages = data["messages"]
558
+ if not isinstance(messages, list):
559
+ issues.append(
560
+ f"Messages must be a list, got {type(messages).__name__}"
561
+ )
562
+ elif not messages:
563
+ issues.append("Messages list is empty")
564
+ else:
565
+ for i, msg in enumerate(messages):
566
+ if not isinstance(msg, dict):
567
+ issues.append(
568
+ f"Message {i} must be a dictionary, got {type(msg).__name__}"
569
+ )
570
+ elif "role" not in msg:
571
+ issues.append(f"Message {i} is missing 'role' field")
572
+ elif "content" not in msg:
573
+ issues.append(f"Message {i} is missing 'content' field")
574
+ else:
575
+ issues.append("No messages specified")
576
+
577
+ # Check if this is a completion request
578
+ elif endpoint == COMPLETION_ENDPOINT:
579
+ # Check model format (same as chat)
580
+ if "model" in data:
581
+ model = data["model"]
582
+ if not isinstance(model, str):
583
+ issues.append(f"Model must be a string, got {type(model).__name__}")
584
+ elif "/" not in model:
585
+ issues.append(
586
+ f"Model '{model}' is missing provider prefix (should be 'provider/model')"
587
+ )
588
+ else:
589
+ warnings.append("No model specified, will use default model")
590
+
591
+ # Check prompt
592
+ if "prompt" not in data:
593
+ issues.append("No prompt specified")
594
+ elif not isinstance(data["prompt"], str):
595
+ issues.append(
596
+ f"Prompt must be a string, got {type(data['prompt']).__name__}"
597
+ )
598
+
599
+ # Return diagnosis results
600
+ return {
601
+ "endpoint": endpoint,
602
+ "issues": issues,
603
+ "warnings": warnings,
604
+ "is_valid": len(issues) == 0,
605
+ "data": data,
606
+ }
607
+
608
+ def _handle_streaming_response(self, response):
609
+ """
610
+ Handle a streaming response.
611
+
612
+ Args:
613
+ response: Streaming response
614
+
615
+ Returns:
616
+ Generator yielding response chunks
617
+ """
618
+ try:
619
+ for line in response.iter_lines():
620
+ if line:
621
+ line = line.decode("utf-8")
622
+ if line.startswith("data: "):
623
+ data = line[6:]
624
+ if data == "[DONE]":
625
+ break
626
+ try:
627
+ # Parse JSON chunk
628
+ chunk = json.loads(data)
629
+
630
+ # For chat responses, return the processed chunk
631
+ # with data field for backward compatibility
632
+ if "choices" in chunk:
633
+ # For delta responses (streaming)
634
+ choice = chunk["choices"][0]
635
+ if "delta" in choice and "content" in choice["delta"]:
636
+ # Add a data field for backward compatibility
637
+ chunk["data"] = choice["delta"]["content"]
638
+ # For text responses (completion)
639
+ elif "text" in choice:
640
+ chunk["data"] = choice["text"]
641
+
642
+ yield chunk
643
+ except json.JSONDecodeError:
644
+ # For raw text responses
645
+ yield {"data": data}
646
+ finally:
647
+ response.close()
648
+
649
+ def close(self):
650
+ """Close the session."""
651
+ self.session.close()
652
+
653
+ def __enter__(self):
654
+ """Enter context manager."""
655
+ return self
656
+
657
+ def __exit__(self, exc_type, exc_val, exc_tb):
658
+ """Exit context manager."""
659
+ self.close()
660
+
661
+ def set_base_url(self, base_url: str) -> None:
662
+ """
663
+ Set a new base URL for the API.
664
+
665
+ Args:
666
+ base_url: New base URL for the API.
667
+ """
668
+ self.base_url = base_url
669
+ logger.debug(f"Base URL set to {base_url}")
670
+
671
+
672
+ IndoxRouter = Client
@@ -0,0 +1,31 @@
1
+ """
2
+ Constants for the IndoxRouter client.
3
+ """
4
+
5
+ # API settings
6
+ DEFAULT_API_VERSION = "v1"
7
+ DEFAULT_BASE_URL = "https://91.107.253.133" # Production server IP
8
+ # DEFAULT_BASE_URL = "http://localhost:8000" # Local development server
9
+ DEFAULT_TIMEOUT = 60
10
+
11
+ # Default models
12
+ DEFAULT_MODEL = "openai/gpt-4o-mini"
13
+ DEFAULT_EMBEDDING_MODEL = "openai/text-embedding-3-small"
14
+ DEFAULT_IMAGE_MODEL = "openai/dall-e-3"
15
+
16
+ # API endpoints
17
+ CHAT_ENDPOINT = "chat/completions"
18
+ COMPLETION_ENDPOINT = "completions"
19
+ EMBEDDING_ENDPOINT = "embeddings"
20
+ IMAGE_ENDPOINT = "images/generations"
21
+ MODEL_ENDPOINT = "models"
22
+ USAGE_ENDPOINT = "user/usage"
23
+
24
+ # Error messages
25
+ ERROR_INVALID_API_KEY = "API key must be provided either as an argument or as the INDOXROUTER_API_KEY environment variable"
26
+ ERROR_NETWORK = "Network error occurred while communicating with the IndoxRouter API"
27
+ ERROR_RATE_LIMIT = "Rate limit exceeded for the IndoxRouter API"
28
+ ERROR_PROVIDER_NOT_FOUND = "Provider not found"
29
+ ERROR_MODEL_NOT_FOUND = "Model not found"
30
+ ERROR_INVALID_PARAMETERS = "Invalid parameters provided"
31
+ ERROR_INSUFFICIENT_CREDITS = "Insufficient credits"
@@ -0,0 +1,62 @@
1
+ """
2
+ Exceptions for the IndoxRouter client.
3
+ """
4
+
5
+ from datetime import datetime
6
+ from typing import Optional
7
+
8
+
9
+ class IndoxRouterError(Exception):
10
+ """Base exception for all IndoxRouter errors."""
11
+
12
+ pass
13
+
14
+
15
+ class AuthenticationError(IndoxRouterError):
16
+ """Raised when authentication fails."""
17
+
18
+ pass
19
+
20
+
21
+ class NetworkError(IndoxRouterError):
22
+ """Raised when a network error occurs."""
23
+
24
+ pass
25
+
26
+
27
+ class RateLimitError(IndoxRouterError):
28
+ """Raised when rate limits are exceeded."""
29
+
30
+ def __init__(self, message: str, reset_time: Optional[datetime] = None):
31
+ super().__init__(message)
32
+ self.reset_time = reset_time
33
+
34
+
35
+ class ProviderError(IndoxRouterError):
36
+ """Raised when a provider returns an error."""
37
+
38
+ pass
39
+
40
+
41
+ class ModelNotFoundError(ProviderError):
42
+ """Raised when a requested model is not found."""
43
+
44
+ pass
45
+
46
+
47
+ class ProviderNotFoundError(ProviderError):
48
+ """Raised when a requested provider is not found."""
49
+
50
+ pass
51
+
52
+
53
+ class InvalidParametersError(IndoxRouterError):
54
+ """Raised when invalid parameters are provided."""
55
+
56
+ pass
57
+
58
+
59
+ class InsufficientCreditsError(IndoxRouterError):
60
+ """Raised when the user doesn't have enough credits."""
61
+
62
+ pass
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: indoxrouter
3
- Version: 0.1.4
3
+ Version: 0.1.7
4
4
  Summary: A unified client for various AI providers
5
5
  Home-page: https://github.com/indoxrouter/indoxrouter
6
6
  Author: indoxRouter Team
@@ -63,18 +63,6 @@ from indoxrouter import Client
63
63
  # Initialize with API key (default connects to localhost:8000)
64
64
  client = Client(api_key="your_api_key")
65
65
 
66
- # Or specify a custom server URL
67
- client = Client(
68
- api_key="your_api_key",
69
- base_url="http://your-server-url:8000"
70
- )
71
-
72
- # Connect to Docker container inside the Docker network
73
- client = Client(
74
- api_key="your_api_key",
75
- base_url="http://indoxrouter-server:8000"
76
- )
77
-
78
66
  # Using environment variables
79
67
  # Set INDOX_ROUTER_API_KEY environment variable
80
68
  import os
@@ -0,0 +1,8 @@
1
+ indoxrouter/__init__.py,sha256=28pdx482uGFF_S1msov0LTTGsFTvVBKRqMkDmoXWUBY,1416
2
+ indoxrouter/client.py,sha256=1XhBiz6CBoN_jNgKmmHvyXecdHZftDckssY2lAir_tA,24044
3
+ indoxrouter/constants.py,sha256=fBY0HNsVqiqk29QR2nkU_GjLEhT7IHg5k1d0wAIHDo8,1112
4
+ indoxrouter/exceptions.py,sha256=0ULxtK9va4718PGTO5VoClXYEJeojpiM-7AganeiZZ4,1263
5
+ indoxrouter-0.1.7.dist-info/METADATA,sha256=ZsIozuWEnCp_8oXrR1ABmTgiu2-4E3wrfaTOdjyW1xM,4971
6
+ indoxrouter-0.1.7.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
7
+ indoxrouter-0.1.7.dist-info/top_level.txt,sha256=v6FGWkw0QAnXhyYtnXLI1cxzna0iveNvZUotVzCWabM,12
8
+ indoxrouter-0.1.7.dist-info/RECORD,,
@@ -0,0 +1 @@
1
+ indoxrouter
@@ -1,4 +0,0 @@
1
- indoxrouter-0.1.4.dist-info/METADATA,sha256=LdzdY3TuKijYVTtjvyWGSG9rvN0oYjLll8oSJ3Zr01k,5257
2
- indoxrouter-0.1.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
3
- indoxrouter-0.1.4.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
4
- indoxrouter-0.1.4.dist-info/RECORD,,
@@ -1 +0,0 @@
1
-