indoxrouter 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. indoxRouter/__init__.py +83 -0
  2. indoxRouter/client.py +564 -218
  3. indoxRouter/client_resourses/__init__.py +20 -0
  4. indoxRouter/client_resourses/base.py +67 -0
  5. indoxRouter/client_resourses/chat.py +144 -0
  6. indoxRouter/client_resourses/completion.py +138 -0
  7. indoxRouter/client_resourses/embedding.py +83 -0
  8. indoxRouter/client_resourses/image.py +116 -0
  9. indoxRouter/client_resourses/models.py +114 -0
  10. indoxRouter/config.py +151 -0
  11. indoxRouter/constants/__init__.py +81 -0
  12. indoxRouter/exceptions/__init__.py +70 -0
  13. indoxRouter/models/__init__.py +111 -0
  14. indoxRouter/providers/__init__.py +50 -50
  15. indoxRouter/providers/ai21labs.json +128 -0
  16. indoxRouter/providers/base_provider.py +62 -30
  17. indoxRouter/providers/claude.json +164 -0
  18. indoxRouter/providers/cohere.json +116 -0
  19. indoxRouter/providers/databricks.json +110 -0
  20. indoxRouter/providers/deepseek.json +110 -0
  21. indoxRouter/providers/google.json +128 -0
  22. indoxRouter/providers/meta.json +128 -0
  23. indoxRouter/providers/mistral.json +146 -0
  24. indoxRouter/providers/nvidia.json +110 -0
  25. indoxRouter/providers/openai.json +308 -0
  26. indoxRouter/providers/openai.py +471 -72
  27. indoxRouter/providers/qwen.json +110 -0
  28. indoxRouter/utils/__init__.py +240 -0
  29. indoxrouter-0.1.2.dist-info/LICENSE +21 -0
  30. indoxrouter-0.1.2.dist-info/METADATA +259 -0
  31. indoxrouter-0.1.2.dist-info/RECORD +33 -0
  32. indoxRouter/api_endpoints.py +0 -336
  33. indoxRouter/client_package.py +0 -138
  34. indoxRouter/init_db.py +0 -71
  35. indoxRouter/main.py +0 -711
  36. indoxRouter/migrations/__init__.py +0 -1
  37. indoxRouter/migrations/env.py +0 -98
  38. indoxRouter/migrations/versions/__init__.py +0 -1
  39. indoxRouter/migrations/versions/initial_schema.py +0 -84
  40. indoxRouter/providers/ai21.py +0 -268
  41. indoxRouter/providers/claude.py +0 -177
  42. indoxRouter/providers/cohere.py +0 -171
  43. indoxRouter/providers/databricks.py +0 -166
  44. indoxRouter/providers/deepseek.py +0 -166
  45. indoxRouter/providers/google.py +0 -216
  46. indoxRouter/providers/llama.py +0 -164
  47. indoxRouter/providers/meta.py +0 -227
  48. indoxRouter/providers/mistral.py +0 -182
  49. indoxRouter/providers/nvidia.py +0 -164
  50. indoxrouter-0.1.0.dist-info/METADATA +0 -179
  51. indoxrouter-0.1.0.dist-info/RECORD +0 -27
  52. {indoxrouter-0.1.0.dist-info → indoxrouter-0.1.2.dist-info}/WHEEL +0 -0
  53. {indoxrouter-0.1.0.dist-info → indoxrouter-0.1.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,20 @@
+ """
+ Resources module for indoxRouter.
+ This module contains resource classes for different API endpoints.
+ """
+
+ from .base import BaseResource
+ from .chat import Chat
+ from .completion import Completions
+ from .embedding import Embeddings
+ from .image import Images
+ from .models import Models
+
+ __all__ = [
+     "BaseResource",
+     "Chat",
+     "Completions",
+     "Embeddings",
+     "Images",
+     "Models",
+ ]
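For orientation, a minimal sketch (not part of the diff) of the public surface this new `client_resourses` package re-exports in 0.1.2:

```python
# Import the resource classes re-exported by indoxRouter.client_resourses.
from indoxRouter.client_resourses import (
    BaseResource,
    Chat,
    Completions,
    Embeddings,
    Images,
    Models,
)
```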
@@ -0,0 +1,67 @@
+ """
+ Base resource module for indoxRouter.
+ This module contains the BaseResource class that all resource classes inherit from.
+ """
+
+ from typing import Dict, Any, Optional
+
+ from ..config import Config
+ from ..exceptions import (
+     InvalidParametersError,
+     AuthenticationError,
+     ProviderNotFoundError,
+ )
+
+
+ class BaseResource:
+     """Base resource class for all API resources."""
+
+     def __init__(self, client):
+         """
+         Initialize the resource.
+
+         Args:
+             client: The client instance that this resource belongs to.
+         """
+         self.client = client
+         self.config = client.config
+
+         # Get user from client, or use a default if not available
+         if hasattr(client, "user") and client.user is not None:
+             self.user = client.user
+         else:
+             self.user = {
+                 "id": 1,
+                 "name": "Default User",
+                 "email": "default@example.com",
+             }
+
+     def _get_provider_api_key(
+         self, provider: str, provider_api_key: Optional[str] = None
+     ) -> str:
+         """
+         Get the API key for a provider.
+
+         Args:
+             provider: The provider to get the API key for.
+             provider_api_key: Optional API key to use. If provided, this takes precedence.
+
+         Returns:
+             The API key for the provider.
+
+         Raises:
+             AuthenticationError: If no API key is found for the provider.
+         """
+         # If a provider API key is provided, use it
+         if provider_api_key:
+             return provider_api_key
+
+         # Otherwise, try to get it from the configuration
+         api_key = self.config.get_provider_key(provider)
+         if not api_key:
+             raise AuthenticationError(
+                 f"No API key found for provider '{provider}'. "
+                 f"Please provide an API key or configure it in the configuration file."
+             )
+
+         return api_key
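The key-resolution order in `_get_provider_api_key` is: an explicitly passed key wins, then the key from the client's `Config`, otherwise an `AuthenticationError` is raised. A minimal sketch of a subclass relying on this, assuming a wired-up client; the `Ping` class is hypothetical:

```python
from typing import Optional

# Hypothetical subclass illustrating the precedence implemented above:
# explicit key -> Config.get_provider_key(provider) -> AuthenticationError.
class Ping(BaseResource):
    def __call__(self, provider: str, provider_api_key: Optional[str] = None) -> bool:
        key = self._get_provider_api_key(provider, provider_api_key)
        return bool(key)
```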
@@ -0,0 +1,144 @@
+ """
+ Chat resource module for indoxRouter.
+ This module contains the Chat resource class for chat-related functionality.
+ """
+
+ import time
+ import os
+ from typing import Dict, List, Any, Optional, Union, Generator
+ from datetime import datetime
+ from .base import BaseResource
+ from ..models import ChatMessage, ChatResponse, Usage
+ from ..providers import get_provider
+ from ..constants import (
+     DEFAULT_TEMPERATURE,
+     DEFAULT_MAX_TOKENS,
+     DEFAULT_TOP_P,
+     DEFAULT_FREQUENCY_PENALTY,
+     DEFAULT_PRESENCE_PENALTY,
+ )
+ from ..exceptions import ProviderNotFoundError, InvalidParametersError
+
+
+ class Chat(BaseResource):
+     """Resource class for chat-related functionality."""
+
+     def __call__(
+         self,
+         messages: List[Union[Dict[str, str], ChatMessage]],
+         model: str,
+         temperature: float = DEFAULT_TEMPERATURE,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+         top_p: float = DEFAULT_TOP_P,
+         frequency_penalty: float = DEFAULT_FREQUENCY_PENALTY,
+         presence_penalty: float = DEFAULT_PRESENCE_PENALTY,
+         provider_api_key: Optional[str] = None,
+         stream: bool = False,
+         return_generator: bool = False,
+         **kwargs,
+     ) -> Union[ChatResponse, Generator[str, None, None]]:
+         """
+         Send a chat request to a provider.
+
+         Args:
+             messages: A list of messages to send to the provider.
+             model: The model to use.
+             temperature: The temperature to use for generation.
+             max_tokens: The maximum number of tokens to generate.
+             top_p: The top_p value to use for generation.
+             frequency_penalty: The frequency penalty to use for generation.
+             presence_penalty: The presence penalty to use for generation.
+             provider_api_key: Optional API key for the provider. If not provided, uses the configured key.
+             stream: Whether to stream the response. Default is False.
+             return_generator: Whether to return a generator that yields chunks of the response. Only applicable when stream=True.
+             **kwargs: Additional parameters to pass to the provider.
+
+         Returns:
+             A ChatResponse object containing the response from the provider.
+             If stream=True and return_generator=True, returns a generator that yields chunks of the response.
+
+         Raises:
+             ProviderNotFoundError: If the provider is not found.
+             ModelNotFoundError: If the model is not found.
+             InvalidParametersError: If the parameters are invalid.
+             RequestError: If the request to the provider fails.
+         """
+         # Convert messages to ChatMessage objects if they are dictionaries
+         chat_messages = []
+         for message in messages:
+             if isinstance(message, dict):
+                 chat_messages.append(ChatMessage(**message))
+             else:
+                 chat_messages.append(message)
+         # Get the provider and model
+         provider, model_name = model.split("/")
+
+         # Get the provider API key
+         provider_api_key = os.getenv(f"{provider.upper()}_API_KEY")
+         # Get the provider implementation
+         provider_impl = get_provider(provider, provider_api_key, model_name)
+
+         # Send the request to the provider
+         response = provider_impl.chat(
+             messages=chat_messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             frequency_penalty=frequency_penalty,
+             presence_penalty=presence_penalty,
+             stream=stream,
+             return_generator=return_generator,
+             **kwargs,
+         )
+
+         # If return_generator is True and we got a generator, return it directly
+         if (
+             return_generator
+             and stream
+             and hasattr(response, "__iter__")
+             and hasattr(response, "__next__")
+         ):
+             # Return the generator directly - it's already a StreamingGenerator
+             # that handles usage tracking internally
+             return response
+
+         # If the response is a dictionary, convert it to a ChatResponse object
+         if isinstance(response, dict):
+             # Create Usage object from response
+             usage_data = response.get("usage", {})
+
+             # Parse timestamp if it's a string
+             timestamp = response.get("timestamp")
+             if isinstance(timestamp, str):
+                 try:
+                     timestamp = datetime.fromisoformat(timestamp)
+                 except ValueError:
+                     timestamp = datetime.now()
+             else:
+                 timestamp = datetime.now()
+
+             # Extract usage information with fallbacks for different formats
+             tokens_prompt = usage_data.get("tokens_prompt", 0)
+             tokens_completion = usage_data.get("tokens_completion", 0)
+             tokens_total = usage_data.get("tokens_total", 0)
+
+             usage = Usage(
+                 tokens_prompt=tokens_prompt,
+                 tokens_completion=tokens_completion,
+                 tokens_total=tokens_total,
+                 cost=response.get("cost", 0.0),
+                 latency=0.0,  # We don't have latency in the dictionary
+                 timestamp=timestamp,
+             )
+
+             return ChatResponse(
+                 data=response.get("data", ""),
+                 model=response.get("model", model_name),
+                 provider=provider,
+                 success=response.get("success", False),
+                 message=response.get("message", ""),
+                 usage=usage,
+                 finish_reason=response.get("finish_reason", None),
+                 raw_response=response.get("raw_response", None),
+             )
+         return response
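A hedged usage sketch based on the signature above; the `Client` entry point and the `client.chat` attribute name are assumptions not shown in this diff, and the model id is only illustrative. Note that, as written, `Chat.__call__` re-reads the provider key from the `{PROVIDER}_API_KEY` environment variable before calling `get_provider`, so that variable needs to be set even if `provider_api_key` is passed.

```python
import os
from indoxRouter import Client  # assumed entry point

os.environ["OPENAI_API_KEY"] = "sk-..."  # read by Chat.__call__ via os.getenv

client = Client()
response = client.chat(
    messages=[{"role": "user", "content": "Hello!"}],
    model="openai/gpt-4o-mini",  # "provider/model-name"
    temperature=0.7,
    max_tokens=128,
)
print(response.data)
print(response.usage.tokens_total, response.usage.cost)
```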
@@ -0,0 +1,138 @@
+ """
+ Completion resource module for indoxRouter.
+ This module contains the Completions resource class for text completion functionality.
+ """
+
+ from typing import Dict, List, Any, Optional, Union, Generator
+ import os
+ from datetime import datetime
+ from .base import BaseResource
+ from ..models import CompletionResponse, Usage
+ from ..providers import get_provider
+ from ..constants import (
+     DEFAULT_TEMPERATURE,
+     DEFAULT_MAX_TOKENS,
+     DEFAULT_TOP_P,
+     DEFAULT_FREQUENCY_PENALTY,
+     DEFAULT_PRESENCE_PENALTY,
+ )
+ from ..exceptions import ProviderNotFoundError, InvalidParametersError
+
+
+ class Completions(BaseResource):
+     """Resource class for text completion functionality."""
+
+     def __call__(
+         self,
+         prompt: str,
+         model: str,
+         temperature: float = DEFAULT_TEMPERATURE,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+         top_p: float = DEFAULT_TOP_P,
+         frequency_penalty: float = DEFAULT_FREQUENCY_PENALTY,
+         presence_penalty: float = DEFAULT_PRESENCE_PENALTY,
+         provider_api_key: Optional[str] = None,
+         stream: bool = False,
+         return_generator: bool = False,
+         **kwargs,
+     ) -> Union[CompletionResponse, Generator[str, None, None]]:
+         """
+         Send a completion request to a provider.
+
+         Args:
+             prompt: The prompt to complete.
+             model: The model to use.
+             temperature: The temperature to use for generation.
+             max_tokens: The maximum number of tokens to generate.
+             top_p: The top_p value to use for generation.
+             frequency_penalty: The frequency penalty to use for generation.
+             presence_penalty: The presence penalty to use for generation.
+             provider_api_key: Optional API key for the provider. If not provided, uses the configured key.
+             stream: Whether to stream the response. Default is False.
+             return_generator: Whether to return a generator that yields chunks of the response. Only applicable when stream=True.
+             **kwargs: Additional parameters to pass to the provider.
+
+         Returns:
+             A CompletionResponse object containing the response from the provider.
+             If stream=True and return_generator=True, returns a generator that yields chunks of the response.
+
+         Raises:
+             ProviderNotFoundError: If the provider is not found.
+             ModelNotFoundError: If the model is not found.
+             InvalidParametersError: If the parameters are invalid.
+             RequestError: If the request to the provider fails.
+         """
+
+         # Get the provider and model
+         provider, model_name = model.split("/")
+
+         # Get the provider API key
+         provider_api_key = os.getenv(f"{provider.upper()}_API_KEY")
+         # Get the provider implementation
+         provider_impl = get_provider(provider, provider_api_key, model_name)
+
+         # Send the request to the provider
+         response = provider_impl.complete(
+             prompt=prompt,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             frequency_penalty=frequency_penalty,
+             presence_penalty=presence_penalty,
+             stream=stream,
+             return_generator=return_generator,
+             **kwargs,
+         )
+
+         # If return_generator is True and we got a generator, return it directly
+         if (
+             return_generator
+             and stream
+             and hasattr(response, "__iter__")
+             and hasattr(response, "__next__")
+         ):
+             # Return the generator directly - it's already a StreamingGenerator
+             # that handles usage tracking internally
+             return response
+
+         # If the response is a dictionary, convert it to a CompletionResponse object
+         if isinstance(response, dict):
+             # Create Usage object from response
+             usage_data = response.get("usage", {})
+
+             # Parse timestamp if it's a string
+             timestamp = response.get("timestamp")
+             if isinstance(timestamp, str):
+                 try:
+                     timestamp = datetime.fromisoformat(timestamp)
+                 except ValueError:
+                     timestamp = datetime.now()
+             else:
+                 timestamp = datetime.now()
+
+             # Extract usage information with fallbacks for different formats
+             tokens_prompt = usage_data.get("tokens_prompt", 0)
+             tokens_completion = usage_data.get("tokens_completion", 0)
+             tokens_total = usage_data.get("tokens_total", 0)
+
+             usage = Usage(
+                 tokens_prompt=tokens_prompt,
+                 tokens_completion=tokens_completion,
+                 tokens_total=tokens_total,
+                 cost=response.get("cost", 0.0),
+                 latency=0.0,  # We don't have latency in the dictionary
+                 timestamp=timestamp,
+             )
+
+             return CompletionResponse(
+                 data=response.get("data"),
+                 model=response.get("model", model_name),
+                 provider=provider,
+                 success=response.get("success", False),
+                 message=response.get("message", ""),
+                 usage=usage,
+                 finish_reason=response.get("finish_reason", None),
+                 raw_response=response.get("raw_response", None),
+             )
+
+         return response
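Completions mirrors Chat but takes a prompt string. A streaming sketch under the same assumptions (the `client.completions` attribute name and model id are illustrative); with `stream=True` and `return_generator=True` the provider's generator is returned unchanged:

```python
# Hypothetical streaming usage; each yielded chunk is a piece of the text.
stream = client.completions(
    prompt="Write a haiku about request routing.",
    model="openai/gpt-3.5-turbo-instruct",
    stream=True,
    return_generator=True,
)
for chunk in stream:
    print(chunk, end="", flush=True)
```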
@@ -0,0 +1,83 @@
+ """
+ Embedding resource module for indoxRouter.
+ This module contains the Embeddings resource class for embedding functionality.
+ """
+
+ from typing import Dict, List, Any, Optional, Union
+ import os
+ from datetime import datetime
+ from .base import BaseResource
+ from ..models import EmbeddingResponse, Usage
+ from ..providers import get_provider
+ from ..constants import (
+     DEFAULT_EMBEDDING_MODEL,
+     DEFAULT_EMBEDDING_DIMENSIONS,
+     ERROR_INVALID_PARAMETERS,
+     ERROR_PROVIDER_NOT_FOUND
+ )
+ from ..exceptions import ProviderNotFoundError, InvalidParametersError
+
+
+ class Embeddings(BaseResource):
+     """Resource class for embedding functionality."""
+
+     def __call__(
+         self,
+         text: Union[str, List[str]],
+         model: str,
+         provider_api_key: Optional[str] = None,
+         **kwargs,
+     ) -> EmbeddingResponse:
+         # Split provider and model name correctly
+         try:
+             provider, model_name = model.split("/", 1)
+         except ValueError:
+             raise InvalidParametersError(f"{ERROR_INVALID_PARAMETERS}: Model must be in format 'provider/model-name'")
+
+         # Validate text parameter
+         if not isinstance(text, (str, list)):
+             raise InvalidParametersError(f"{ERROR_INVALID_PARAMETERS}: text must be a string or list of strings")
+
+         if isinstance(text, list) and not all(isinstance(t, str) for t in text):
+             raise InvalidParametersError(f"{ERROR_INVALID_PARAMETERS}: all items in text list must be strings")
+
+         # Get the provider
+         try:
+             provider_instance = self._get_provider(provider, model_name, provider_api_key)
+         except Exception as e:
+             raise ProviderNotFoundError(f"{ERROR_PROVIDER_NOT_FOUND}: {str(e)}")
+
+         # Make the request
+         start_time = datetime.now()
+         try:
+             response = provider_instance.embed(text=text, **kwargs)
+         except Exception as e:
+             self._handle_provider_error(e)
+
+         # Calculate duration
+         duration = (datetime.now() - start_time).total_seconds()
+
+         # Create usage information
+         usage = Usage(
+             tokens_prompt=response.get("tokens_prompt", 0),
+             tokens_completion=0,  # Embeddings don't have completion tokens
+             tokens_total=response.get("tokens_total", 0),
+             cost=response.get("cost", 0.0),
+             latency=duration,
+             timestamp=datetime.now(),
+         )
+
+         # Get dimensions from the response or use default
+         dimensions = response.get("dimensions", DEFAULT_EMBEDDING_DIMENSIONS)
+
+         # Create and return the response
+         return EmbeddingResponse(
+             data=response.get("embeddings", []),
+             model=model_name,
+             provider=provider,
+             success=True,
+             message="Successfully generated embeddings",
+             usage=usage,
+             dimensions=dimensions,
+             raw_response=response,
+         )
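A usage sketch for the Embeddings resource; `client.embeddings` and the model id are assumptions. The model string is split on the first `/`, so it must be given as `provider/model-name`:

```python
# Hypothetical usage; text may be a single string or a list of strings.
result = client.embeddings(
    text=["first document", "second document"],
    model="openai/text-embedding-3-small",
)
print(result.dimensions, len(result.data))
```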
@@ -0,0 +1,116 @@
+ """
+ Image resource module for indoxRouter.
+ This module contains the Images resource class for image generation functionality.
+ """
+
+ import os
+ from typing import Dict, List, Any, Optional
+ from datetime import datetime
+ from .base import BaseResource
+ from ..models import ImageResponse, Usage
+ from ..providers import get_provider
+ from ..constants import (
+     DEFAULT_IMAGE_SIZE,
+     DEFAULT_IMAGE_COUNT,
+     DEFAULT_IMAGE_QUALITY,
+     DEFAULT_IMAGE_STYLE,
+     ERROR_INVALID_PARAMETERS,
+     ERROR_PROVIDER_NOT_FOUND,
+     ERROR_INVALID_IMAGE_SIZE
+ )
+ from ..exceptions import ProviderNotFoundError, InvalidParametersError
+
+
+ class Images(BaseResource):
+     """Resource class for image generation functionality."""
+
+     def __call__(
+         self,
+         prompt: str,
+         model: str,
+         size: str = DEFAULT_IMAGE_SIZE,
+         n: int = DEFAULT_IMAGE_COUNT,
+         quality: str = DEFAULT_IMAGE_QUALITY,
+         style: str = DEFAULT_IMAGE_STYLE,
+         provider_api_key: Optional[str] = None,
+         **kwargs,
+     ) -> ImageResponse:
+         """
+         Generate an image from a prompt.
+
+         Args:
+             prompt: The prompt to generate an image from.
+             model: The model to use, in the format 'provider/model-name'.
+             size: The size of the image to generate.
+             n: The number of images to generate.
+             quality: The quality of the image to generate.
+             style: The style of the image to generate.
+             provider_api_key: The API key to use for the provider.
+             **kwargs: Additional parameters to pass to the provider.
+
+         Returns:
+             An ImageResponse object containing the generated images.
+
+         Raises:
+             ProviderNotFoundError: If the provider is not found.
+             ModelNotFoundError: If the model is not found.
+             InvalidParametersError: If the parameters are invalid.
+         """
+         # Validate parameters
+         if not isinstance(prompt, str):
+             raise InvalidParametersError(f"{ERROR_INVALID_PARAMETERS}: prompt must be a string")
+
+         # Split provider and model name correctly
+         try:
+             provider, model_name = model.split("/", 1)
+         except ValueError:
+             raise InvalidParametersError(f"{ERROR_INVALID_PARAMETERS}: Model must be in format 'provider/model-name'")
+
+         # Validate image size
+         valid_sizes = ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]
+         if size not in valid_sizes:
+             raise InvalidParametersError(f"{ERROR_INVALID_IMAGE_SIZE} Valid sizes: {', '.join(valid_sizes)}")
+
+         # Get the provider
+         try:
+             provider_instance = self._get_provider(provider, model_name, provider_api_key)
+         except Exception as e:
+             raise ProviderNotFoundError(f"{ERROR_PROVIDER_NOT_FOUND}: {str(e)}")
+
+         # Make the request
+         start_time = datetime.now()
+         try:
+             response = provider_instance.generate_image(
+                 prompt=prompt,
+                 size=size,
+                 n=n,
+                 quality=quality,
+                 style=style,
+                 **kwargs
+             )
+         except Exception as e:
+             self._handle_provider_error(e)
+
+         # Calculate duration
+         duration = (datetime.now() - start_time).total_seconds()
+
+         # Create usage information
+         usage = Usage(
+             tokens_prompt=response.get("tokens_prompt", 0),
+             tokens_completion=response.get("tokens_completion", 0),
+             tokens_total=response.get("tokens_total", 0),
+             cost=response.get("cost", 0.0),
+             latency=duration,
+             timestamp=datetime.now(),
+         )
+
+         # Create and return the response
+         return ImageResponse(
+             data=response.get("images", []),
+             model=model_name,
+             provider=provider,
+             success=True,
+             message="Successfully generated image",
+             usage=usage,
+             raw_response=response,
+         )
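A usage sketch for the Images resource; `client.images` and the model id are assumptions. `size` must be one of the values validated above:

```python
# Hypothetical usage; returns an ImageResponse whose data holds the images.
image = client.images(
    prompt="A lighthouse at dawn, watercolor",
    model="openai/dall-e-3",
    size="1024x1024",
    n=1,
)
print(image.data)
```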
@@ -0,0 +1,114 @@
+ """
+ Models resource module for indoxRouter.
+ This module contains the Models resource class for model-related functionality.
+ """
+
+ from typing import Dict, List, Any, Optional
+
+ from .base import BaseResource
+ from ..models import ModelInfo
+ from ..utils import list_available_providers, list_available_models, get_model_info
+ from ..exceptions import ProviderNotFoundError, ModelNotFoundError
+
+
+ class Models(BaseResource):
+     """Resource class for model-related functionality."""
+
+     def list_providers(self) -> List[Dict[str, Any]]:
+         """
+         List all available providers.
+
+         Returns:
+             A list of provider dictionaries with information.
+         """
+         providers = list_available_providers()
+         result = []
+
+         for provider in providers:
+             # Get the first model to extract provider information
+             try:
+                 models = list_available_models(provider)
+                 if provider in models and models[provider]:
+                     model_info = models[provider][0]
+                     result.append(
+                         {
+                             "id": provider,
+                             "name": provider.capitalize(),
+                             "description": model_info.get(
+                                 "providerDescription",
+                                 f"{provider.capitalize()} AI provider",
+                             ),
+                             "website": model_info.get("providerWebsite", ""),
+                             "model_count": len(models[provider]),
+                         }
+                     )
+                 else:
+                     result.append(
+                         {
+                             "id": provider,
+                             "name": provider.capitalize(),
+                             "description": f"{provider.capitalize()} AI provider",
+                             "website": "",
+                             "model_count": 0,
+                         }
+                     )
+             except Exception:
+                 # If there's an error, still include the provider with minimal info
+                 result.append(
+                     {
+                         "id": provider,
+                         "name": provider.capitalize(),
+                         "description": f"{provider.capitalize()} AI provider",
+                         "website": "",
+                         "model_count": 0,
+                     }
+                 )
+
+         return result
+
+     def list(self, provider: Optional[str] = None) -> Dict[str, List[Dict[str, Any]]]:
+         """
+         List all available models, optionally filtered by provider.
+
+         Args:
+             provider: The name of the provider to filter by. If None, lists models from all providers.
+
+         Returns:
+             A dictionary mapping provider names to lists of model dictionaries.
+
+         Raises:
+             ProviderNotFoundError: If the specified provider is not found.
+         """
+         return list_available_models(provider)
+
+     def get(self, provider: str, model: str) -> ModelInfo:
+         """
+         Get information about a specific model from a provider.
+
+         Args:
+             provider: The name of the provider.
+             model: The name of the model.
+
+         Returns:
+             A ModelInfo object containing information about the model.
+
+         Raises:
+             ProviderNotFoundError: If the provider is not found.
+             ModelNotFoundError: If the model is not found.
+         """
+         model_data = get_model_info(provider, model)
+
+         return ModelInfo(
+             name=model_data.get("modelName", model),
+             provider=provider,
+             type=model_data.get("type", "Unknown"),
+             description=model_data.get("description"),
+             input_price_per_1k_tokens=model_data.get("inputPricePer1KTokens", 0.0),
+             output_price_per_1k_tokens=model_data.get("outputPricePer1KTokens", 0.0),
+             context_window=model_data.get("contextWindow"),
+             max_output_tokens=model_data.get("maxOutputTokens"),
+             recommended=model_data.get("recommended", False),
+             commercial=model_data.get("commercial", False),
+             pricey=model_data.get("pricey", False),
+             raw_info=model_data,
+         )
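A usage sketch for the Models resource; `client.models` is an assumed attribute name and the provider/model ids are illustrative:

```python
# Hypothetical usage of the three Models methods shown above.
providers = client.models.list_providers()    # provider summaries
by_provider = client.models.list("openai")    # models for one provider
info = client.models.get("openai", "gpt-4o")  # ModelInfo for one model
print(info.context_window, info.input_price_per_1k_tokens)
```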