indoxrouter 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. indoxRouter/__init__.py +83 -0
  2. indoxRouter/client.py +564 -218
  3. indoxRouter/client_resourses/__init__.py +20 -0
  4. indoxRouter/client_resourses/base.py +67 -0
  5. indoxRouter/client_resourses/chat.py +144 -0
  6. indoxRouter/client_resourses/completion.py +138 -0
  7. indoxRouter/client_resourses/embedding.py +83 -0
  8. indoxRouter/client_resourses/image.py +116 -0
  9. indoxRouter/client_resourses/models.py +114 -0
  10. indoxRouter/config.py +151 -0
  11. indoxRouter/constants/__init__.py +81 -0
  12. indoxRouter/exceptions/__init__.py +70 -0
  13. indoxRouter/models/__init__.py +111 -0
  14. indoxRouter/providers/__init__.py +50 -50
  15. indoxRouter/providers/ai21labs.json +128 -0
  16. indoxRouter/providers/base_provider.py +62 -30
  17. indoxRouter/providers/claude.json +164 -0
  18. indoxRouter/providers/cohere.json +116 -0
  19. indoxRouter/providers/databricks.json +110 -0
  20. indoxRouter/providers/deepseek.json +110 -0
  21. indoxRouter/providers/google.json +128 -0
  22. indoxRouter/providers/meta.json +128 -0
  23. indoxRouter/providers/mistral.json +146 -0
  24. indoxRouter/providers/nvidia.json +110 -0
  25. indoxRouter/providers/openai.json +308 -0
  26. indoxRouter/providers/openai.py +471 -72
  27. indoxRouter/providers/qwen.json +110 -0
  28. indoxRouter/utils/__init__.py +240 -0
  29. indoxrouter-0.1.2.dist-info/LICENSE +21 -0
  30. indoxrouter-0.1.2.dist-info/METADATA +259 -0
  31. indoxrouter-0.1.2.dist-info/RECORD +33 -0
  32. indoxRouter/api_endpoints.py +0 -336
  33. indoxRouter/client_package.py +0 -138
  34. indoxRouter/init_db.py +0 -71
  35. indoxRouter/main.py +0 -711
  36. indoxRouter/migrations/__init__.py +0 -1
  37. indoxRouter/migrations/env.py +0 -98
  38. indoxRouter/migrations/versions/__init__.py +0 -1
  39. indoxRouter/migrations/versions/initial_schema.py +0 -84
  40. indoxRouter/providers/ai21.py +0 -268
  41. indoxRouter/providers/claude.py +0 -177
  42. indoxRouter/providers/cohere.py +0 -171
  43. indoxRouter/providers/databricks.py +0 -166
  44. indoxRouter/providers/deepseek.py +0 -166
  45. indoxRouter/providers/google.py +0 -216
  46. indoxRouter/providers/llama.py +0 -164
  47. indoxRouter/providers/meta.py +0 -227
  48. indoxRouter/providers/mistral.py +0 -182
  49. indoxRouter/providers/nvidia.py +0 -164
  50. indoxrouter-0.1.0.dist-info/METADATA +0 -179
  51. indoxrouter-0.1.0.dist-info/RECORD +0 -27
  52. {indoxrouter-0.1.0.dist-info → indoxrouter-0.1.2.dist-info}/WHEEL +0 -0
  53. {indoxrouter-0.1.0.dist-info → indoxrouter-0.1.2.dist-info}/top_level.txt +0 -0
indoxRouter/config.py ADDED
@@ -0,0 +1,151 @@
+ """
+ Configuration module for indoxRouter.
+ This module contains the Config class and related functions.
+ """
+
+ import os
+ import json
+ from typing import Dict, Any, Optional
+
+ from .constants import DEFAULT_CONFIG_PATH
+
+
+ class Config:
+     """Configuration class for indoxRouter."""
+
+     def __init__(self, config_path: Optional[str] = None):
+         """
+         Initialize the configuration.
+
+         Args:
+             config_path: Path to the configuration file. If None, uses the default path.
+         """
+         self.config_path = config_path or os.path.expanduser(DEFAULT_CONFIG_PATH)
+         self.config = {}
+         self._load_config()
+
+     def _load_config(self):
+         """Load the configuration from the file and environment variables."""
+         # First try to load from file
+         try:
+             if os.path.exists(self.config_path):
+                 with open(self.config_path, "r") as f:
+                     self.config = json.load(f)
+         except Exception as e:
+             print(f"Warning: Failed to load config from {self.config_path}: {e}")
+             self.config = {}
+
+         # Then load from environment variables
+         self._load_from_env()
+
+     def _load_from_env(self):
+         """Load configuration from environment variables."""
+         # Initialize provider_keys if it doesn't exist
+         if "provider_keys" not in self.config:
+             self.config["provider_keys"] = {}
+
+         # Load provider API keys from environment variables
+         env_mapping = {
+             "OPENAI_API_KEY": "openai",
+             "ANTHROPIC_API_KEY": "anthropic",
+             "MISTRAL_API_KEY": "mistral",
+             "COHERE_API_KEY": "cohere",
+             "GOOGLE_API_KEY": "google",
+         }
+
+         for env_var, provider in env_mapping.items():
+             api_key = os.environ.get(env_var)
+             if api_key:
+                 self.config["provider_keys"][provider] = api_key
+
+     def get_provider_key(self, provider: str) -> Optional[str]:
+         """
+         Get the API key for a provider.
+
+         Args:
+             provider: The name of the provider.
+
+         Returns:
+             The API key for the provider, or None if not found.
+         """
+         # Check if the provider key exists in the configuration
+         if "provider_keys" in self.config and provider in self.config["provider_keys"]:
+             return self.config["provider_keys"][provider]
+
+         # Check environment variables as a fallback
+         env_var = f"{provider.upper()}_API_KEY"
+         return os.environ.get(env_var)
+
+     def set_provider_key(self, provider: str, api_key: str):
+         """
+         Set the API key for a provider.
+
+         Args:
+             provider: The name of the provider.
+             api_key: The API key for the provider.
+         """
+         if "provider_keys" not in self.config:
+             self.config["provider_keys"] = {}
+
+         self.config["provider_keys"][provider] = api_key
+
+     def save_config(self, config_path: Optional[str] = None):
+         """
+         Save the configuration to a file.
+
+         Args:
+             config_path: Path to save the configuration to. If None, uses the current config_path.
+         """
+         save_path = config_path or self.config_path
+
+         # Create the directory if it doesn't exist
+         os.makedirs(os.path.dirname(save_path), exist_ok=True)
+
+         try:
+             with open(save_path, "w") as f:
+                 json.dump(self.config, f, indent=2)
+         except Exception as e:
+             print(f"Warning: Failed to save config to {save_path}: {e}")
+
+     def get(self, key: str, default: Any = None) -> Any:
+         """
+         Get a configuration value.
+
+         Args:
+             key: The key to get.
+             default: The default value to return if the key is not found.
+
+         Returns:
+             The value for the key, or the default if not found.
+         """
+         return self.config.get(key, default)
+
+     def set(self, key: str, value: Any):
+         """
+         Set a configuration value.
+
+         Args:
+             key: The key to set.
+             value: The value to set.
+         """
+         self.config[key] = value
+
+
+ # Global configuration instance
+ _config_instance = None
+
+
+ def get_config(config_path: Optional[str] = None) -> Config:
+     """
+     Get the global configuration instance.
+
+     Args:
+         config_path: Path to the configuration file. If None, uses the default path.
+
+     Returns:
+         The global configuration instance.
+     """
+     global _config_instance
+     if _config_instance is None:
+         _config_instance = Config(config_path)
+     return _config_instance
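
The Config class loads the JSON file at ~/.indoxRouter/config.json and then overlays any provider keys found in environment variables, while get_config() caches a single instance per process. A minimal usage sketch, assuming the installed package exposes indoxRouter.config; the key values and save path below are hypothetical:

import os

from indoxRouter.config import get_config

os.environ["OPENAI_API_KEY"] = "sk-example"  # hypothetical key, picked up by _load_from_env()

config = get_config()  # cached singleton; reads ~/.indoxRouter/config.json if it exists
print(config.get_provider_key("openai"))  # "sk-example"

config.set_provider_key("cohere", "co-example")      # stored under config["provider_keys"]
config.set("default_model", "gpt-4o-mini")           # arbitrary top-level setting
config.save_config("/tmp/indoxrouter-config.json")   # hypothetical path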
indoxRouter/constants/__init__.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Constants module for indoxRouter.
+ This module contains all the constants used throughout the application.
+ """
+
+ import os
+
+ # API related constants
+ DEFAULT_API_VERSION = "v1"
+ DEFAULT_TIMEOUT = 60  # seconds
+ DEFAULT_BASE_URL = "https://api.indoxrouter.com"
+
+ # Model related constants
+ DEFAULT_TEMPERATURE = 0.7
+ DEFAULT_MAX_TOKENS = 1000
+ DEFAULT_TOP_P = 1.0
+ DEFAULT_FREQUENCY_PENALTY = 0.0
+ DEFAULT_PRESENCE_PENALTY = 0.0
+
+ # Image generation related constants
+ DEFAULT_IMAGE_SIZE = "512x512"
+ DEFAULT_IMAGE_COUNT = 1
+ DEFAULT_IMAGE_QUALITY = "standard"
+ DEFAULT_IMAGE_STYLE = "vivid"
+
+ # Embedding related constants
+ DEFAULT_EMBEDDING_MODEL = "text-embedding-ada-002"
+ DEFAULT_EMBEDDING_DIMENSIONS = 1536
+
+ # Database related constants
+ DEFAULT_DB_PATH = os.path.join(os.path.expanduser("~/.indoxRouter"), "indoxRouter.db")
+ DEFAULT_POSTGRES_CONNECTION = (
+     "postgresql://postgres:postgres@localhost:5432/indoxrouter"
+ )
+
+ # Configuration related constants
+ DEFAULT_CONFIG_DIR = os.path.expanduser("~/.indoxRouter")
+ DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_CONFIG_DIR, "config.json")
+
+ # Error messages
+ ERROR_INVALID_API_KEY = "Invalid API key provided."
+ ERROR_MODEL_NOT_FOUND = "Model not found."
+ ERROR_PROVIDER_NOT_FOUND = "Provider not found."
+ ERROR_REQUEST_FAILED = "Request to provider failed."
+ ERROR_INVALID_PARAMETERS = "Invalid parameters provided."
+ ERROR_UNAUTHORIZED = "Unauthorized. Please check your API key."
+ ERROR_RATE_LIMIT = "Rate limit exceeded. Please try again later."
+ ERROR_QUOTA_EXCEEDED = "Quota exceeded. Please check your usage."
+ ERROR_PROVIDER_KEY_NOT_FOUND = "Provider API key not found. Please configure it first."
+ ERROR_FEATURE_NOT_SUPPORTED = "This feature is not supported by the selected provider."
+ ERROR_INVALID_IMAGE_SIZE = (
+     "Invalid image size. Please check the documentation for supported sizes."
+ )
+
+ # Success messages
+ SUCCESS_REQUEST = "Request successful."
+
+ # Provider names
+ PROVIDER_OPENAI = "openai"
+ PROVIDER_ANTHROPIC = "anthropic"
+ PROVIDER_MISTRAL = "mistral"
+ PROVIDER_COHERE = "cohere"
+ PROVIDER_GOOGLE = "google"
+
+ # Model types
+ MODEL_TYPE_CHAT = "chat"
+ MODEL_TYPE_TEXT = "text"
+ MODEL_TYPE_EMBEDDING = "embedding"
+ MODEL_TYPE_IMAGE = "image"
+
+ # Response formats
+ RESPONSE_FORMAT_JSON = "json"
+ RESPONSE_FORMAT_TEXT = "text"
+
+ # Database types
+ DB_TYPE_SQLITE = "sqlite"
+ DB_TYPE_POSTGRES = "postgres"
+
+ # Default paths
+ DEFAULT_CONFIG_PATH = "~/.indoxRouter/config.json"
+ DEFAULT_DB_PATH = "~/.indoxRouter/database.db"
indoxRouter/exceptions/__init__.py ADDED
@@ -0,0 +1,70 @@
+ """
+ Exceptions module for indoxRouter.
+ This module contains all the custom exceptions used throughout the application.
+ """
+
+
+ class IndoxRouterError(Exception):
+     """Base exception for all indoxRouter errors."""
+
+     pass
+
+
+ class AuthenticationError(IndoxRouterError):
+     """Raised when authentication fails."""
+
+     pass
+
+
+ class InvalidAPIKeyError(AuthenticationError):
+     """Raised when an invalid API key is provided."""
+
+     pass
+
+
+ class ProviderError(IndoxRouterError):
+     """Raised when there's an error with a provider."""
+
+     pass
+
+
+ class ModelNotFoundError(ProviderError):
+     """Raised when a model is not found."""
+
+     pass
+
+
+ class ProviderNotFoundError(ProviderError):
+     """Raised when a provider is not found."""
+
+     pass
+
+
+ class RequestError(IndoxRouterError):
+     """Raised when a request to a provider fails."""
+
+     pass
+
+
+ class NetworkError(RequestError):
+     """Raised when a network-related error occurs during API communication."""
+
+     pass
+
+
+ class RateLimitError(RequestError):
+     """Raised when a rate limit is exceeded."""
+
+     pass
+
+
+ class QuotaExceededError(RequestError):
+     """Raised when a quota is exceeded."""
+
+     pass
+
+
+ class InvalidParametersError(IndoxRouterError):
+     """Raised when invalid parameters are provided."""
+
+     pass
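
All of these errors derive from IndoxRouterError, so callers can catch narrowly (RateLimitError, ProviderNotFoundError) or broadly at the base class. A short sketch of that pattern, assuming the classes import from indoxRouter.exceptions; the client.chat() call is a hypothetical placeholder, not an API documented in this diff:

from indoxRouter.exceptions import (
    IndoxRouterError,
    ProviderNotFoundError,
    RateLimitError,
)


def safe_chat(client, messages):
    """Hypothetical wrapper showing how the exception hierarchy nests."""
    try:
        return client.chat(messages)  # placeholder call
    except RateLimitError:
        return None  # RequestError subclass: worth retrying after a delay
    except ProviderNotFoundError as exc:
        raise RuntimeError(f"Check the provider name: {exc}") from exc  # ProviderError subclass
    except IndoxRouterError:
        raise  # every library error derives from this base class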
indoxRouter/models/__init__.py ADDED
@@ -0,0 +1,111 @@
+ """
+ Models module for indoxRouter.
+ This module contains data models used throughout the application.
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Any, Optional, Union
+ from datetime import datetime
+
+
+ @dataclass
+ class ChatMessage:
+     """A chat message."""
+
+     role: str
+     content: str
+
+
+ @dataclass
+ class Usage:
+     """Usage information for a response."""
+
+     tokens_prompt: int = 0
+     tokens_completion: int = 0
+     tokens_total: int = 0
+     cost: float = 0.0
+     latency: float = 0.0
+     timestamp: datetime = field(default_factory=datetime.now)
+
+
+ @dataclass
+ class ChatResponse:
+     """Response from a chat request."""
+
+     # Required fields (no defaults)
+     data: str
+     model: str
+     provider: str
+     success: bool
+     message: str
+
+     # Fields with defaults
+     usage: Usage = field(default_factory=Usage)
+     finish_reason: Optional[str] = None
+     raw_response: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class CompletionResponse:
+     """Response from a completion request."""
+
+     # Required fields (no defaults)
+     data: str
+     model: str
+     provider: str
+     success: bool
+     message: str
+
+     # Fields with defaults
+     usage: Usage = field(default_factory=Usage)
+     finish_reason: Optional[str] = None
+     raw_response: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class EmbeddingResponse:
+     """Response from an embedding request."""
+
+     data: Union[List[Dict[str, Any]], List[List[float]], List[float]]
+     model: str
+     provider: str
+     success: bool
+     message: str
+
+     # Fields with defaults
+     usage: Usage = field(default_factory=Usage)
+     dimensions: int = 0
+     raw_response: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class ImageResponse:
+     """Response from an image generation request."""
+
+     images: List[str]  # URLs or base64 encoded images
+     model: str
+     provider: str
+
+     # Fields with defaults
+     usage: Usage = field(default_factory=Usage)
+     sizes: List[str] = field(default_factory=list)
+     formats: List[str] = field(default_factory=list)
+     raw_response: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class ModelInfo:
+     """Information about a model."""
+
+     name: str
+     provider: str
+     type: str
+     description: Optional[str] = None
+     input_price_per_1k_tokens: float = 0.0
+     output_price_per_1k_tokens: float = 0.0
+     context_window: Optional[int] = None
+     max_output_tokens: Optional[int] = None
+     recommended: bool = False
+     commercial: bool = False
+     pricey: bool = False
+     raw_info: Optional[Dict[str, Any]] = None
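
The response models are plain dataclasses with no behaviour of their own. A small sketch of constructing one by hand, assuming they import from indoxRouter.models; all values are illustrative:

from indoxRouter.models import ChatMessage, ChatResponse, Usage

messages = [ChatMessage(role="user", content="Hello!")]

response = ChatResponse(
    data="Hi there!",  # required fields have no defaults
    model="gpt-4o-mini",
    provider="openai",
    success=True,
    message="Request successful.",
    usage=Usage(tokens_prompt=3, tokens_completion=4, tokens_total=7, cost=1e-5),
    finish_reason="stop",
)

print(response.usage.tokens_total)  # 7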
indoxRouter/providers/__init__.py CHANGED
@@ -15,75 +15,75 @@ try:
  except ImportError as e:
      logger.warning(f"OpenAI provider not available: {e}")

- try:
-     from . import claude
+ # try:
+ # from . import claude

-     PROVIDERS["claude"] = claude
- except ImportError as e:
-     logger.warning(f"Claude provider not available: {e}")
+ # PROVIDERS["claude"] = claude
+ # except ImportError as e:
+ # logger.warning(f"Claude provider not available: {e}")

- try:
-     from . import mistral
+ # try:
+ # from . import mistral

-     PROVIDERS["mistral"] = mistral
- except ImportError as e:
-     logger.warning(f"Mistral provider not available: {e}")
+ # PROVIDERS["mistral"] = mistral
+ # except ImportError as e:
+ # logger.warning(f"Mistral provider not available: {e}")

- try:
-     from . import cohere
+ # try:
+ # from . import cohere

-     PROVIDERS["cohere"] = cohere
- except ImportError as e:
-     logger.warning(f"Cohere provider not available: {e}")
+ # PROVIDERS["cohere"] = cohere
+ # except ImportError as e:
+ # logger.warning(f"Cohere provider not available: {e}")

- try:
-     from . import google
+ # try:
+ # from . import google

-     PROVIDERS["google"] = google
- except ImportError as e:
-     logger.warning(f"Google provider not available: {e}")
+ # PROVIDERS["google"] = google
+ # except ImportError as e:
+ # logger.warning(f"Google provider not available: {e}")

- try:
-     from . import meta
+ # try:
+ # from . import meta

-     PROVIDERS["meta"] = meta
- except ImportError as e:
-     logger.warning(f"Meta provider not available: {e}")
+ # PROVIDERS["meta"] = meta
+ # except ImportError as e:
+ # logger.warning(f"Meta provider not available: {e}")

- try:
-     from . import ai21
+ # try:
+ # from . import ai21

-     PROVIDERS["ai21"] = ai21
- except ImportError as e:
-     logger.warning(f"AI21 provider not available: {e}")
+ # PROVIDERS["ai21"] = ai21
+ # except ImportError as e:
+ # logger.warning(f"AI21 provider not available: {e}")

- try:
-     from . import llama
+ # try:
+ # from . import llama

-     PROVIDERS["llama"] = llama
- except ImportError as e:
-     logger.warning(f"Llama provider not available: {e}")
+ # PROVIDERS["llama"] = llama
+ # except ImportError as e:
+ # logger.warning(f"Llama provider not available: {e}")

- try:
-     from . import nvidia
+ # try:
+ # from . import nvidia

-     PROVIDERS["nvidia"] = nvidia
- except ImportError as e:
-     logger.warning(f"NVIDIA provider not available: {e}")
+ # PROVIDERS["nvidia"] = nvidia
+ # except ImportError as e:
+ # logger.warning(f"NVIDIA provider not available: {e}")

- try:
-     from . import deepseek
+ # try:
+ # from . import deepseek

-     PROVIDERS["deepseek"] = deepseek
- except ImportError as e:
-     logger.warning(f"Deepseek provider not available: {e}")
+ # PROVIDERS["deepseek"] = deepseek
+ # except ImportError as e:
+ # logger.warning(f"Deepseek provider not available: {e}")

- try:
-     from . import databricks
+ # try:
+ # from . import databricks

-     PROVIDERS["databricks"] = databricks
- except ImportError as e:
-     logger.warning(f"Databricks provider not available: {e}")
+ # PROVIDERS["databricks"] = databricks
+ # except ImportError as e:
+ # logger.warning(f"Databricks provider not available: {e}")


  def get_provider(provider_name, api_key, model_name):
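
In 0.1.2 only the OpenAI import remains active, so PROVIDERS holds a single entry. The body of get_provider is not part of this hunk; the call below is a guess from its signature alone and should be read as a sketch rather than documented usage:

from indoxRouter.providers import get_provider

# Assumed from the signature get_provider(provider_name, api_key, model_name);
# the return type and error handling are not visible in this diff.
provider = get_provider("openai", "sk-example", "gpt-4o-mini")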
indoxRouter/providers/ai21labs.json ADDED
@@ -0,0 +1,128 @@
+ [
+   {
+     "number": "1",
+     "modelName": "jamba-1.5-instruct",
+     "name": "Jamba 1.5 Instruct",
+     "type": "Text Generation",
+     "inputPricePer1KTokens": 0.005,
+     "outputPricePer1KTokens": 0.025,
+     "description": "Jamba 1.5 Instruct is AI21's flagship model, offering advanced reasoning, strong factuality, and proficiency in long-form content generation. Optimized for instruction following.",
+     "contextWindows": "128k Tokens",
+     "recommended": true,
+     "commercial": true,
+     "pricey": true,
+     "output": "4096 Tokens",
+     "comments": "AI21's most capable model for enterprise applications.",
+     "companyModelName": "AI21 Labs : Jamba 1.5 Instruct",
+     "promptTemplate": "Human: %1\nAssistant: %2",
+     "systemPrompt": ""
+   },
+   {
+     "number": "2",
+     "modelName": "jamba-1.5-mini",
+     "name": "Jamba 1.5 Mini",
+     "type": "Text Generation",
+     "inputPricePer1KTokens": 0.0001,
+     "outputPricePer1KTokens": 0.0005,
+     "description": "Jamba 1.5 Mini is a smaller, more efficient version of Jamba optimized for speed and cost. Good for simpler tasks and high-throughput applications.",
+     "contextWindows": "8k Tokens",
+     "recommended": true,
+     "commercial": true,
+     "pricey": false,
+     "output": "4096 Tokens",
+     "comments": "Cost-effective model for simpler use cases.",
+     "companyModelName": "AI21 Labs : Jamba 1.5 Mini",
+     "promptTemplate": "Human: %1\nAssistant: %2",
+     "systemPrompt": ""
+   },
+   {
+     "number": "3",
+     "modelName": "jamba-1.5-express",
+     "name": "Jamba 1.5 Express",
+     "type": "Text Generation",
+     "inputPricePer1KTokens": 0.0003,
+     "outputPricePer1KTokens": 0.0015,
+     "description": "Jamba 1.5 Express balances capability and efficiency. Designed for applications requiring moderate complexity with faster response times than the full Jamba model.",
+     "contextWindows": "16k Tokens",
+     "recommended": true,
+     "commercial": true,
+     "pricey": false,
+     "output": "4096 Tokens",
+     "comments": "Good balance of capability and performance.",
+     "companyModelName": "AI21 Labs : Jamba 1.5 Express",
+     "promptTemplate": "Human: %1\nAssistant: %2",
+     "systemPrompt": ""
+   },
+   {
+     "number": "4",
+     "modelName": "j2-ultra",
+     "name": "Jurassic-2 Ultra",
+     "type": "Text Generation",
+     "inputPricePer1KTokens": 0.01,
+     "outputPricePer1KTokens": 0.03,
+     "description": "Jurassic-2 Ultra is AI21's previous generation flagship model. Still capable for many tasks but superseded by Jamba for most use cases.",
+     "contextWindows": "8k Tokens",
+     "recommended": false,
+     "commercial": true,
+     "pricey": true,
+     "output": "8192 Tokens",
+     "comments": "Legacy model from the Jurassic-2 series.",
+     "companyModelName": "AI21 Labs : Jurassic-2 Ultra",
+     "promptTemplate": "Human: %1\nAssistant: %2",
+     "systemPrompt": ""
+   },
+   {
+     "number": "5",
+     "modelName": "j2-mid",
+     "name": "Jurassic-2 Mid",
+     "type": "Text Generation",
+     "inputPricePer1KTokens": 0.005,
+     "outputPricePer1KTokens": 0.015,
+     "description": "Jurassic-2 Mid offers a balance of capability and cost from AI21's previous generation. Has been largely replaced by Jamba models.",
+     "contextWindows": "8k Tokens",
+     "recommended": false,
+     "commercial": true,
+     "pricey": false,
+     "output": "8192 Tokens",
+     "comments": "Mid-tier legacy model from the Jurassic-2 series.",
+     "companyModelName": "AI21 Labs : Jurassic-2 Mid",
+     "promptTemplate": "Human: %1\nAssistant: %2",
+     "systemPrompt": ""
+   },
+   {
+     "number": "6",
+     "modelName": "j2-light",
+     "name": "Jurassic-2 Light",
+     "type": "Text Generation",
+     "inputPricePer1KTokens": 0.0003,
+     "outputPricePer1KTokens": 0.0009,
+     "description": "Jurassic-2 Light is the smallest model in the Jurassic-2 series. Designed for simple tasks with good efficiency, but now replaced by Jamba Mini for most use cases.",
+     "contextWindows": "8k Tokens",
+     "recommended": false,
+     "commercial": true,
+     "pricey": false,
+     "output": "8192 Tokens",
+     "comments": "Lightweight legacy model from the Jurassic-2 series.",
+     "companyModelName": "AI21 Labs : Jurassic-2 Light",
+     "promptTemplate": "Human: %1\nAssistant: %2",
+     "systemPrompt": ""
+   },
+   {
+     "number": "7",
+     "modelName": "embed-v1",
+     "name": "Embed v1",
+     "type": "Embedding",
+     "inputPricePer1KTokens": 0.0001,
+     "outputPricePer1KTokens": 0.0,
+     "description": "AI21's embedding model for transforming text into vector representations. Useful for search, classification, and recommendation systems.",
+     "contextWindows": "8k Tokens",
+     "recommended": true,
+     "commercial": true,
+     "pricey": false,
+     "output": "N/A",
+     "comments": "General purpose embedding model for semantic applications.",
+     "companyModelName": "AI21 Labs : Embed v1",
+     "promptTemplate": "",
+     "systemPrompt": ""
+   }
+ ]
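
Each provider JSON file is a flat list of model records sharing the same keys. A sketch of loading the bundled file and mapping one record onto the ModelInfo dataclass from indoxRouter/models/__init__.py; the field mapping is an assumption drawn from the key names, not code taken from the package:

import json
from importlib import resources

from indoxRouter.models import ModelInfo

# importlib.resources reads data files that ship inside the installed package.
with resources.files("indoxRouter.providers").joinpath("ai21labs.json").open() as f:
    records = json.load(f)

models = [
    ModelInfo(
        name=r["modelName"],
        provider="ai21labs",
        type=r["type"],
        description=r.get("description"),
        input_price_per_1k_tokens=r.get("inputPricePer1KTokens", 0.0),
        output_price_per_1k_tokens=r.get("outputPricePer1KTokens", 0.0),
        recommended=r.get("recommended", False),
        commercial=r.get("commercial", False),
        pricey=r.get("pricey", False),
        raw_info=r,
    )
    for r in records
]

print(models[0].name)  # jamba-1.5-instruct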