indoxrouter 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (36)
  1. indoxrouter-0.1.3.dist-info/METADATA +188 -0
  2. indoxrouter-0.1.3.dist-info/RECORD +4 -0
  3. indoxrouter-0.1.3.dist-info/top_level.txt +1 -0
  4. indoxRouter/__init__.py +0 -83
  5. indoxRouter/client.py +0 -632
  6. indoxRouter/client_resourses/__init__.py +0 -20
  7. indoxRouter/client_resourses/base.py +0 -67
  8. indoxRouter/client_resourses/chat.py +0 -144
  9. indoxRouter/client_resourses/completion.py +0 -138
  10. indoxRouter/client_resourses/embedding.py +0 -83
  11. indoxRouter/client_resourses/image.py +0 -116
  12. indoxRouter/client_resourses/models.py +0 -114
  13. indoxRouter/config.py +0 -151
  14. indoxRouter/constants/__init__.py +0 -81
  15. indoxRouter/exceptions/__init__.py +0 -70
  16. indoxRouter/models/__init__.py +0 -111
  17. indoxRouter/providers/__init__.py +0 -108
  18. indoxRouter/providers/ai21labs.json +0 -128
  19. indoxRouter/providers/base_provider.py +0 -101
  20. indoxRouter/providers/claude.json +0 -164
  21. indoxRouter/providers/cohere.json +0 -116
  22. indoxRouter/providers/databricks.json +0 -110
  23. indoxRouter/providers/deepseek.json +0 -110
  24. indoxRouter/providers/google.json +0 -128
  25. indoxRouter/providers/meta.json +0 -128
  26. indoxRouter/providers/mistral.json +0 -146
  27. indoxRouter/providers/nvidia.json +0 -110
  28. indoxRouter/providers/openai.json +0 -308
  29. indoxRouter/providers/openai.py +0 -521
  30. indoxRouter/providers/qwen.json +0 -110
  31. indoxRouter/utils/__init__.py +0 -240
  32. indoxrouter-0.1.2.dist-info/LICENSE +0 -21
  33. indoxrouter-0.1.2.dist-info/METADATA +0 -259
  34. indoxrouter-0.1.2.dist-info/RECORD +0 -33
  35. indoxrouter-0.1.2.dist-info/top_level.txt +0 -1
  36. {indoxrouter-0.1.2.dist-info → indoxrouter-0.1.3.dist-info}/WHEEL +0 -0
indoxRouter/providers/base_provider.py
@@ -1,101 +0,0 @@
- """
- Base provider module for indoxRouter.
- This module contains the base provider class that all providers will inherit from.
- """
-
- from abc import ABC, abstractmethod
- from typing import Dict, List, Any, Optional, Union
-
-
- class BaseProvider(ABC):
-     """Base provider class for all LLM providers."""
-
-     def __init__(self, api_key: str, model_name: str):
-         """
-         Initialize the provider.
-
-         Args:
-             api_key: The API key for the provider.
-             model_name: The name of the model to use.
-         """
-         self.api_key = api_key
-         self.model_name = model_name
-
-     @abstractmethod
-     def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
-         """
-         Send a chat request to the provider.
-
-         Args:
-             messages: A list of message dictionaries with 'role' and 'content' keys.
-             **kwargs: Additional parameters to pass to the provider.
-
-         Returns:
-             A dictionary containing the response from the provider.
-         """
-         pass
-
-     @abstractmethod
-     def complete(self, prompt: str, **kwargs) -> Dict[str, Any]:
-         """
-         Send a completion request to the provider.
-
-         Args:
-             prompt: The prompt to complete.
-             **kwargs: Additional parameters to pass to the provider.
-
-         Returns:
-             A dictionary containing the response from the provider.
-         """
-         pass
-
-     @abstractmethod
-     def embed(self, text: Union[str, List[str]], **kwargs) -> Dict[str, Any]:
-         """
-         Send an embedding request to the provider.
-
-         Args:
-             text: The text to embed. Can be a single string or a list of strings.
-             **kwargs: Additional parameters to pass to the provider.
-
-         Returns:
-             A dictionary containing the embeddings from the provider.
-         """
-         pass
-
-     @abstractmethod
-     def generate_image(self, prompt: str, **kwargs) -> Dict[str, Any]:
-         """
-         Generate an image from a prompt.
-
-         Args:
-             prompt: The prompt to generate an image from.
-             **kwargs: Additional parameters to pass to the provider.
-
-         Returns:
-             A dictionary containing the image URL or data.
-         """
-         pass
-
-     @abstractmethod
-     def get_token_count(self, text: str) -> int:
-         """
-         Get the number of tokens in a text.
-
-         Args:
-             text: The text to count tokens for.
-
-         Returns:
-             The number of tokens in the text.
-         """
-         pass
-
-     @abstractmethod
-     def get_model_info(self) -> Dict[str, Any]:
-         """
-         Get information about the model.
-
-         Returns:
-             A dictionary containing information about the model.
-         """
-         pass
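
The removed base_provider.py defined the abstract interface (chat, complete, embed, generate_image, get_token_count, get_model_info) that every provider in the 0.1.2 package implemented. Below is a minimal sketch of what a concrete subclass looks like, assuming the 0.1.2 module path; the EchoProvider class and its canned return values are purely illustrative and are not part of indoxRouter.

from typing import Any, Dict, List, Union

# Import path as it existed in indoxrouter 0.1.2; the module is removed in 0.1.3.
from indoxRouter.providers.base_provider import BaseProvider


class EchoProvider(BaseProvider):
    """Hypothetical provider that satisfies the abstract interface without any API calls."""

    def chat(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
        # Echo the last user message back as the assistant reply.
        last = messages[-1]["content"] if messages else ""
        return {"role": "assistant", "content": last, "model": self.model_name}

    def complete(self, prompt: str, **kwargs) -> Dict[str, Any]:
        return {"text": prompt, "model": self.model_name}

    def embed(self, text: Union[str, List[str]], **kwargs) -> Dict[str, Any]:
        texts = [text] if isinstance(text, str) else text
        # Fixed-size zero vectors stand in for real embeddings.
        return {"embeddings": [[0.0] * 8 for _ in texts]}

    def generate_image(self, prompt: str, **kwargs) -> Dict[str, Any]:
        return {"url": None, "prompt": prompt}

    def get_token_count(self, text: str) -> int:
        # Crude whitespace count; real providers delegate to a model tokenizer.
        return len(text.split())

    def get_model_info(self) -> Dict[str, Any]:
        return {"name": self.model_name, "provider": "echo"}

Because the constructor is inherited from BaseProvider, such a subclass is instantiated the same way as any provider, e.g. EchoProvider(api_key="-", model_name="echo-1").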
indoxRouter/providers/claude.json
@@ -1,164 +0,0 @@
- [
-   {
-     "number": "1",
-     "modelName": "claude-3-7-sonnet-20250219",
-     "name": "Claude 3.7 Sonnet",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0025,
-     "outputPricePer1KTokens": 0.0125,
-     "description": "Claude 3.7 Sonnet is Anthropic's most capable model with enhanced reasoning abilities. It excels at complex tasks requiring careful analysis, problem-solving, and high-quality output generation.",
-     "contextWindows": "200k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Anthropic's most advanced reasoning model with extended thinking capabilities.",
-     "companyModelName": "Anthropic : Claude 3.7 Sonnet",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "2",
-     "modelName": "claude-3-5-sonnet-20240620",
-     "name": "Claude 3.5 Sonnet",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0015,
-     "outputPricePer1KTokens": 0.0075,
-     "description": "Claude 3.5 Sonnet is Anthropic's balanced model offering strong capabilities with improved efficiency. Ideal for a wide range of tasks from content creation to complex reasoning.",
-     "contextWindows": "200k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Well-balanced model for most general use cases.",
-     "companyModelName": "Anthropic : Claude 3.5 Sonnet",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "3",
-     "modelName": "claude-3-5-haiku-20240620",
-     "name": "Claude 3.5 Haiku",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.00025,
-     "outputPricePer1KTokens": 0.00125,
-     "description": "Claude 3.5 Haiku is Anthropic's fastest and most cost-effective model. Optimized for high-throughput applications and quick responses while maintaining high quality outputs.",
-     "contextWindows": "200k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Fastest and most affordable model in the Claude 3 family.",
-     "companyModelName": "Anthropic : Claude 3.5 Haiku",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "4",
-     "modelName": "claude-3-opus-20240229",
-     "name": "Claude 3 Opus",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.015,
-     "outputPricePer1KTokens": 0.075,
-     "description": "Claude 3 Opus is Anthropic's most powerful model for sophisticated tasks requiring deep understanding and nuanced responses. Excels at complex writing, detailed analysis, and expert-level problem solving.",
-     "contextWindows": "200k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": true,
-     "output": "4096 Tokens",
-     "comments": "Highest capability model for the most demanding tasks.",
-     "companyModelName": "Anthropic : Claude 3 Opus",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "5",
-     "modelName": "claude-3-sonnet-20240229",
-     "name": "Claude 3 Sonnet",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.003,
-     "outputPricePer1KTokens": 0.015,
-     "description": "Claude 3 Sonnet offers a strong balance of intelligence and speed. Well-suited for enterprise applications requiring thoughtful responses and analysis.",
-     "contextWindows": "200k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Strong all-around performer for most business use cases.",
-     "companyModelName": "Anthropic : Claude 3 Sonnet",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "6",
-     "modelName": "claude-3-haiku-20240307",
-     "name": "Claude 3 Haiku",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.00025,
-     "outputPricePer1KTokens": 0.00125,
-     "description": "Claude 3 Haiku is Anthropic's fastest and most cost-effective model. Ideal for applications requiring quick responses and high throughput while maintaining good quality.",
-     "contextWindows": "200k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Best for applications requiring speed and cost efficiency.",
-     "companyModelName": "Anthropic : Claude 3 Haiku",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "7",
-     "modelName": "claude-2.1",
-     "name": "Claude 2.1",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.008,
-     "outputPricePer1KTokens": 0.024,
-     "description": "Claude 2.1 is a previous generation model with improved performance over Claude 2.0. Still capable for many tasks but with less capabilities than Claude 3 models.",
-     "contextWindows": "100k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Legacy model, recommended to upgrade to Claude 3 series.",
-     "companyModelName": "Anthropic : Claude 2.1",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "8",
-     "modelName": "claude-2.0",
-     "name": "Claude 2.0",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.008,
-     "outputPricePer1KTokens": 0.024,
-     "description": "Claude 2.0 is an earlier generation model. Provides reasonable performance for basic tasks but lacks the advanced capabilities of newer Claude models.",
-     "contextWindows": "100k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Legacy model, recommended to upgrade to Claude 3 series.",
-     "companyModelName": "Anthropic : Claude 2.0",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "9",
-     "modelName": "claude-instant-1.2",
-     "name": "Claude Instant 1.2",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0008,
-     "outputPricePer1KTokens": 0.0024,
-     "description": "Claude Instant 1.2 is a lightweight, faster version of Claude 1 series designed for quick responses to simpler queries. Significantly less capable than Claude 3 models.",
-     "contextWindows": "100k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Legacy model, recommended to use Claude 3.5 Haiku instead for better performance at similar price point.",
-     "companyModelName": "Anthropic : Claude Instant 1.2",
-     "promptTemplate": "Human: %1\n\nAssistant: %2",
-     "systemPrompt": ""
-   }
- ]
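
The claude.json entries above are plain data: each object records a modelName plus per-1K-token input and output prices. As an illustration only, the prices can be turned into a request-cost estimate as in the sketch below; the load_models and estimate_cost helpers are hypothetical and not part of the indoxRouter API.

import json
from pathlib import Path


def load_models(path: str) -> dict:
    """Index the JSON model entries by their 'modelName' field."""
    entries = json.loads(Path(path).read_text(encoding="utf-8"))
    return {entry["modelName"]: entry for entry in entries}


def estimate_cost(entry: dict, input_tokens: int, output_tokens: int) -> float:
    """Cost in USD, using the per-1K-token prices from one JSON entry."""
    return (
        input_tokens / 1000 * entry["inputPricePer1KTokens"]
        + output_tokens / 1000 * entry["outputPricePer1KTokens"]
    )


if __name__ == "__main__":
    models = load_models("claude.json")
    sonnet = models["claude-3-7-sonnet-20250219"]
    # 2,000 input tokens and 500 output tokens:
    # 2.0 * 0.0025 + 0.5 * 0.0125 = 0.005 + 0.00625 = 0.01125 USD
    print(f"${estimate_cost(sonnet, 2000, 500):.5f}")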
indoxRouter/providers/cohere.json
@@ -1,116 +0,0 @@
- [
-   {
-     "modelName": "command-r-plus",
-     "displayName": "Command R+",
-     "description": "Cohere's most powerful model for complex reasoning and generation tasks",
-     "maxTokens": 4096,
-     "inputPricePer1KTokens": 0.015,
-     "outputPricePer1KTokens": 0.075,
-     "systemPrompt": "You are a helpful AI assistant created by Cohere.",
-     "capabilities": [
-       "text-generation",
-       "chat",
-       "reasoning",
-       "summarization",
-       "code-generation"
-     ]
-   },
-   {
-     "modelName": "command-r",
-     "displayName": "Command R",
-     "description": "Balanced model for general-purpose text generation and reasoning",
-     "maxTokens": 4096,
-     "inputPricePer1KTokens": 0.01,
-     "outputPricePer1KTokens": 0.05,
-     "systemPrompt": "You are a helpful AI assistant created by Cohere.",
-     "capabilities": ["text-generation", "chat", "reasoning", "summarization"]
-   },
-   {
-     "modelName": "command-light",
-     "displayName": "Command Light",
-     "description": "Lightweight model for efficient text generation",
-     "maxTokens": 4096,
-     "inputPricePer1KTokens": 0.003,
-     "outputPricePer1KTokens": 0.015,
-     "systemPrompt": "You are a helpful AI assistant created by Cohere.",
-     "capabilities": ["text-generation", "chat", "summarization"]
-   },
-   {
-     "modelName": "command-r-plus-08-2024",
-     "displayName": "Command R+ (August 2024)",
-     "description": "Latest version of Cohere's most powerful model",
-     "maxTokens": 4096,
-     "inputPricePer1KTokens": 0.015,
-     "outputPricePer1KTokens": 0.075,
-     "systemPrompt": "You are a helpful AI assistant created by Cohere.",
-     "capabilities": [
-       "text-generation",
-       "chat",
-       "reasoning",
-       "summarization",
-       "code-generation"
-     ]
-   },
-   {
-     "modelName": "command",
-     "displayName": "Command",
-     "description": "Legacy model for general-purpose text generation",
-     "maxTokens": 4096,
-     "inputPricePer1KTokens": 0.005,
-     "outputPricePer1KTokens": 0.025,
-     "systemPrompt": "You are a helpful AI assistant created by Cohere.",
-     "capabilities": ["text-generation", "chat", "summarization"]
-   },
-   {
-     "modelName": "embed-english-v3.0",
-     "displayName": "Embed English v3.0",
-     "description": "English language embedding model",
-     "maxTokens": 512,
-     "inputPricePer1KTokens": 0.0001,
-     "outputPricePer1KTokens": 0.0,
-     "capabilities": ["embeddings"]
-   },
-   {
-     "modelName": "embed-multilingual-v3.0",
-     "displayName": "Embed Multilingual v3.0",
-     "description": "Multilingual embedding model supporting 100+ languages",
-     "maxTokens": 512,
-     "inputPricePer1KTokens": 0.0001,
-     "outputPricePer1KTokens": 0.0,
-     "capabilities": ["embeddings"]
-   },
-   {
-     "modelName": "rerank-english-v3.0",
-     "name": "Rerank English v3.0",
-     "type": "Reranking",
-     "inputPricePer1KTokens": 0.0001,
-     "outputPricePer1KTokens": 0.0,
-     "description": "Rerank is designed to improve search quality by reordering results based on relevance to a query. Optimized for English language search applications.",
-     "contextWindows": "512 Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "N/A",
-     "comments": "Specialized model for improving search result quality.",
-     "companyModelName": "Cohere : Rerank English v3.0",
-     "promptTemplate": "",
-     "systemPrompt": ""
-   },
-   {
-     "modelName": "rerank-multilingual-v3.0",
-     "name": "Rerank Multilingual v3.0",
-     "type": "Reranking",
-     "inputPricePer1KTokens": 0.0001,
-     "outputPricePer1KTokens": 0.0,
-     "description": "Multilingual version of Rerank supporting 100+ languages. Reorders search results based on relevance across language boundaries.",
-     "contextWindows": "512 Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "N/A",
-     "comments": "Best for global search applications across multiple languages.",
-     "companyModelName": "Cohere : Rerank Multilingual v3.0",
-     "promptTemplate": "",
-     "systemPrompt": ""
-   }
- ]
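
Note that cohere.json mixes two entry shapes: the chat and embedding entries carry a "capabilities" list, while the two rerank entries reuse the claude.json-style fields and have no such key. A small, hypothetical helper (not part of indoxRouter) that filters by capability therefore has to tolerate the missing key:

import json
from pathlib import Path
from typing import List


def models_with_capability(path: str, capability: str) -> List[str]:
    """Return the modelName values whose entry lists the given capability."""
    entries = json.loads(Path(path).read_text(encoding="utf-8"))
    return [
        entry["modelName"]
        for entry in entries
        # Rerank entries have no "capabilities" key, so default to an empty list.
        if capability in entry.get("capabilities", [])
    ]


if __name__ == "__main__":
    # Run against the removed cohere.json, this would list the two embed-* models.
    print(models_with_capability("cohere.json", "embeddings"))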
indoxRouter/providers/databricks.json
@@ -1,110 +0,0 @@
- [
-   {
-     "number": "1",
-     "modelName": "databricks-dbrx-instruct",
-     "name": "DBRX Instruct",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0008,
-     "outputPricePer1KTokens": 0.0024,
-     "description": "Databricks' flagship instruction-following model capable of advanced reasoning, code generation, and text processing tasks.",
-     "contextWindows": "32k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "8192 Tokens",
-     "comments": "Optimized for enterprise use cases and data analysis tasks.",
-     "companyModelName": "Databricks : DBRX Instruct",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "2",
-     "modelName": "databricks-dbrx-instruct-1.0",
-     "name": "DBRX Instruct 1.0",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.001,
-     "outputPricePer1KTokens": 0.003,
-     "description": "The version-locked release of DBRX Instruct optimized for production deployments requiring consistent outputs.",
-     "contextWindows": "32k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "8192 Tokens",
-     "comments": "Stable version for production applications.",
-     "companyModelName": "Databricks : DBRX Instruct 1.0",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "3",
-     "modelName": "databricks-dbrx-base",
-     "name": "DBRX Base",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0004,
-     "outputPricePer1KTokens": 0.0012,
-     "description": "Base model for custom fine-tuning and specialized enterprise applications with strong reasoning capabilities.",
-     "contextWindows": "32k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": false,
-     "output": "8192 Tokens",
-     "comments": "Suitable for fine-tuning on domain-specific data.",
-     "companyModelName": "Databricks : DBRX Base",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "4",
-     "modelName": "databricks-mosaic-instruct",
-     "name": "Mosaic Instruct",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0003,
-     "outputPricePer1KTokens": 0.0009,
-     "description": "Cost-effective model designed for enterprise applications requiring good performance at lower price points.",
-     "contextWindows": "16k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Excellent price-to-performance ratio for general tasks.",
-     "companyModelName": "Databricks : Mosaic Instruct",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "5",
-     "modelName": "databricks-mosaic-base",
-     "name": "Mosaic Base",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0002,
-     "outputPricePer1KTokens": 0.0006,
-     "description": "Base model for lightweight applications and custom fine-tuning projects requiring efficiency.",
-     "contextWindows": "16k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Good starting point for smaller-scale customizations.",
-     "companyModelName": "Databricks : Mosaic Base",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "6",
-     "modelName": "databricks-claude-3.5-sonnet",
-     "name": "Claude 3.5 Sonnet on Databricks",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.003,
-     "outputPricePer1KTokens": 0.015,
-     "description": "Anthropic's Claude 3.5 Sonnet offered through Databricks' platform, optimized for enterprise data analysis.",
-     "contextWindows": "200k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": true,
-     "output": "4096 Tokens",
-     "comments": "High-quality model with enterprise security and compliance.",
-     "companyModelName": "Databricks : Claude 3.5 Sonnet",
-     "promptTemplate": "<human>\n%1\n</human>\n<assistant>\n%2\n</assistant>",
-     "systemPrompt": ""
-   }
- ]
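
The databricks.json entries above (like the deepseek.json entries that follow) ship ChatML-style promptTemplate strings with %1 and %2 placeholders for the user and assistant turns. How indoxRouter itself rendered these templates is not visible in this diff; the helper below is only a plausible sketch of the substitution.

# Hypothetical rendering helper; the %1/%2 convention is taken from the JSON above,
# but indoxRouter's actual template handling is not shown in this diff.
def render_prompt(template: str, user_message: str, assistant_text: str = "") -> str:
    """Substitute the user turn for %1 and the (possibly empty) assistant turn for %2."""
    return template.replace("%1", user_message).replace("%2", assistant_text)


if __name__ == "__main__":
    dbrx_template = (
        "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>"
    )
    # With assistant_text left empty, the assistant slot in the template stays blank.
    print(render_prompt(dbrx_template, "Summarize this table."))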
indoxRouter/providers/deepseek.json
@@ -1,110 +0,0 @@
- [
-   {
-     "number": "1",
-     "modelName": "deepseek-llm-67b-chat",
-     "name": "DeepSeek LLM 67B Chat",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0007,
-     "outputPricePer1KTokens": 0.0021,
-     "description": "DeepSeek's primary large language model optimized for conversational applications with strong reasoning capabilities.",
-     "contextWindows": "32k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Well-balanced model for enterprise and research applications.",
-     "companyModelName": "DeepSeek : LLM 67B Chat",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "2",
-     "modelName": "deepseek-coder-33b-instruct",
-     "name": "DeepSeek Coder 33B Instruct",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0008,
-     "outputPricePer1KTokens": 0.0024,
-     "description": "Specialized coding model with 33B parameters, trained extensively on code repositories and programming documentation.",
-     "contextWindows": "16k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "8192 Tokens",
-     "comments": "Best model for code generation and software development tasks.",
-     "companyModelName": "DeepSeek : Coder 33B Instruct",
-     "promptTemplate": "<|role|>user\n<|message|>%1<|role|>assistant\n<|message|>%2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "3",
-     "modelName": "deepseek-coder-6.7b-instruct",
-     "name": "DeepSeek Coder 6.7B Instruct",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0003,
-     "outputPricePer1KTokens": 0.0009,
-     "description": "Compact coding model designed for efficiency while maintaining strong programming capabilities.",
-     "contextWindows": "16k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Cost-effective model for code generation tasks.",
-     "companyModelName": "DeepSeek : Coder 6.7B Instruct",
-     "promptTemplate": "<|role|>user\n<|message|>%1<|role|>assistant\n<|message|>%2",
-     "systemPrompt": ""
-   },
-   {
-     "number": "4",
-     "modelName": "deepseek-math-7b",
-     "name": "DeepSeek Math 7B",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0004,
-     "outputPricePer1KTokens": 0.0012,
-     "description": "Specialized model for mathematical reasoning and problem-solving, trained on mathematical content.",
-     "contextWindows": "8k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Excellent performance on mathematical tasks.",
-     "companyModelName": "DeepSeek : Math 7B",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "5",
-     "modelName": "deepseek-v2-base",
-     "name": "DeepSeek V2 Base",
-     "type": "Text Generation",
-     "inputPricePer1KTokens": 0.0005,
-     "outputPricePer1KTokens": 0.0015,
-     "description": "Second-generation base model from DeepSeek, suitable for fine-tuning and specialized applications.",
-     "contextWindows": "64k Tokens",
-     "recommended": false,
-     "commercial": true,
-     "pricey": false,
-     "output": "8192 Tokens",
-     "comments": "Versatile base model for custom applications.",
-     "companyModelName": "DeepSeek : V2 Base",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   },
-   {
-     "number": "6",
-     "modelName": "deepseek-vision-7b",
-     "name": "DeepSeek Vision 7B",
-     "type": "Text and Vision",
-     "inputPricePer1KTokens": 0.001,
-     "outputPricePer1KTokens": 0.003,
-     "description": "Multimodal model capable of understanding both images and text, with strong visual reasoning capabilities.",
-     "contextWindows": "16k Tokens",
-     "recommended": true,
-     "commercial": true,
-     "pricey": false,
-     "output": "4096 Tokens",
-     "comments": "Effective for image understanding and visual reasoning tasks.",
-     "companyModelName": "DeepSeek : Vision 7B",
-     "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>",
-     "systemPrompt": ""
-   }
- ]