@lobehub/chat 1.77.16 → 1.77.17
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/docker-compose/local/docker-compose.yml +2 -1
- package/locales/ar/components.json +4 -0
- package/locales/ar/modelProvider.json +1 -0
- package/locales/ar/models.json +8 -5
- package/locales/bg-BG/components.json +4 -0
- package/locales/bg-BG/modelProvider.json +1 -0
- package/locales/bg-BG/models.json +8 -5
- package/locales/de-DE/components.json +4 -0
- package/locales/de-DE/modelProvider.json +1 -0
- package/locales/de-DE/models.json +8 -5
- package/locales/en-US/components.json +4 -0
- package/locales/en-US/modelProvider.json +1 -0
- package/locales/en-US/models.json +8 -5
- package/locales/es-ES/components.json +4 -0
- package/locales/es-ES/modelProvider.json +1 -0
- package/locales/es-ES/models.json +7 -4
- package/locales/fa-IR/components.json +4 -0
- package/locales/fa-IR/modelProvider.json +1 -0
- package/locales/fa-IR/models.json +7 -4
- package/locales/fr-FR/components.json +4 -0
- package/locales/fr-FR/modelProvider.json +1 -0
- package/locales/fr-FR/models.json +8 -5
- package/locales/it-IT/components.json +4 -0
- package/locales/it-IT/modelProvider.json +1 -0
- package/locales/it-IT/models.json +7 -4
- package/locales/ja-JP/components.json +4 -0
- package/locales/ja-JP/modelProvider.json +1 -0
- package/locales/ja-JP/models.json +8 -5
- package/locales/ko-KR/components.json +4 -0
- package/locales/ko-KR/modelProvider.json +1 -0
- package/locales/ko-KR/models.json +8 -5
- package/locales/nl-NL/components.json +4 -0
- package/locales/nl-NL/modelProvider.json +1 -0
- package/locales/nl-NL/models.json +8 -5
- package/locales/pl-PL/components.json +4 -0
- package/locales/pl-PL/modelProvider.json +1 -0
- package/locales/pl-PL/models.json +8 -5
- package/locales/pt-BR/components.json +4 -0
- package/locales/pt-BR/modelProvider.json +1 -0
- package/locales/pt-BR/models.json +7 -4
- package/locales/ru-RU/components.json +4 -0
- package/locales/ru-RU/modelProvider.json +1 -0
- package/locales/ru-RU/models.json +7 -4
- package/locales/tr-TR/components.json +4 -0
- package/locales/tr-TR/modelProvider.json +1 -0
- package/locales/tr-TR/models.json +8 -5
- package/locales/vi-VN/components.json +4 -0
- package/locales/vi-VN/modelProvider.json +1 -0
- package/locales/vi-VN/models.json +8 -5
- package/locales/zh-CN/components.json +4 -0
- package/locales/zh-CN/modelProvider.json +1 -0
- package/locales/zh-CN/models.json +9 -6
- package/locales/zh-TW/components.json +4 -0
- package/locales/zh-TW/modelProvider.json +1 -0
- package/locales/zh-TW/models.json +7 -4
- package/package.json +1 -1
- package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +34 -0
- package/src/app/(backend)/webapi/{chat/models → models}/[provider]/route.ts +1 -2
- package/src/app/[variants]/(main)/settings/llm/ProviderList/Ollama/index.tsx +0 -7
- package/src/app/[variants]/(main)/settings/provider/(detail)/ollama/CheckError.tsx +1 -1
- package/src/components/FormAction/index.tsx +1 -1
- package/src/database/models/__tests__/aiProvider.test.ts +100 -0
- package/src/database/models/aiProvider.ts +11 -1
- package/src/features/Conversation/Error/OllamaBizError/InvalidOllamaModel.tsx +43 -0
- package/src/features/Conversation/Error/OllamaDesktopSetupGuide/index.tsx +61 -0
- package/src/features/Conversation/Error/index.tsx +7 -0
- package/src/features/DevPanel/SystemInspector/ServerConfig.tsx +18 -2
- package/src/features/DevPanel/SystemInspector/index.tsx +25 -6
- package/src/features/OllamaModelDownloader/index.tsx +149 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +6 -0
- package/src/libs/agent-runtime/BaseAI.ts +7 -0
- package/src/libs/agent-runtime/ollama/index.ts +84 -2
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +24 -3263
- package/src/libs/agent-runtime/openrouter/fixtures/frontendModels.json +25 -0
- package/src/libs/agent-runtime/openrouter/fixtures/models.json +0 -3353
- package/src/libs/agent-runtime/openrouter/index.test.ts +56 -1
- package/src/libs/agent-runtime/openrouter/index.ts +9 -4
- package/src/libs/agent-runtime/types/index.ts +1 -0
- package/src/libs/agent-runtime/types/model.ts +44 -0
- package/src/libs/agent-runtime/utils/streams/index.ts +1 -0
- package/src/libs/agent-runtime/utils/streams/model.ts +110 -0
- package/src/locales/default/components.ts +4 -0
- package/src/locales/default/modelProvider.ts +1 -0
- package/src/services/__tests__/models.test.ts +21 -0
- package/src/services/_url.ts +4 -1
- package/src/services/chat.ts +1 -1
- package/src/services/models.ts +153 -7
- package/src/store/aiInfra/slices/aiModel/action.ts +1 -1
- package/src/store/aiInfra/slices/aiProvider/action.ts +2 -1
- package/src/store/user/slices/modelList/action.test.ts +2 -2
- package/src/store/user/slices/modelList/action.ts +1 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/Ollama/Checker.tsx +0 -73
- package/src/app/[variants]/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/index.tsx +0 -127
- package/src/features/Conversation/Error/OllamaBizError/InvalidOllamaModel/index.tsx +0 -154
- package/src/features/Conversation/Error/OllamaBizError/InvalidOllamaModel/useDownloadMonitor.ts +0 -29
- package/src/services/__tests__/ollama.test.ts +0 -28
- package/src/services/ollama.ts +0 -83
- package/src/{app/[variants]/(main)/settings/provider/(detail)/ollama → features}/OllamaModelDownloader/useDownloadMonitor.ts +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
|
2
2
|
|
3
|
-
exports[`LobeOpenRouterAI > models > should get models 1`] = `
|
3
|
+
exports[`LobeOpenRouterAI > models > should get models with frontend models data 1`] = `
|
4
4
|
[
|
5
5
|
{
|
6
6
|
"contextWindowTokens": 131072,
|
@@ -11,3304 +11,65 @@ The model was trained on synthetic data.
|
|
11
11
|
_These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/reflection-70b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
12
12
|
"displayName": "Reflection 70B (free)",
|
13
13
|
"enabled": false,
|
14
|
-
"functionCall":
|
14
|
+
"functionCall": true,
|
15
15
|
"id": "mattshumer/reflection-70b:free",
|
16
16
|
"maxTokens": 4096,
|
17
17
|
"pricing": {
|
18
18
|
"input": 0,
|
19
19
|
"output": 0,
|
20
20
|
},
|
21
|
-
"reasoning":
|
21
|
+
"reasoning": true,
|
22
22
|
"releasedAt": "2024-09-06",
|
23
23
|
"vision": false,
|
24
24
|
},
|
25
|
+
]
|
26
|
+
`;
|
27
|
+
|
28
|
+
exports[`LobeOpenRouterAI > models > should handle fetch error gracefully 1`] = `
|
29
|
+
[
|
25
30
|
{
|
26
31
|
"contextWindowTokens": 131072,
|
27
32
|
"description": "Reflection Llama-3.1 70B is trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course.
|
28
33
|
|
29
|
-
The model was trained on synthetic data.
|
30
|
-
"displayName": "Reflection 70B",
|
31
|
-
"enabled": false,
|
32
|
-
"functionCall": false,
|
33
|
-
"id": "mattshumer/reflection-70b",
|
34
|
-
"maxTokens": undefined,
|
35
|
-
"pricing": {
|
36
|
-
"input": 0.35,
|
37
|
-
"output": 0.4,
|
38
|
-
},
|
39
|
-
"reasoning": false,
|
40
|
-
"releasedAt": "2024-09-06",
|
41
|
-
"vision": false,
|
42
|
-
},
|
43
|
-
{
|
44
|
-
"contextWindowTokens": 128000,
|
45
|
-
"description": "Command-R is a 35B parameter model that performs conversational language tasks at a higher quality, more reliably, and with a longer context than previous models. It can be used for complex workflows like code generation, retrieval augmented generation (RAG), tool use, and agents.
|
46
|
-
|
47
|
-
Read the launch post [here](https://txt.cohere.com/command-r/).
|
48
|
-
|
49
|
-
Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
|
50
|
-
"displayName": "Cohere: Command R (03-2024)",
|
51
|
-
"enabled": false,
|
52
|
-
"functionCall": true,
|
53
|
-
"id": "cohere/command-r-03-2024",
|
54
|
-
"maxTokens": 4000,
|
55
|
-
"pricing": {
|
56
|
-
"input": 0.5,
|
57
|
-
"output": 1.5,
|
58
|
-
},
|
59
|
-
"reasoning": false,
|
60
|
-
"releasedAt": "2024-08-31",
|
61
|
-
"vision": false,
|
62
|
-
},
|
63
|
-
{
|
64
|
-
"contextWindowTokens": 128000,
|
65
|
-
"description": "Command R+ is a new, 104B-parameter LLM from Cohere. It's useful for roleplay, general consumer usecases, and Retrieval Augmented Generation (RAG).
|
66
|
-
|
67
|
-
It offers multilingual support for ten key languages to facilitate global business operations. See benchmarks and the launch post [here](https://txt.cohere.com/command-r-plus-microsoft-azure/).
|
68
|
-
|
69
|
-
Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
|
70
|
-
"displayName": "Cohere: Command R+ (04-2024)",
|
71
|
-
"enabled": false,
|
72
|
-
"functionCall": true,
|
73
|
-
"id": "cohere/command-r-plus-04-2024",
|
74
|
-
"maxTokens": 4000,
|
75
|
-
"pricing": {
|
76
|
-
"input": 3,
|
77
|
-
"output": 15,
|
78
|
-
},
|
79
|
-
"reasoning": false,
|
80
|
-
"releasedAt": "2024-08-31",
|
81
|
-
"vision": false,
|
82
|
-
},
|
83
|
-
{
|
84
|
-
"contextWindowTokens": 128000,
|
85
|
-
"description": "command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while keeping the hardware footprint the same.
|
86
|
-
|
87
|
-
Read the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).
|
88
|
-
|
89
|
-
Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
|
90
|
-
"displayName": "Cohere: Command R+ (08-2024)",
|
91
|
-
"enabled": false,
|
92
|
-
"functionCall": true,
|
93
|
-
"id": "cohere/command-r-plus-08-2024",
|
94
|
-
"maxTokens": 4000,
|
95
|
-
"pricing": {
|
96
|
-
"input": 2.5,
|
97
|
-
"output": 10,
|
98
|
-
},
|
99
|
-
"reasoning": false,
|
100
|
-
"releasedAt": "2024-08-30",
|
101
|
-
"vision": false,
|
102
|
-
},
|
103
|
-
{
|
104
|
-
"contextWindowTokens": 128000,
|
105
|
-
"description": "command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. More broadly, it is better at math, code and reasoning and is competitive with the previous version of the larger Command R+ model.
|
106
|
-
|
107
|
-
Read the launch post [here](https://docs.cohere.com/changelog/command-gets-refreshed).
|
108
|
-
|
109
|
-
Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
|
110
|
-
"displayName": "Cohere: Command R (08-2024)",
|
111
|
-
"enabled": false,
|
112
|
-
"functionCall": true,
|
113
|
-
"id": "cohere/command-r-08-2024",
|
114
|
-
"maxTokens": 4000,
|
115
|
-
"pricing": {
|
116
|
-
"input": 0.15,
|
117
|
-
"output": 0.6,
|
118
|
-
},
|
119
|
-
"reasoning": false,
|
120
|
-
"releasedAt": "2024-08-30",
|
121
|
-
"vision": false,
|
122
|
-
},
|
123
|
-
{
|
124
|
-
"contextWindowTokens": 4000000,
|
125
|
-
"description": "Gemini 1.5 Flash 8B Experimental is an experimental, 8B parameter version of the [Gemini 1.5 Flash](/models/google/gemini-flash-1.5) model.
|
126
|
-
|
127
|
-
Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).
|
128
|
-
|
129
|
-
#multimodal
|
130
|
-
|
131
|
-
Note: This model is experimental and not suited for production use-cases. It may be removed or redirected to another model in the future.",
|
132
|
-
"displayName": "Google: Gemini Flash 8B 1.5 Experimental",
|
133
|
-
"enabled": false,
|
134
|
-
"functionCall": false,
|
135
|
-
"id": "google/gemini-flash-8b-1.5-exp",
|
136
|
-
"maxTokens": 32768,
|
137
|
-
"pricing": {
|
138
|
-
"input": 0,
|
139
|
-
"output": 0,
|
140
|
-
},
|
141
|
-
"reasoning": false,
|
142
|
-
"releasedAt": "2024-08-28",
|
143
|
-
"vision": true,
|
144
|
-
},
|
145
|
-
{
|
146
|
-
"contextWindowTokens": 4000000,
|
147
|
-
"description": "Gemini 1.5 Flash Experimental is an experimental version of the [Gemini 1.5 Flash](/models/google/gemini-flash-1.5) model.
|
148
|
-
|
149
|
-
Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).
|
150
|
-
|
151
|
-
#multimodal
|
34
|
+
The model was trained on synthetic data.
|
152
35
|
|
153
|
-
|
154
|
-
"displayName": "
|
155
|
-
"enabled": false,
|
156
|
-
"functionCall": false,
|
157
|
-
"id": "google/gemini-flash-1.5-exp",
|
158
|
-
"maxTokens": 32768,
|
159
|
-
"pricing": {
|
160
|
-
"input": 0,
|
161
|
-
"output": 0,
|
162
|
-
},
|
163
|
-
"reasoning": false,
|
164
|
-
"releasedAt": "2024-08-28",
|
165
|
-
"vision": true,
|
166
|
-
},
|
167
|
-
{
|
168
|
-
"contextWindowTokens": 8192,
|
169
|
-
"description": "Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.1](/models/sao10k/l3-euryale-70b).",
|
170
|
-
"displayName": "Llama 3.1 Euryale 70B v2.2",
|
36
|
+
_These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/reflection-70b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
37
|
+
"displayName": "Reflection 70B (free)",
|
171
38
|
"enabled": false,
|
172
39
|
"functionCall": false,
|
173
|
-
"id": "
|
174
|
-
"maxTokens": undefined,
|
175
|
-
"pricing": {
|
176
|
-
"input": 1.5,
|
177
|
-
"output": 1.5,
|
178
|
-
},
|
179
|
-
"reasoning": false,
|
180
|
-
"releasedAt": "2024-08-28",
|
181
|
-
"vision": false,
|
182
|
-
},
|
183
|
-
{
|
184
|
-
"contextWindowTokens": 256000,
|
185
|
-
"description": "Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality.
|
186
|
-
|
187
|
-
It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis.
|
188
|
-
|
189
|
-
Built on a novel SSM-Transformer architecture, it outperforms larger models like Llama 3.1 70B on benchmarks while maintaining resource efficiency.
|
190
|
-
|
191
|
-
Read their [announcement](https://www.ai21.com/blog/announcing-jamba-model-family) to learn more.",
|
192
|
-
"displayName": "AI21: Jamba 1.5 Large",
|
193
|
-
"enabled": false,
|
194
|
-
"functionCall": true,
|
195
|
-
"id": "ai21/jamba-1-5-large",
|
196
|
-
"maxTokens": 4096,
|
197
|
-
"pricing": {
|
198
|
-
"input": 2,
|
199
|
-
"output": 8,
|
200
|
-
},
|
201
|
-
"reasoning": false,
|
202
|
-
"releasedAt": "2024-08-23",
|
203
|
-
"vision": false,
|
204
|
-
},
|
205
|
-
{
|
206
|
-
"contextWindowTokens": 256000,
|
207
|
-
"description": "Jamba 1.5 Mini is the world's first production-grade Mamba-based model, combining SSM and Transformer architectures for a 256K context window and high efficiency.
|
208
|
-
|
209
|
-
It works with 9 languages and can handle various writing and analysis tasks as well as or better than similar small models.
|
210
|
-
|
211
|
-
This model uses less computer memory and works faster with longer texts than previous designs.
|
212
|
-
|
213
|
-
Read their [announcement](https://www.ai21.com/blog/announcing-jamba-model-family) to learn more.",
|
214
|
-
"displayName": "AI21: Jamba 1.5 Mini",
|
215
|
-
"enabled": false,
|
216
|
-
"functionCall": true,
|
217
|
-
"id": "ai21/jamba-1-5-mini",
|
40
|
+
"id": "mattshumer/reflection-70b:free",
|
218
41
|
"maxTokens": 4096,
|
219
|
-
"pricing": {
|
220
|
-
"input": 0.2,
|
221
|
-
"output": 0.4,
|
222
|
-
},
|
223
|
-
"reasoning": false,
|
224
|
-
"releasedAt": "2024-08-23",
|
225
|
-
"vision": false,
|
226
|
-
},
|
227
|
-
{
|
228
|
-
"contextWindowTokens": 128000,
|
229
|
-
"description": "Phi-3.5 models are lightweight, state-of-the-art open models. These models were trained with Phi-3 datasets that include both synthetic data and the filtered, publicly available websites data, with a focus on high quality and reasoning-dense properties. Phi-3.5 Mini uses 3.8B parameters, and is a dense decoder-only transformer model using the same tokenizer as [Phi-3 Mini](/models/microsoft/phi-3-mini-128k-instruct).
|
230
|
-
|
231
|
-
The models underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures. When assessed against benchmarks that test common sense, language understanding, math, code, long context and logical reasoning, Phi-3.5 models showcased robust and state-of-the-art performance among models with less than 13 billion parameters.",
|
232
|
-
"displayName": "Phi-3.5 Mini 128K Instruct",
|
233
|
-
"enabled": false,
|
234
|
-
"functionCall": true,
|
235
|
-
"id": "microsoft/phi-3.5-mini-128k-instruct",
|
236
|
-
"maxTokens": undefined,
|
237
|
-
"pricing": {
|
238
|
-
"input": 0.1,
|
239
|
-
"output": 0.1,
|
240
|
-
},
|
241
|
-
"reasoning": false,
|
242
|
-
"releasedAt": "2024-08-21",
|
243
|
-
"vision": false,
|
244
|
-
},
|
245
|
-
{
|
246
|
-
"contextWindowTokens": 131072,
|
247
|
-
"description": "Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.
|
248
|
-
|
249
|
-
Hermes 3 70B is a competitive, if not superior finetune of the [Llama-3.1 70B foundation model](/models/meta-llama/llama-3.1-70b-instruct), focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.
|
250
|
-
|
251
|
-
The Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.",
|
252
|
-
"displayName": "Nous: Hermes 3 70B Instruct",
|
253
|
-
"enabled": false,
|
254
|
-
"functionCall": true,
|
255
|
-
"id": "nousresearch/hermes-3-llama-3.1-70b",
|
256
|
-
"maxTokens": undefined,
|
257
|
-
"pricing": {
|
258
|
-
"input": 0.4,
|
259
|
-
"output": 0.4,
|
260
|
-
},
|
261
|
-
"reasoning": false,
|
262
|
-
"releasedAt": "2024-08-18",
|
263
|
-
"vision": false,
|
264
|
-
},
|
265
|
-
{
|
266
|
-
"contextWindowTokens": 131072,
|
267
|
-
"description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.
|
268
|
-
|
269
|
-
Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.
|
270
|
-
|
271
|
-
The Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.
|
272
|
-
|
273
|
-
Hermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.",
|
274
|
-
"displayName": "Nous: Hermes 3 405B Instruct",
|
275
|
-
"enabled": false,
|
276
|
-
"functionCall": true,
|
277
|
-
"id": "nousresearch/hermes-3-llama-3.1-405b",
|
278
|
-
"maxTokens": undefined,
|
279
|
-
"pricing": {
|
280
|
-
"input": 0,
|
281
|
-
"output": 0,
|
282
|
-
},
|
283
|
-
"reasoning": false,
|
284
|
-
"releasedAt": "2024-08-16",
|
285
|
-
"vision": false,
|
286
|
-
},
|
287
|
-
{
|
288
|
-
"contextWindowTokens": 128000,
|
289
|
-
"description": "Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board.
|
290
|
-
|
291
|
-
Hermes 3 405B is a frontier-level, full-parameter finetune of the Llama-3.1 405B foundation model, focused on aligning LLMs to the user, with powerful steering capabilities and control given to the end user.
|
292
|
-
|
293
|
-
The Hermes 3 series builds and expands on the Hermes 2 set of capabilities, including more powerful and reliable function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.
|
294
|
-
|
295
|
-
Hermes 3 is competitive, if not superior, to Llama-3.1 Instruct models at general capabilities, with varying strengths and weaknesses attributable between the two.
|
296
|
-
|
297
|
-
_These are extended-context endpoints for [Hermes 3 405B Instruct](/models/nousresearch/hermes-3-llama-3.1-405b). They may have higher prices._",
|
298
|
-
"displayName": "Nous: Hermes 3 405B Instruct (extended)",
|
299
|
-
"enabled": false,
|
300
|
-
"functionCall": true,
|
301
|
-
"id": "nousresearch/hermes-3-llama-3.1-405b:extended",
|
302
|
-
"maxTokens": undefined,
|
303
42
|
"pricing": {
|
304
43
|
"input": 0,
|
305
44
|
"output": 0,
|
306
45
|
},
|
307
46
|
"reasoning": false,
|
308
|
-
"releasedAt": "2024-
|
309
|
-
"vision": false,
|
310
|
-
},
|
311
|
-
{
|
312
|
-
"contextWindowTokens": 127072,
|
313
|
-
"description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance. The model is built upon the Llama 3.1 405B and has internet access.",
|
314
|
-
"displayName": "Perplexity: Llama 3.1 Sonar 405B Online",
|
315
|
-
"enabled": false,
|
316
|
-
"functionCall": false,
|
317
|
-
"id": "perplexity/llama-3.1-sonar-huge-128k-online",
|
318
|
-
"maxTokens": undefined,
|
319
|
-
"pricing": {
|
320
|
-
"input": 5,
|
321
|
-
"output": 5,
|
322
|
-
},
|
323
|
-
"reasoning": false,
|
324
|
-
"releasedAt": "2024-08-14",
|
325
|
-
"vision": false,
|
326
|
-
},
|
327
|
-
{
|
328
|
-
"contextWindowTokens": 128000,
|
329
|
-
"description": "Dynamic model continuously updated to the current version of [GPT-4o](/models/openai/gpt-4o) in ChatGPT. Intended for research and evaluation.
|
330
|
-
|
331
|
-
Note: This model is experimental and not suited for production use-cases. It may be removed or redirected to another model in the future.",
|
332
|
-
"displayName": "OpenAI: ChatGPT-4o",
|
333
|
-
"enabled": false,
|
334
|
-
"functionCall": false,
|
335
|
-
"id": "openai/chatgpt-4o-latest",
|
336
|
-
"maxTokens": 16384,
|
337
|
-
"pricing": {
|
338
|
-
"input": 5,
|
339
|
-
"output": 15,
|
340
|
-
},
|
341
|
-
"reasoning": false,
|
342
|
-
"releasedAt": "2024-08-14",
|
343
|
-
"vision": true,
|
344
|
-
},
|
345
|
-
{
|
346
|
-
"contextWindowTokens": 8192,
|
347
|
-
"description": "Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge.
|
348
|
-
|
349
|
-
Created by [Sao10k](https://huggingface.co/Sao10k), this model aims to offer an improved experience over Stheno v3.2, with enhanced creativity and logical reasoning.
|
350
|
-
|
351
|
-
For best results, use with Llama 3 Instruct context template, temperature 1.4, and min_p 0.1.",
|
352
|
-
"displayName": "Llama 3 8B Lunaris",
|
353
|
-
"enabled": false,
|
354
|
-
"functionCall": false,
|
355
|
-
"id": "sao10k/l3-lunaris-8b",
|
356
|
-
"maxTokens": undefined,
|
357
|
-
"pricing": {
|
358
|
-
"input": 2,
|
359
|
-
"output": 2,
|
360
|
-
},
|
361
|
-
"reasoning": false,
|
362
|
-
"releasedAt": "2024-08-13",
|
363
|
-
"vision": false,
|
364
|
-
},
|
365
|
-
{
|
366
|
-
"contextWindowTokens": 12000,
|
367
|
-
"description": "Starcannon 12B is a creative roleplay and story writing model, using [nothingiisreal/mn-celeste-12b](https://openrouter.ai/models/nothingiisreal/mn-celeste-12b) as a base and [intervitens/mini-magnum-12b-v1.1](https://huggingface.co/intervitens/mini-magnum-12b-v1.1) merged in using the [TIES](https://arxiv.org/abs/2306.01708) method.
|
368
|
-
|
369
|
-
Although more similar to Magnum overall, the model remains very creative, with a pleasant writing style. It is recommended for people wanting more variety than Magnum, and yet more verbose prose than Celeste.",
|
370
|
-
"displayName": "Mistral Nemo 12B Starcannon",
|
371
|
-
"enabled": false,
|
372
|
-
"functionCall": false,
|
373
|
-
"id": "aetherwiing/mn-starcannon-12b",
|
374
|
-
"maxTokens": undefined,
|
375
|
-
"pricing": {
|
376
|
-
"input": 2,
|
377
|
-
"output": 2,
|
378
|
-
},
|
379
|
-
"reasoning": false,
|
380
|
-
"releasedAt": "2024-08-13",
|
47
|
+
"releasedAt": "2024-09-06",
|
381
48
|
"vision": false,
|
382
49
|
},
|
383
|
-
|
384
|
-
|
385
|
-
"description": "The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the respone_format. Read more [here](https://openai.com/index/introducing-structured-outputs-in-the-api/).
|
386
|
-
|
387
|
-
GPT-4o ("o" for "omni") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.
|
50
|
+
]
|
51
|
+
`;
|
388
52
|
|
389
|
-
|
390
|
-
|
391
|
-
"enabled": false,
|
392
|
-
"functionCall": true,
|
393
|
-
"id": "openai/gpt-4o-2024-08-06",
|
394
|
-
"maxTokens": 16384,
|
395
|
-
"pricing": {
|
396
|
-
"input": 2.5,
|
397
|
-
"output": 10,
|
398
|
-
},
|
399
|
-
"reasoning": false,
|
400
|
-
"releasedAt": "2024-08-06",
|
401
|
-
"vision": true,
|
402
|
-
},
|
53
|
+
exports[`LobeOpenRouterAI > models > should handle fetch failure gracefully 1`] = `
|
54
|
+
[
|
403
55
|
{
|
404
56
|
"contextWindowTokens": 131072,
|
405
|
-
"description": "
|
406
|
-
|
407
|
-
It has demonstrated strong performance compared to leading closed-source models in human evaluations.
|
408
|
-
|
409
|
-
To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
|
410
|
-
"displayName": "Meta: Llama 3.1 405B (base)",
|
411
|
-
"enabled": false,
|
412
|
-
"functionCall": false,
|
413
|
-
"id": "meta-llama/llama-3.1-405b",
|
414
|
-
"maxTokens": undefined,
|
415
|
-
"pricing": {
|
416
|
-
"input": 2,
|
417
|
-
"output": 2,
|
418
|
-
},
|
419
|
-
"reasoning": false,
|
420
|
-
"releasedAt": "2024-08-02",
|
421
|
-
"vision": false,
|
422
|
-
},
|
423
|
-
{
|
424
|
-
"contextWindowTokens": 32000,
|
425
|
-
"description": "A specialized story writing and roleplaying model based on Mistral's NeMo 12B Instruct. Fine-tuned on curated datasets including Reddit Writing Prompts and Opus Instruct 25K.
|
426
|
-
|
427
|
-
This model excels at creative writing, offering improved NSFW capabilities, with smarter and more active narration. It demonstrates remarkable versatility in both SFW and NSFW scenarios, with strong Out of Character (OOC) steering capabilities, allowing fine-tuned control over narrative direction and character behavior.
|
428
|
-
|
429
|
-
Check out the model's [HuggingFace page](https://huggingface.co/nothingiisreal/MN-12B-Celeste-V1.9) for details on what parameters and prompts work best!",
|
430
|
-
"displayName": "Mistral Nemo 12B Celeste",
|
431
|
-
"enabled": false,
|
432
|
-
"functionCall": false,
|
433
|
-
"id": "nothingiisreal/mn-celeste-12b",
|
434
|
-
"maxTokens": undefined,
|
435
|
-
"pricing": {
|
436
|
-
"input": 1.5,
|
437
|
-
"output": 1.5,
|
438
|
-
},
|
439
|
-
"reasoning": false,
|
440
|
-
"releasedAt": "2024-08-02",
|
441
|
-
"vision": false,
|
442
|
-
},
|
443
|
-
{
|
444
|
-
"contextWindowTokens": 4000000,
|
445
|
-
"description": "Gemini 1.5 Pro (0827) is an experimental version of the [Gemini 1.5 Pro](/models/google/gemini-pro-1.5) model.
|
446
|
-
|
447
|
-
Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).
|
57
|
+
"description": "Reflection Llama-3.1 70B is trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course.
|
448
58
|
|
449
|
-
|
59
|
+
The model was trained on synthetic data.
|
450
60
|
|
451
|
-
|
452
|
-
"displayName": "
|
61
|
+
_These are free, rate-limited endpoints for [Reflection 70B](/models/mattshumer/reflection-70b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
62
|
+
"displayName": "Reflection 70B (free)",
|
453
63
|
"enabled": false,
|
454
64
|
"functionCall": false,
|
455
|
-
"id": "
|
456
|
-
"maxTokens":
|
65
|
+
"id": "mattshumer/reflection-70b:free",
|
66
|
+
"maxTokens": 4096,
|
457
67
|
"pricing": {
|
458
68
|
"input": 0,
|
459
69
|
"output": 0,
|
460
70
|
},
|
461
71
|
"reasoning": false,
|
462
|
-
"releasedAt": "2024-
|
463
|
-
"vision": true,
|
464
|
-
},
|
465
|
-
{
|
466
|
-
"contextWindowTokens": 127072,
|
467
|
-
"description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
|
468
|
-
|
469
|
-
This is the online version of the [offline chat model](/models/perplexity/llama-3.1-sonar-large-128k-chat). It is focused on delivering helpful, up-to-date, and factual responses. #online",
|
470
|
-
"displayName": "Perplexity: Llama 3.1 Sonar 70B Online",
|
471
|
-
"enabled": false,
|
472
|
-
"functionCall": false,
|
473
|
-
"id": "perplexity/llama-3.1-sonar-large-128k-online",
|
474
|
-
"maxTokens": undefined,
|
475
|
-
"pricing": {
|
476
|
-
"input": 1,
|
477
|
-
"output": 1,
|
478
|
-
},
|
479
|
-
"reasoning": false,
|
480
|
-
"releasedAt": "2024-08-01",
|
481
|
-
"vision": false,
|
482
|
-
},
|
483
|
-
{
|
484
|
-
"contextWindowTokens": 131072,
|
485
|
-
"description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
|
486
|
-
|
487
|
-
This is a normal offline LLM, but the [online version](/models/perplexity/llama-3.1-sonar-large-128k-online) of this model has Internet access.",
|
488
|
-
"displayName": "Perplexity: Llama 3.1 Sonar 70B",
|
489
|
-
"enabled": false,
|
490
|
-
"functionCall": false,
|
491
|
-
"id": "perplexity/llama-3.1-sonar-large-128k-chat",
|
492
|
-
"maxTokens": undefined,
|
493
|
-
"pricing": {
|
494
|
-
"input": 1,
|
495
|
-
"output": 1,
|
496
|
-
},
|
497
|
-
"reasoning": false,
|
498
|
-
"releasedAt": "2024-08-01",
|
499
|
-
"vision": false,
|
500
|
-
},
|
501
|
-
{
|
502
|
-
"contextWindowTokens": 127072,
|
503
|
-
"description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
|
504
|
-
|
505
|
-
This is the online version of the [offline chat model](/models/perplexity/llama-3.1-sonar-small-128k-chat). It is focused on delivering helpful, up-to-date, and factual responses. #online",
|
506
|
-
"displayName": "Perplexity: Llama 3.1 Sonar 8B Online",
|
507
|
-
"enabled": false,
|
508
|
-
"functionCall": false,
|
509
|
-
"id": "perplexity/llama-3.1-sonar-small-128k-online",
|
510
|
-
"maxTokens": undefined,
|
511
|
-
"pricing": {
|
512
|
-
"input": 0.2,
|
513
|
-
"output": 0.2,
|
514
|
-
},
|
515
|
-
"reasoning": false,
|
516
|
-
"releasedAt": "2024-08-01",
|
517
|
-
"vision": false,
|
518
|
-
},
|
519
|
-
{
|
520
|
-
"contextWindowTokens": 131072,
|
521
|
-
"description": "Llama 3.1 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
|
522
|
-
|
523
|
-
This is a normal offline LLM, but the [online version](/models/perplexity/llama-3.1-sonar-small-128k-online) of this model has Internet access.",
|
524
|
-
"displayName": "Perplexity: Llama 3.1 Sonar 8B",
|
525
|
-
"enabled": false,
|
526
|
-
"functionCall": false,
|
527
|
-
"id": "perplexity/llama-3.1-sonar-small-128k-chat",
|
528
|
-
"maxTokens": undefined,
|
529
|
-
"pricing": {
|
530
|
-
"input": 0.2,
|
531
|
-
"output": 0.2,
|
532
|
-
},
|
533
|
-
"reasoning": false,
|
534
|
-
"releasedAt": "2024-08-01",
|
535
|
-
"vision": false,
|
536
|
-
},
|
537
|
-
{
|
538
|
-
"contextWindowTokens": 131072,
|
539
|
-
"description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases.
|
540
|
-
|
541
|
-
It has demonstrated strong performance compared to leading closed-source models in human evaluations.
|
542
|
-
|
543
|
-
To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
|
544
|
-
"displayName": "Meta: Llama 3.1 70B Instruct",
|
545
|
-
"enabled": false,
|
546
|
-
"functionCall": true,
|
547
|
-
"id": "meta-llama/llama-3.1-70b-instruct",
|
548
|
-
"maxTokens": undefined,
|
549
|
-
"pricing": {
|
550
|
-
"input": 0.3,
|
551
|
-
"output": 0.3,
|
552
|
-
},
|
553
|
-
"reasoning": false,
|
554
|
-
"releasedAt": "2024-07-23",
|
555
|
-
"vision": false,
|
556
|
-
},
|
557
|
-
{
|
558
|
-
"contextWindowTokens": 131072,
|
559
|
-
"description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient.
|
560
|
-
|
561
|
-
It has demonstrated strong performance compared to leading closed-source models in human evaluations.
|
562
|
-
|
563
|
-
To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
|
564
|
-
|
565
|
-
_These are free, rate-limited endpoints for [Llama 3.1 8B Instruct](/models/meta-llama/llama-3.1-8b-instruct). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
566
|
-
"displayName": "Meta: Llama 3.1 8B Instruct (free)",
|
567
|
-
"enabled": true,
|
568
|
-
"functionCall": false,
|
569
|
-
"id": "meta-llama/llama-3.1-8b-instruct:free",
|
570
|
-
"maxTokens": 4096,
|
571
|
-
"pricing": {
|
572
|
-
"input": 0,
|
573
|
-
"output": 0,
|
574
|
-
},
|
575
|
-
"reasoning": false,
|
576
|
-
"releasedAt": "2024-07-23",
|
577
|
-
"vision": false,
|
578
|
-
},
|
579
|
-
{
|
580
|
-
"contextWindowTokens": 131072,
|
581
|
-
"description": "Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient.
|
582
|
-
|
583
|
-
It has demonstrated strong performance compared to leading closed-source models in human evaluations.
|
584
|
-
|
585
|
-
To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
|
586
|
-
"displayName": "Meta: Llama 3.1 8B Instruct",
|
587
|
-
"enabled": false,
|
588
|
-
"functionCall": true,
|
589
|
-
"id": "meta-llama/llama-3.1-8b-instruct",
|
590
|
-
"maxTokens": undefined,
|
591
|
-
"pricing": {
|
592
|
-
"input": 0.055,
|
593
|
-
"output": 0.055,
|
594
|
-
},
|
595
|
-
"reasoning": false,
|
596
|
-
"releasedAt": "2024-07-23",
|
597
|
-
"vision": false,
|
598
|
-
},
|
599
|
-
{
|
600
|
-
"contextWindowTokens": 131072,
|
601
|
-
"description": "The highly anticipated 400B class of Llama3 is here! Clocking in at 128k context with impressive eval scores, the Meta AI team continues to push the frontier of open-source LLMs.
|
602
|
-
|
603
|
-
Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 405B instruct-tuned version is optimized for high quality dialogue usecases.
|
604
|
-
|
605
|
-
It has demonstrated strong performance compared to leading closed-source models in human evaluations.
|
606
|
-
|
607
|
-
To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
|
608
|
-
"displayName": "Meta: Llama 3.1 405B Instruct",
|
609
|
-
"enabled": false,
|
610
|
-
"functionCall": true,
|
611
|
-
"id": "meta-llama/llama-3.1-405b-instruct",
|
612
|
-
"maxTokens": undefined,
|
613
|
-
"pricing": {
|
614
|
-
"input": 1.79,
|
615
|
-
"output": 1.79,
|
616
|
-
},
|
617
|
-
"reasoning": false,
|
618
|
-
"releasedAt": "2024-07-23",
|
619
|
-
"vision": false,
|
620
|
-
},
|
621
|
-
{
|
622
|
-
"contextWindowTokens": 8192,
|
623
|
-
"description": "Dolphin 2.9 is designed for instruction following, conversational, and coding. This model is a fine-tune of [Llama 3 70B](/models/meta-llama/llama-3-70b-instruct). It demonstrates improvements in instruction, conversation, coding, and function calling abilities, when compared to the original.
|
624
|
-
|
625
|
-
Uncensored and is stripped of alignment and bias, it requires an external alignment layer for ethical use. Users are cautioned to use this highly compliant model responsibly, as detailed in a blog post about uncensored models at [erichartford.com/uncensored-models](https://erichartford.com/uncensored-models).
|
626
|
-
|
627
|
-
Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
|
628
|
-
"displayName": "Dolphin Llama 3 70B 🐬",
|
629
|
-
"enabled": false,
|
630
|
-
"functionCall": true,
|
631
|
-
"id": "cognitivecomputations/dolphin-llama-3-70b",
|
632
|
-
"maxTokens": undefined,
|
633
|
-
"pricing": {
|
634
|
-
"input": 0.35,
|
635
|
-
"output": 0.4,
|
636
|
-
},
|
637
|
-
"reasoning": false,
|
638
|
-
"releasedAt": "2024-07-19",
|
639
|
-
"vision": false,
|
640
|
-
},
|
641
|
-
{
|
642
|
-
"contextWindowTokens": 256000,
|
643
|
-
"description": "A 7.3B parameter Mamba-based model designed for code and reasoning tasks.
|
644
|
-
|
645
|
-
- Linear time inference, allowing for theoretically infinite sequence lengths
|
646
|
-
- 256k token context window
|
647
|
-
- Optimized for quick responses, especially beneficial for code productivity
|
648
|
-
- Performs comparably to state-of-the-art transformer models in code and reasoning tasks
|
649
|
-
- Available under the Apache 2.0 license for free use, modification, and distribution",
|
650
|
-
"displayName": "Mistral: Codestral Mamba",
|
651
|
-
"enabled": false,
|
652
|
-
"functionCall": true,
|
653
|
-
"id": "mistralai/codestral-mamba",
|
654
|
-
"maxTokens": undefined,
|
655
|
-
"pricing": {
|
656
|
-
"input": 0.25,
|
657
|
-
"output": 0.25,
|
658
|
-
},
|
659
|
-
"reasoning": false,
|
660
|
-
"releasedAt": "2024-07-19",
|
661
|
-
"vision": false,
|
662
|
-
},
|
663
|
-
{
|
664
|
-
"contextWindowTokens": 128000,
|
665
|
-
"description": "A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA.
|
666
|
-
|
667
|
-
The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese, Korean, Arabic, and Hindi.
|
668
|
-
|
669
|
-
It supports function calling and is released under the Apache 2.0 license.",
|
670
|
-
"displayName": "Mistral: Mistral Nemo",
|
671
|
-
"enabled": false,
|
672
|
-
"functionCall": true,
|
673
|
-
"id": "mistralai/mistral-nemo",
|
674
|
-
"maxTokens": undefined,
|
675
|
-
"pricing": {
|
676
|
-
"input": 0.13,
|
677
|
-
"output": 0.13,
|
678
|
-
},
|
679
|
-
"reasoning": false,
|
680
|
-
"releasedAt": "2024-07-19",
|
681
|
-
"vision": false,
|
682
|
-
},
|
683
|
-
{
|
684
|
-
"contextWindowTokens": 128000,
|
685
|
-
"description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.
|
686
|
-
|
687
|
-
As their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). It maintains SOTA intelligence, while being significantly more cost-effective.
|
688
|
-
|
689
|
-
GPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).
|
690
|
-
|
691
|
-
Check out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.",
|
692
|
-
"displayName": "OpenAI: GPT-4o-mini (2024-07-18)",
|
693
|
-
"enabled": false,
|
694
|
-
"functionCall": true,
|
695
|
-
"id": "openai/gpt-4o-mini-2024-07-18",
|
696
|
-
"maxTokens": 16384,
|
697
|
-
"pricing": {
|
698
|
-
"input": 0.15,
|
699
|
-
"output": 0.6,
|
700
|
-
},
|
701
|
-
"reasoning": false,
|
702
|
-
"releasedAt": "2024-07-18",
|
703
|
-
"vision": true,
|
704
|
-
},
|
705
|
-
{
|
706
|
-
"contextWindowTokens": 128000,
|
707
|
-
"description": "GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs.
|
708
|
-
|
709
|
-
As their most advanced small model, it is many multiples more affordable than other recent frontier models, and more than 60% cheaper than [GPT-3.5 Turbo](/models/openai/gpt-3.5-turbo). It maintains SOTA intelligence, while being significantly more cost-effective.
|
710
|
-
|
711
|
-
GPT-4o mini achieves an 82% score on MMLU and presently ranks higher than GPT-4 on chat preferences [common leaderboards](https://arena.lmsys.org/).
|
712
|
-
|
713
|
-
Check out the [launch announcement](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) to learn more.",
|
714
|
-
"displayName": "OpenAI: GPT-4o-mini",
|
715
|
-
"enabled": true,
|
716
|
-
"functionCall": true,
|
717
|
-
"id": "openai/gpt-4o-mini",
|
718
|
-
"maxTokens": 16384,
|
719
|
-
"pricing": {
|
720
|
-
"input": 0.15,
|
721
|
-
"output": 0.6,
|
722
|
-
},
|
723
|
-
"reasoning": false,
|
724
|
-
"releasedAt": "2024-07-18",
|
725
|
-
"vision": true,
|
726
|
-
},
|
727
|
-
{
|
728
|
-
"contextWindowTokens": 32768,
|
729
|
-
"description": "Qwen2 7B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.
|
730
|
-
|
731
|
-
It features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.
|
732
|
-
|
733
|
-
For more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).
|
734
|
-
|
735
|
-
Usage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).
|
736
|
-
|
737
|
-
_These are free, rate-limited endpoints for [Qwen 2 7B Instruct](/models/qwen/qwen-2-7b-instruct). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
738
|
-
"displayName": "Qwen 2 7B Instruct (free)",
|
739
|
-
"enabled": true,
|
740
|
-
"functionCall": false,
|
741
|
-
"id": "qwen/qwen-2-7b-instruct:free",
|
742
|
-
"maxTokens": 4096,
|
743
|
-
"pricing": {
|
744
|
-
"input": 0,
|
745
|
-
"output": 0,
|
746
|
-
},
|
747
|
-
"reasoning": false,
|
748
|
-
"releasedAt": "2024-07-16",
|
749
|
-
"vision": false,
|
750
|
-
},
|
751
|
-
{
|
752
|
-
"contextWindowTokens": 32768,
|
753
|
-
"description": "Qwen2 7B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.
|
754
|
-
|
755
|
-
It features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.
|
756
|
-
|
757
|
-
For more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).
|
758
|
-
|
759
|
-
Usage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).",
|
760
|
-
"displayName": "Qwen 2 7B Instruct",
|
761
|
-
"enabled": false,
|
762
|
-
"functionCall": false,
|
763
|
-
"id": "qwen/qwen-2-7b-instruct",
|
764
|
-
"maxTokens": undefined,
|
765
|
-
"pricing": {
|
766
|
-
"input": 0.055,
|
767
|
-
"output": 0.055,
|
768
|
-
},
|
769
|
-
"reasoning": false,
|
770
|
-
"releasedAt": "2024-07-16",
|
771
|
-
"vision": false,
|
772
|
-
},
|
773
|
-
{
|
774
|
-
"contextWindowTokens": 8192,
|
775
|
-
"description": "Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini).
|
776
|
-
|
777
|
-
Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning.
|
778
|
-
|
779
|
-
See the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).",
|
780
|
-
"displayName": "Google: Gemma 2 27B",
|
781
|
-
"enabled": false,
|
782
|
-
"functionCall": false,
|
783
|
-
"id": "google/gemma-2-27b-it",
|
784
|
-
"maxTokens": undefined,
|
785
|
-
"pricing": {
|
786
|
-
"input": 0.27,
|
787
|
-
"output": 0.27,
|
788
|
-
},
|
789
|
-
"reasoning": false,
|
790
|
-
"releasedAt": "2024-07-13",
|
791
|
-
"vision": false,
|
792
|
-
},
|
793
|
-
{
|
794
|
-
"contextWindowTokens": 16384,
|
795
|
-
"description": "From the maker of [Goliath](https://openrouter.ai/models/alpindale/goliath-120b), Magnum 72B is the first in a new family of models designed to achieve the prose quality of the Claude 3 models, notably Opus & Sonnet.
|
796
|
-
|
797
|
-
The model is based on [Qwen2 72B](https://openrouter.ai/models/qwen/qwen-2-72b-instruct) and trained with 55 million tokens of highly curated roleplay (RP) data.",
|
798
|
-
"displayName": "Magnum 72B",
|
799
|
-
"enabled": false,
|
800
|
-
"functionCall": false,
|
801
|
-
"id": "alpindale/magnum-72b",
|
802
|
-
"maxTokens": 1024,
|
803
|
-
"pricing": {
|
804
|
-
"input": 3.75,
|
805
|
-
"output": 4.5,
|
806
|
-
},
|
807
|
-
"reasoning": false,
|
808
|
-
"releasedAt": "2024-07-11",
|
809
|
-
"vision": false,
|
810
|
-
},
|
811
|
-
{
|
812
|
-
"contextWindowTokens": 16384,
|
813
|
-
"description": "An experimental merge model based on Llama 3, exhibiting a very distinctive style of writing. It combines the the best of [Meta's Llama 3 8B](https://openrouter.ai/models/meta-llama/llama-3-8b-instruct) and Nous Research's [Hermes 2 Pro](https://openrouter.ai/models/nousresearch/hermes-2-pro-llama-3-8b).
|
814
|
-
|
815
|
-
Hermes-2 Θ (theta) was specifically designed with a few capabilities in mind: executing function calls, generating JSON output, and most remarkably, demonstrating metacognitive abilities (contemplating the nature of thought and recognizing the diversity of cognitive processes among individuals).",
|
816
|
-
"displayName": "Nous: Hermes 2 Theta 8B",
|
817
|
-
"enabled": false,
|
818
|
-
"functionCall": false,
|
819
|
-
"id": "nousresearch/hermes-2-theta-llama-3-8b",
|
820
|
-
"maxTokens": 2048,
|
821
|
-
"pricing": {
|
822
|
-
"input": 0.1875,
|
823
|
-
"output": 1.125,
|
824
|
-
},
|
825
|
-
"reasoning": false,
|
826
|
-
"releasedAt": "2024-07-11",
|
827
|
-
"vision": false,
|
828
|
-
},
|
829
|
-
{
|
830
|
-
"contextWindowTokens": 8192,
|
831
|
-
"description": "Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class.
|
832
|
-
|
833
|
-
Designed for a wide variety of tasks, it empowers developers and researchers to build innovative applications, while maintaining accessibility, safety, and cost-effectiveness.
|
834
|
-
|
835
|
-
See the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).
|
836
|
-
|
837
|
-
_These are free, rate-limited endpoints for [Gemma 2 9B](/models/google/gemma-2-9b-it). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
838
|
-
"displayName": "Google: Gemma 2 9B (free)",
|
839
|
-
"enabled": true,
|
840
|
-
"functionCall": false,
|
841
|
-
"id": "google/gemma-2-9b-it:free",
|
842
|
-
"maxTokens": 2048,
|
843
|
-
"pricing": {
|
844
|
-
"input": 0,
|
845
|
-
"output": 0,
|
846
|
-
},
|
847
|
-
"reasoning": false,
|
848
|
-
"releasedAt": "2024-06-28",
|
849
|
-
"vision": false,
|
850
|
-
},
|
851
|
-
{
|
852
|
-
"contextWindowTokens": 8192,
|
853
|
-
"description": "Gemma 2 9B by Google is an advanced, open-source language model that sets a new standard for efficiency and performance in its size class.
|
854
|
-
|
855
|
-
Designed for a wide variety of tasks, it empowers developers and researchers to build innovative applications, while maintaining accessibility, safety, and cost-effectiveness.
|
856
|
-
|
857
|
-
See the [launch announcement](https://blog.google/technology/developers/google-gemma-2/) for more details. Usage of Gemma is subject to Google's [Gemma Terms of Use](https://ai.google.dev/gemma/terms).",
|
858
|
-
"displayName": "Google: Gemma 2 9B",
|
859
|
-
"enabled": false,
|
860
|
-
"functionCall": false,
|
861
|
-
"id": "google/gemma-2-9b-it",
|
862
|
-
"maxTokens": undefined,
|
863
|
-
"pricing": {
|
864
|
-
"input": 0.06,
|
865
|
-
"output": 0.06,
|
866
|
-
},
|
867
|
-
"reasoning": false,
|
868
|
-
"releasedAt": "2024-06-28",
|
869
|
-
"vision": false,
|
870
|
-
},
|
871
|
-
{
|
872
|
-
"contextWindowTokens": 32000,
|
873
|
-
"description": "Stheno 8B 32K is a creative writing/roleplay model from [Sao10k](https://ko-fi.com/sao10k). It was trained at 8K context, then expanded to 32K context.
|
874
|
-
|
875
|
-
Compared to older Stheno version, this model is trained on:
|
876
|
-
- 2x the amount of creative writing samples
|
877
|
-
- Cleaned up roleplaying samples
|
878
|
-
- Fewer low quality samples",
|
879
|
-
"displayName": "Llama 3 Stheno 8B v3.3 32K",
|
880
|
-
"enabled": false,
|
881
|
-
"functionCall": false,
|
882
|
-
"id": "sao10k/l3-stheno-8b",
|
883
|
-
"maxTokens": undefined,
|
884
|
-
"pricing": {
|
885
|
-
"input": 0.25,
|
886
|
-
"output": 1.5,
|
887
|
-
},
|
888
|
-
"reasoning": false,
|
889
|
-
"releasedAt": "2024-06-27",
|
890
|
-
"vision": false,
|
891
|
-
},
|
892
|
-
{
|
893
|
-
"contextWindowTokens": 256000,
|
894
|
-
"description": "The Jamba-Instruct model, introduced by AI21 Labs, is an instruction-tuned variant of their hybrid SSM-Transformer Jamba model, specifically optimized for enterprise applications.
|
895
|
-
|
896
|
-
- 256K Context Window: It can process extensive information, equivalent to a 400-page novel, which is beneficial for tasks involving large documents such as financial reports or legal documents
|
897
|
-
- Safety and Accuracy: Jamba-Instruct is designed with enhanced safety features to ensure secure deployment in enterprise environments, reducing the risk and cost of implementation
|
898
|
-
|
899
|
-
Read their [announcement](https://www.ai21.com/blog/announcing-jamba) to learn more.
|
900
|
-
|
901
|
-
Jamba has a knowledge cutoff of February 2024.",
|
902
|
-
"displayName": "AI21: Jamba Instruct",
|
903
|
-
"enabled": false,
|
904
|
-
"functionCall": false,
|
905
|
-
"id": "ai21/jamba-instruct",
|
906
|
-
"maxTokens": 4096,
|
907
|
-
"pricing": {
|
908
|
-
"input": 0.5,
|
909
|
-
"output": 0.7,
|
910
|
-
},
|
911
|
-
"reasoning": false,
|
912
|
-
"releasedAt": "2024-06-25",
|
913
|
-
"vision": false,
|
914
|
-
},
|
915
|
-
{
|
916
|
-
"contextWindowTokens": 200000,
|
917
|
-
"description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:
|
918
|
-
|
919
|
-
- Coding: Autonomously writes, edits, and runs code with reasoning and troubleshooting
|
920
|
-
- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights
|
921
|
-
- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone
|
922
|
-
- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)
|
923
|
-
|
924
|
-
#multimodal",
|
925
|
-
"displayName": "Anthropic: Claude 3.5 Sonnet",
|
926
|
-
"enabled": true,
|
927
|
-
"functionCall": true,
|
928
|
-
"id": "anthropic/claude-3.5-sonnet",
|
929
|
-
"maxTokens": 8192,
|
930
|
-
"pricing": {
|
931
|
-
"input": 3,
|
932
|
-
"output": 15,
|
933
|
-
},
|
934
|
-
"reasoning": false,
|
935
|
-
"releasedAt": "2024-06-20",
|
936
|
-
"vision": true,
|
937
|
-
},
|
938
|
-
{
|
939
|
-
"contextWindowTokens": 200000,
|
940
|
-
"description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:
|
941
|
-
|
942
|
-
- Coding: Autonomously writes, edits, and runs code with reasoning and troubleshooting
|
943
|
-
- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights
|
944
|
-
- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone
|
945
|
-
- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)
|
946
|
-
|
947
|
-
#multimodal
|
948
|
-
|
949
|
-
_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-3.5-sonnet) variant._",
|
950
|
-
"displayName": "Anthropic: Claude 3.5 Sonnet (self-moderated)",
|
951
|
-
"enabled": false,
|
952
|
-
"functionCall": true,
|
953
|
-
"id": "anthropic/claude-3.5-sonnet:beta",
|
954
|
-
"maxTokens": 8192,
|
955
|
-
"pricing": {
|
956
|
-
"input": 3,
|
957
|
-
"output": 15,
|
958
|
-
},
|
959
|
-
"reasoning": false,
|
960
|
-
"releasedAt": "2024-06-20",
|
961
|
-
"vision": true,
|
962
|
-
},
|
963
|
-
- {
- "contextWindowTokens": 8192,
- "description": "Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k).
-
- - Better prompt adherence.
- - Better anatomy / spatial awareness.
- - Adapts much better to unique and custom formatting / reply formats.
- - Very creative, lots of unique swipes.
- - Is not restrictive during roleplays.",
- "displayName": "Llama 3 Euryale 70B v2.1",
- "enabled": false,
- "functionCall": false,
- "id": "sao10k/l3-euryale-70b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.35,
- "output": 0.4,
- },
- "reasoning": false,
- "releasedAt": "2024-06-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 4000,
- "description": "Phi-3 4K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.
-
- At time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.
-
- For 128k context length, try [Phi-3 Medium 128K](/models/microsoft/phi-3-medium-128k-instruct).",
- "displayName": "Phi-3 Medium 4K Instruct",
- "enabled": false,
- "functionCall": false,
- "id": "microsoft/phi-3-medium-4k-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.14,
- "output": 0.14,
- },
- "reasoning": false,
- "releasedAt": "2024-06-15",
- "vision": false,
- },
- {
- "contextWindowTokens": 65536,
- "description": "Dolphin 2.9 is designed for instruction following, conversational, and coding. This model is a finetune of [Mixtral 8x22B Instruct](/models/mistralai/mixtral-8x22b-instruct). It features a 64k context length and was fine-tuned with a 16k sequence length using ChatML templates.
-
- This model is a successor to [Dolphin Mixtral 8x7B](/models/cognitivecomputations/dolphin-mixtral-8x7b).
-
- The model is uncensored and is stripped of alignment and bias. It requires an external alignment layer for ethical use. Users are cautioned to use this highly compliant model responsibly, as detailed in a blog post about uncensored models at [erichartford.com/uncensored-models](https://erichartford.com/uncensored-models).
-
- #moe #uncensored",
- "displayName": "Dolphin 2.9.2 Mixtral 8x22B 🐬",
- "enabled": false,
- "functionCall": false,
- "id": "cognitivecomputations/dolphin-mixtral-8x22b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.9,
- "output": 0.9,
- },
- "reasoning": false,
- "releasedAt": "2024-06-08",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "Qwen2 72B is a transformer-based model that excels in language understanding, multilingual capabilities, coding, mathematics, and reasoning.
-
- It features SwiGLU activation, attention QKV bias, and group query attention. It is pretrained on extensive data with supervised finetuning and direct preference optimization.
-
- For more details, see this [blog post](https://qwenlm.github.io/blog/qwen2/) and [GitHub repo](https://github.com/QwenLM/Qwen2).
-
- Usage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).",
- "displayName": "Qwen 2 72B Instruct",
- "enabled": false,
- "functionCall": false,
- "id": "qwen/qwen-2-72b-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.35,
- "output": 0.4,
- },
- "reasoning": false,
- "releasedAt": "2024-06-07",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "OpenChat 8B is a library of open-source language models, fine-tuned with "C-RLFT (Conditioned Reinforcement Learning Fine-Tuning)" - a strategy inspired by offline reinforcement learning. It has been trained on mixed-quality data without preference labels.
-
- It outperforms many similarly sized models including [Llama 3 8B Instruct](/models/meta-llama/llama-3-8b-instruct) and various fine-tuned models. It excels in general conversation, coding assistance, and mathematical reasoning.
-
- - For OpenChat fine-tuned on Mistral 7B, check out [OpenChat 7B](/models/openchat/openchat-7b).
- - For OpenChat fine-tuned on Llama 8B, check out [OpenChat 8B](/models/openchat/openchat-8b).
-
- #open-source",
- "displayName": "OpenChat 3.6 8B",
- "enabled": false,
- "functionCall": false,
- "id": "openchat/openchat-8b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.055,
- "output": 0.055,
- },
- "reasoning": false,
- "releasedAt": "2024-06-01",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.",
- "displayName": "NousResearch: Hermes 2 Pro - Llama-3 8B",
- "enabled": false,
- "functionCall": false,
- "id": "nousresearch/hermes-2-pro-llama-3-8b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.14,
- "output": 0.14,
- },
- "reasoning": false,
- "releasedAt": "2024-05-27",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
-
- An improved version of [Mistral 7B Instruct v0.2](/models/mistralai/mistral-7b-instruct-v0.2), with the following changes:
-
- - Extended vocabulary to 32768
- - Supports v3 Tokenizer
- - Supports function calling
-
- NOTE: Support for function calling depends on the provider.",
- "displayName": "Mistral: Mistral 7B Instruct v0.3",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mistral-7b-instruct-v0.3",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.055,
- "output": 0.055,
- },
- "reasoning": false,
- "releasedAt": "2024-05-27",
- "vision": false,
- },
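The NOTE in the entry above says function-calling support ultimately depends on the serving provider, even when the snapshot flags the model as capable. A minimal sketch of gating tool definitions on that flag (the `ChatTool` type and `buildPayload` helper are illustrative, not the package's real API):

```ts
// Illustrative tool shape; real tool schemas vary by provider.
type ChatTool = { function: { name: string; parameters: object }; type: 'function' };

// Attach tools only when the snapshot marks the model as tool-capable.
// Per the NOTE above, treat the flag as a hint: the provider may still reject tools.
function buildPayload(
  model: { functionCall: boolean; id: string },
  messages: Array<{ content: string; role: 'assistant' | 'system' | 'user' }>,
  tools: ChatTool[] = [],
) {
  return {
    messages,
    model: model.id,
    ...(model.functionCall && tools.length > 0 ? { tools } : {}),
  };
}
```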
- {
- "contextWindowTokens": 32768,
- "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
-
- *Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*
-
- _These are free, rate-limited endpoints for [Mistral 7B Instruct](/models/mistralai/mistral-7b-instruct). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
- "displayName": "Mistral: Mistral 7B Instruct (free)",
- "enabled": false,
- "functionCall": false,
- "id": "mistralai/mistral-7b-instruct:free",
- "maxTokens": 4096,
- "pricing": {
- "input": 0,
- "output": 0,
- },
- "reasoning": false,
- "releasedAt": "2024-05-27",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
-
- *Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*",
- "displayName": "Mistral: Mistral 7B Instruct",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mistral-7b-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.055,
- "output": 0.055,
- },
- "reasoning": false,
- "releasedAt": "2024-05-27",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
-
- *Mistral 7B Instruct has multiple version variants, and this is intended to be the latest version.*
-
- _These are higher-throughput endpoints for [Mistral 7B Instruct](/models/mistralai/mistral-7b-instruct). They may have higher prices._",
- "displayName": "Mistral: Mistral 7B Instruct (nitro)",
- "enabled": false,
- "functionCall": false,
- "id": "mistralai/mistral-7b-instruct:nitro",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.07,
- "output": 0.07,
- },
- "reasoning": false,
- "releasedAt": "2024-05-27",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.
-
- At time of release, Phi-3 Mini demonstrated state-of-the-art performance among lightweight models. This model is static, trained on an offline dataset with an October 2023 cutoff date.
-
- _These are free, rate-limited endpoints for [Phi-3 Mini 128K Instruct](/models/microsoft/phi-3-mini-128k-instruct). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
- "displayName": "Phi-3 Mini 128K Instruct (free)",
- "enabled": false,
- "functionCall": true,
- "id": "microsoft/phi-3-mini-128k-instruct:free",
- "maxTokens": 4096,
- "pricing": {
- "input": 0,
- "output": 0,
- },
- "reasoning": false,
- "releasedAt": "2024-05-26",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "Phi-3 Mini is a powerful 3.8B parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.
-
- At time of release, Phi-3 Mini demonstrated state-of-the-art performance among lightweight models. This model is static, trained on an offline dataset with an October 2023 cutoff date.",
- "displayName": "Phi-3 Mini 128K Instruct",
- "enabled": false,
- "functionCall": true,
- "id": "microsoft/phi-3-mini-128k-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.1,
- "output": 0.1,
- },
- "reasoning": false,
- "releasedAt": "2024-05-26",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.
-
- At time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.
-
- For 4k context length, try [Phi-3 Medium 4K](/models/microsoft/phi-3-medium-4k-instruct).
-
- _These are free, rate-limited endpoints for [Phi-3 Medium 128K Instruct](/models/microsoft/phi-3-medium-128k-instruct). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
- "displayName": "Phi-3 Medium 128K Instruct (free)",
- "enabled": false,
- "functionCall": true,
- "id": "microsoft/phi-3-medium-128k-instruct:free",
- "maxTokens": 4096,
- "pricing": {
- "input": 0,
- "output": 0,
- },
- "reasoning": false,
- "releasedAt": "2024-05-24",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "Phi-3 128K Medium is a powerful 14-billion parameter model designed for advanced language understanding, reasoning, and instruction following. Optimized through supervised fine-tuning and preference adjustments, it excels in tasks involving common sense, mathematics, logical reasoning, and code processing.
-
- At time of release, Phi-3 Medium demonstrated state-of-the-art performance among lightweight models. In the MMLU-Pro eval, the model even comes close to a Llama3 70B level of performance.
-
- For 4k context length, try [Phi-3 Medium 4K](/models/microsoft/phi-3-medium-4k-instruct).",
- "displayName": "Phi-3 Medium 128K Instruct",
- "enabled": false,
- "functionCall": true,
- "id": "microsoft/phi-3-medium-128k-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 1,
- "output": 1,
- },
- "reasoning": false,
- "releasedAt": "2024-05-24",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "The NeverSleep team is back, with a Llama 3 70B finetune trained on their curated roleplay data. Striking a balance between eRP and RP, Lumimaid was designed to be serious, yet uncensored when necessary.
-
- To enhance its overall intelligence and chat capability, roughly 40% of the training data was not roleplay. This provides a breadth of knowledge to access, while still keeping roleplay as the primary strength.
-
- Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
- "displayName": "Llama 3 Lumimaid 70B",
- "enabled": false,
- "functionCall": false,
- "id": "neversleep/llama-3-lumimaid-70b",
- "maxTokens": 2048,
- "pricing": {
- "input": 3.375,
- "output": 4.5,
- },
- "reasoning": false,
- "releasedAt": "2024-05-16",
- "vision": false,
- },
- {
- "contextWindowTokens": 4000000,
- "description": "Gemini 1.5 Flash is a foundation model that performs well at a variety of multimodal tasks such as visual understanding, classification, summarization, and creating content from image, audio and video. It's adept at processing visual and text inputs such as photographs, documents, infographics, and screenshots.
-
- Gemini 1.5 Flash is designed for high-volume, high-frequency tasks where cost and latency matter. On most common tasks, Flash achieves comparable quality to other Gemini Pro models at a significantly reduced cost. Flash is well-suited for applications like chat assistants and on-demand content generation where speed and scale matter.
-
- Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).
-
- #multimodal",
- "displayName": "Google: Gemini Flash 1.5",
- "enabled": true,
- "functionCall": true,
- "id": "google/gemini-flash-1.5",
- "maxTokens": 32768,
- "pricing": {
- "input": 0.0375,
- "output": 0.15,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": true,
- },
- {
- "contextWindowTokens": 128000,
- "description": "DeepSeek-Coder-V2 is an open-source Mixture-of-Experts (MoE) code language model. It is further pre-trained from an intermediate checkpoint of DeepSeek-V2 with an additional 6 trillion tokens.
-
- The original V1 model was trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese. It was pre-trained on a project-level code corpus by employing an extra fill-in-the-blank task.",
- "displayName": "DeepSeek-Coder-V2",
- "enabled": false,
- "functionCall": false,
- "id": "deepseek/deepseek-coder",
- "maxTokens": 4096,
- "pricing": {
- "input": 0.14,
- "output": 0.28,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "DeepSeek-V2 Chat is a conversational finetune of DeepSeek-V2, a Mixture-of-Experts (MoE) language model. It comprises 236B total parameters, of which 21B are activated for each token.
-
- Compared with DeepSeek 67B, DeepSeek-V2 achieves stronger performance, and meanwhile saves 42.5% of training costs, reduces the KV cache by 93.3%, and boosts the maximum generation throughput to 5.76 times.
-
- DeepSeek-V2 achieves remarkable performance on both standard benchmarks and open-ended generation evaluations.",
- "displayName": "DeepSeek-V2 Chat",
- "enabled": true,
- "functionCall": true,
- "id": "deepseek/deepseek-chat",
- "maxTokens": 4096,
- "pricing": {
- "input": 0.14,
- "output": 0.28,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 28000,
- "description": "Llama3 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
-
- This is the online version of the [offline chat model](/models/perplexity/llama-3-sonar-large-32k-chat). It is focused on delivering helpful, up-to-date, and factual responses. #online",
- "displayName": "Perplexity: Llama3 Sonar 70B Online",
- "enabled": false,
- "functionCall": false,
- "id": "perplexity/llama-3-sonar-large-32k-online",
- "maxTokens": undefined,
- "pricing": {
- "input": 1,
- "output": 1,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "Llama3 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
-
- This is a normal offline LLM, but the [online version](/models/perplexity/llama-3-sonar-large-32k-online) of this model has Internet access.",
- "displayName": "Perplexity: Llama3 Sonar 70B",
- "enabled": false,
- "functionCall": false,
- "id": "perplexity/llama-3-sonar-large-32k-chat",
- "maxTokens": undefined,
- "pricing": {
- "input": 1,
- "output": 1,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 28000,
- "description": "Llama3 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
-
- This is the online version of the [offline chat model](/models/perplexity/llama-3-sonar-small-32k-chat). It is focused on delivering helpful, up-to-date, and factual responses. #online",
- "displayName": "Perplexity: Llama3 Sonar 8B Online",
- "enabled": false,
- "functionCall": false,
- "id": "perplexity/llama-3-sonar-small-32k-online",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.2,
- "output": 0.2,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "Llama3 Sonar is Perplexity's latest model family. It surpasses their earlier Sonar models in cost-efficiency, speed, and performance.
-
- This is a normal offline LLM, but the [online version](/models/perplexity/llama-3-sonar-small-32k-online) of this model has Internet access.",
- "displayName": "Perplexity: Llama3 Sonar 8B",
- "enabled": false,
- "functionCall": false,
- "id": "perplexity/llama-3-sonar-small-32k-chat",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.2,
- "output": 0.2,
- },
- "reasoning": false,
- "releasedAt": "2024-05-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "This safeguard model has 8B parameters and is based on the Llama 3 family. Just like its predecessor, [LlamaGuard 1](https://huggingface.co/meta-llama/LlamaGuard-7b), it can do both prompt and response classification.
-
- LlamaGuard 2 acts as a normal LLM would, generating text that indicates whether the given input/output is safe/unsafe. If deemed unsafe, it will also share the content categories violated.
-
- For best results, please use raw prompt input or the \`/completions\` endpoint, instead of the chat API.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
- "displayName": "Meta: LlamaGuard 2 8B",
- "enabled": false,
- "functionCall": false,
- "id": "meta-llama/llama-guard-2-8b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.18,
- "output": 0.18,
- },
- "reasoning": false,
- "releasedAt": "2024-05-13",
- "vision": false,
- },
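The LlamaGuard 2 description above recommends raw prompt input via a `/completions` endpoint rather than the chat API. A minimal sketch of that pattern, assuming an OpenRouter-style completions route and response shape (the URL, fields, and `classifyPrompt` helper are assumptions for illustration, not verified API documentation):

```ts
// Classify a prompt with LlamaGuard 2 via a completions-style endpoint,
// as the description above recommends, instead of the chat API.
async function classifyPrompt(apiKey: string, rawPrompt: string): Promise<string> {
  const res = await fetch('https://openrouter.ai/api/v1/completions', {
    body: JSON.stringify({
      max_tokens: 32,
      model: 'meta-llama/llama-guard-2-8b',
      prompt: rawPrompt, // raw prompt input; no chat template applied
    }),
    headers: { Authorization: `Bearer ${apiKey}`, 'Content-Type': 'application/json' },
    method: 'POST',
  });
  const data = await res.json();
  // Per the description, the model replies "safe" or "unsafe" plus violated categories.
  return data.choices?.[0]?.text ?? '';
}
```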
- {
- "contextWindowTokens": 128000,
- "description": "GPT-4o ("o" for "omni") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.
-
- For benchmarking against other models, it was briefly called ["im-also-a-good-gpt2-chatbot"](https://twitter.com/LiamFedus/status/1790064963966370209)",
- "displayName": "OpenAI: GPT-4o (2024-05-13)",
- "enabled": false,
- "functionCall": true,
- "id": "openai/gpt-4o-2024-05-13",
- "maxTokens": 4096,
- "pricing": {
- "input": 5,
- "output": 15,
- },
- "reasoning": false,
- "releasedAt": "2024-05-13",
- "vision": true,
- },
- {
- "contextWindowTokens": 128000,
- "description": "GPT-4o ("o" for "omni") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as fast and 50% more cost-effective. GPT-4o also offers improved performance in processing non-English languages and enhanced visual capabilities.
-
- For benchmarking against other models, it was briefly called ["im-also-a-good-gpt2-chatbot"](https://twitter.com/LiamFedus/status/1790064963966370209)",
- "displayName": "OpenAI: GPT-4o",
- "enabled": true,
- "functionCall": true,
- "id": "openai/gpt-4o",
- "maxTokens": 4096,
- "pricing": {
- "input": 5,
- "output": 15,
- },
- "reasoning": false,
- "releasedAt": "2024-05-13",
- "vision": true,
- },
- {
- "contextWindowTokens": 128000,
- "description": "GPT-4o Extended is an experimental variant of GPT-4o with extended max output tokens. This model supports only text input to text output.
-
- _These are extended-context endpoints for [GPT-4o](/models/openai/gpt-4o). They may have higher prices._",
- "displayName": "OpenAI: GPT-4o (extended)",
- "enabled": false,
- "functionCall": true,
- "id": "openai/gpt-4o:extended",
- "maxTokens": 64000,
- "pricing": {
- "input": 6,
- "output": 18,
- },
- "reasoning": false,
- "releasedAt": "2024-05-13",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "Qwen1.5 72B is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previously released Qwen, the improvements include:
-
- - Significant performance improvement in human preference for chat models
- - Multilingual support of both base and chat models
- - Stable support of 32K context length for models of all sizes
-
- For more details, see this [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5).
-
- Usage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).",
- "displayName": "Qwen 1.5 72B Chat",
- "enabled": false,
- "functionCall": false,
- "id": "qwen/qwen-72b-chat",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.81,
- "output": 0.81,
- },
- "reasoning": false,
- "releasedAt": "2024-05-09",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "Qwen1.5 110B is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previously released Qwen, the improvements include:
-
- - Significant performance improvement in human preference for chat models
- - Multilingual support of both base and chat models
- - Stable support of 32K context length for models of all sizes
-
- For more details, see this [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5).
-
- Usage of this model is subject to [Tongyi Qianwen LICENSE AGREEMENT](https://huggingface.co/Qwen/Qwen1.5-110B-Chat/blob/main/LICENSE).",
- "displayName": "Qwen 1.5 110B Chat",
- "enabled": false,
- "functionCall": false,
- "id": "qwen/qwen-110b-chat",
- "maxTokens": undefined,
- "pricing": {
- "input": 1.62,
- "output": 1.62,
- },
- "reasoning": false,
- "releasedAt": "2024-05-09",
- "vision": false,
- },
- {
- "contextWindowTokens": 24576,
- "description": "The NeverSleep team is back, with a Llama 3 8B finetune trained on their curated roleplay data. Striking a balance between eRP and RP, Lumimaid was designed to be serious, yet uncensored when necessary.
-
- To enhance its overall intelligence and chat capability, roughly 40% of the training data was not roleplay. This provides a breadth of knowledge to access, while still keeping roleplay as the primary strength.
-
- Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
- "displayName": "Llama 3 Lumimaid 8B",
- "enabled": false,
- "functionCall": false,
- "id": "neversleep/llama-3-lumimaid-8b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.1875,
- "output": 1.125,
- },
- "reasoning": false,
- "releasedAt": "2024-05-04",
- "vision": false,
- },
- {
- "contextWindowTokens": 24576,
- "description": "The NeverSleep team is back, with a Llama 3 8B finetune trained on their curated roleplay data. Striking a balance between eRP and RP, Lumimaid was designed to be serious, yet uncensored when necessary.
-
- To enhance its overall intelligence and chat capability, roughly 40% of the training data was not roleplay. This provides a breadth of knowledge to access, while still keeping roleplay as the primary strength.
-
- Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
-
- _These are extended-context endpoints for [Llama 3 Lumimaid 8B](/models/neversleep/llama-3-lumimaid-8b). They may have higher prices._",
- "displayName": "Llama 3 Lumimaid 8B (extended)",
- "enabled": false,
- "functionCall": false,
- "id": "neversleep/llama-3-lumimaid-8b:extended",
- "maxTokens": 2048,
- "pricing": {
- "input": 0.1875,
- "output": 1.125,
- },
- "reasoning": false,
- "releasedAt": "2024-05-04",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Creative writing model, routed with permission. It's fast, it keeps the conversation going, and it stays in character.
-
- If you submit a raw prompt, you can use Alpaca or Vicuna formats.",
- "displayName": "Fimbulvetr 11B v2",
- "enabled": false,
- "functionCall": false,
- "id": "sao10k/fimbulvetr-11b-v2",
- "maxTokens": 2048,
- "pricing": {
- "input": 0.375,
- "output": 1.5,
- },
- "reasoning": false,
- "releasedAt": "2024-04-21",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
- "displayName": "Meta: Llama 3 70B Instruct",
- "enabled": false,
- "functionCall": true,
- "id": "meta-llama/llama-3-70b-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.35,
- "output": 0.4,
- },
- "reasoning": false,
- "releasedAt": "2024-04-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
-
- _These are higher-throughput endpoints for [Llama 3 70B Instruct](/models/meta-llama/llama-3-70b-instruct). They may have higher prices._",
- "displayName": "Meta: Llama 3 70B Instruct (nitro)",
- "enabled": false,
- "functionCall": false,
- "id": "meta-llama/llama-3-70b-instruct:nitro",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.792,
- "output": 0.792,
- },
- "reasoning": false,
- "releasedAt": "2024-04-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
-
- _These are free, rate-limited endpoints for [Llama 3 8B Instruct](/models/meta-llama/llama-3-8b-instruct). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
- "displayName": "Meta: Llama 3 8B Instruct (free)",
- "enabled": false,
- "functionCall": false,
- "id": "meta-llama/llama-3-8b-instruct:free",
- "maxTokens": 4096,
- "pricing": {
- "input": 0,
- "output": 0,
- },
- "reasoning": false,
- "releasedAt": "2024-04-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
- "displayName": "Meta: Llama 3 8B Instruct",
- "enabled": false,
- "functionCall": true,
- "id": "meta-llama/llama-3-8b-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.055,
- "output": 0.055,
- },
- "reasoning": false,
- "releasedAt": "2024-04-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 8192,
- "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
-
- _These are higher-throughput endpoints for [Llama 3 8B Instruct](/models/meta-llama/llama-3-8b-instruct). They may have higher prices._",
- "displayName": "Meta: Llama 3 8B Instruct (nitro)",
- "enabled": false,
- "functionCall": false,
- "id": "meta-llama/llama-3-8b-instruct:nitro",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.162,
- "output": 0.162,
- },
- "reasoning": false,
- "releasedAt": "2024-04-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 16384,
- "description": "Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases.
-
- It has demonstrated strong performance compared to leading closed-source models in human evaluations.
-
- To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
-
- _These are extended-context endpoints for [Llama 3 8B Instruct](/models/meta-llama/llama-3-8b-instruct). They may have higher prices._",
- "displayName": "Meta: Llama 3 8B Instruct (extended)",
- "enabled": false,
- "functionCall": false,
- "id": "meta-llama/llama-3-8b-instruct:extended",
- "maxTokens": 2048,
- "pricing": {
- "input": 0.1875,
- "output": 1.125,
- },
- "reasoning": false,
- "releasedAt": "2024-04-18",
- "vision": false,
- },
- {
- "contextWindowTokens": 65536,
- "description": "Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b). It uses 39B active parameters out of 141B, offering unparalleled cost efficiency for its size. Its strengths include:
- - strong math, coding, and reasoning
- - large context length (64k)
- - fluency in English, French, Italian, German, and Spanish
-
- See benchmarks on the launch announcement [here](https://mistral.ai/news/mixtral-8x22b/).
- #moe",
- "displayName": "Mistral: Mixtral 8x22B Instruct",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mixtral-8x22b-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.65,
- "output": 0.65,
- },
- "reasoning": false,
- "releasedAt": "2024-04-17",
- "vision": false,
- },
- {
- "contextWindowTokens": 32000,
- "description": "WizardLM-2 7B is the smaller variant of Microsoft AI's latest Wizard model. It is the fastest and achieves comparable performance with existing 10x larger opensource leading models.
-
- It is a finetune of [Mistral 7B Instruct](/models/mistralai/mistral-7b-instruct), using the same technique as [WizardLM-2 8x22B](/models/microsoft/wizardlm-2-8x22b).
-
- To read more about the model release, [click here](https://wizardlm.github.io/WizardLM2/).
-
- #moe",
- "displayName": "WizardLM-2 7B",
- "enabled": false,
- "functionCall": false,
- "id": "microsoft/wizardlm-2-7b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.055,
- "output": 0.055,
- },
- "reasoning": false,
- "releasedAt": "2024-04-16",
- "vision": false,
- },
- {
- "contextWindowTokens": 65536,
- "description": "WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models.
-
- It is an instruct finetune of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b).
-
- To read more about the model release, [click here](https://wizardlm.github.io/WizardLM2/).
-
- #moe",
- "displayName": "WizardLM-2 8x22B",
- "enabled": false,
- "functionCall": false,
- "id": "microsoft/wizardlm-2-8x22b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.5,
- "output": 0.5,
- },
- "reasoning": false,
- "releasedAt": "2024-04-16",
- "vision": false,
- },
- {
- "contextWindowTokens": 4000000,
- "description": "Google's latest multimodal model, supporting image and video in text or chat prompts.
-
- Optimized for language tasks including:
-
- - Code generation
- - Text generation
- - Text editing
- - Problem solving
- - Recommendations
- - Information extraction
- - Data extraction or generation
- - AI agents
-
- Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).
-
- #multimodal",
- "displayName": "Google: Gemini Pro 1.5",
- "enabled": true,
- "functionCall": true,
- "id": "google/gemini-pro-1.5",
- "maxTokens": 32768,
- "pricing": {
- "input": 2.5,
- "output": 7.5,
- },
- "reasoning": false,
- "releasedAt": "2024-04-09",
- "vision": true,
- },
- {
- "contextWindowTokens": 128000,
- "description": "The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.
-
- Training data: up to December 2023.",
- "displayName": "OpenAI: GPT-4 Turbo",
- "enabled": false,
- "functionCall": true,
- "id": "openai/gpt-4-turbo",
- "maxTokens": 4096,
- "pricing": {
- "input": 10,
- "output": 30,
- },
- "reasoning": false,
- "releasedAt": "2024-04-09",
- "vision": true,
- },
- {
- "contextWindowTokens": 128000,
- "description": "Command R+ is a new, 104B-parameter LLM from Cohere. It's useful for roleplay, general consumer usecases, and Retrieval Augmented Generation (RAG).
-
- It offers multilingual support for ten key languages to facilitate global business operations. See benchmarks and the launch post [here](https://txt.cohere.com/command-r-plus-microsoft-azure/).
-
- Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
- "displayName": "Cohere: Command R+",
- "enabled": false,
- "functionCall": true,
- "id": "cohere/command-r-plus",
- "maxTokens": 4000,
- "pricing": {
- "input": 3,
- "output": 15,
- },
- "reasoning": false,
- "releasedAt": "2024-04-04",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "DBRX is a new open source large language model developed by Databricks. At 132B, it outperforms existing open source LLMs like Llama 2 70B and [Mixtral-8x7b](/models/mistralai/mixtral-8x7b) on standard industry benchmarks for language understanding, programming, math, and logic.
-
- It uses a fine-grained mixture-of-experts (MoE) architecture. 36B parameters are active on any input. It was pre-trained on 12T tokens of text and code data. Compared to other open MoE models like Mixtral-8x7B and Grok-1, DBRX is fine-grained, meaning it uses a larger number of smaller experts.
-
- See the launch announcement and benchmark results [here](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm).
-
- #moe",
- "displayName": "Databricks: DBRX 132B Instruct",
- "enabled": false,
- "functionCall": false,
- "id": "databricks/dbrx-instruct",
- "maxTokens": undefined,
- "pricing": {
- "input": 1.08,
- "output": 1.08,
- },
- "reasoning": false,
- "releasedAt": "2024-03-29",
- "vision": false,
- },
- {
- "contextWindowTokens": 4096,
- "description": "A merge with a complex family tree, this model was crafted for roleplaying and storytelling. Midnight Rose is a successor to Rogue Rose and Aurora Nights and improves upon them both. It wants to produce lengthy output by default and is the best creative writing merge produced so far by sophosympatheia.
-
- Descending from earlier versions of Midnight Rose and [Wizard Tulu Dolphin 70B](https://huggingface.co/sophosympatheia/Wizard-Tulu-Dolphin-70B-v1.0), it inherits the best qualities of each.",
- "displayName": "Midnight Rose 70B",
- "enabled": false,
- "functionCall": false,
- "id": "sophosympatheia/midnight-rose-70b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.8,
- "output": 0.8,
- },
- "reasoning": false,
- "releasedAt": "2024-03-22",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "Command-R is a 35B parameter model that performs conversational language tasks at a higher quality, more reliably, and with a longer context than previous models. It can be used for complex workflows like code generation, retrieval augmented generation (RAG), tool use, and agents.
-
- Read the launch post [here](https://txt.cohere.com/command-r/).
-
- Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
- "displayName": "Cohere: Command R",
- "enabled": false,
- "functionCall": true,
- "id": "cohere/command-r",
- "maxTokens": 4000,
- "pricing": {
- "input": 0.5,
- "output": 1.5,
- },
- "reasoning": false,
- "releasedAt": "2024-03-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 4096,
- "description": "Command is an instruction-following conversational model that performs language tasks with high quality, more reliably and with a longer context than our base generative models.
-
- Use of this model is subject to Cohere's [Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy).",
- "displayName": "Cohere: Command",
- "enabled": false,
- "functionCall": false,
- "id": "cohere/command",
- "maxTokens": 4000,
- "pricing": {
- "input": 1,
- "output": 2,
- },
- "reasoning": false,
- "releasedAt": "2024-03-14",
- "vision": false,
- },
- {
- "contextWindowTokens": 200000,
- "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for
- near-instant responsiveness. Quick and accurate targeted performance.
-
- See the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)
-
- #multimodal",
- "displayName": "Anthropic: Claude 3 Haiku",
- "enabled": true,
- "functionCall": true,
- "id": "anthropic/claude-3-haiku",
- "maxTokens": 4096,
- "pricing": {
- "input": 0.25,
- "output": 1.25,
- },
- "reasoning": false,
- "releasedAt": "2024-03-13",
- "vision": true,
- },
- {
- "contextWindowTokens": 200000,
- "description": "Claude 3 Haiku is Anthropic's fastest and most compact model for
- near-instant responsiveness. Quick and accurate targeted performance.
-
- See the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)
-
- #multimodal
-
- _This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-3-haiku) variant._",
- "displayName": "Anthropic: Claude 3 Haiku (self-moderated)",
- "enabled": false,
- "functionCall": true,
- "id": "anthropic/claude-3-haiku:beta",
- "maxTokens": 4096,
- "pricing": {
- "input": 0.25,
- "output": 1.25,
- },
- "reasoning": false,
- "releasedAt": "2024-03-13",
- "vision": true,
- },
- {
- "contextWindowTokens": 200000,
- "description": "Claude 3 Sonnet is an ideal balance of intelligence and speed for enterprise workloads. Maximum utility at a lower price, dependable, balanced for scaled deployments.
-
- See the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)
-
- #multimodal",
- "displayName": "Anthropic: Claude 3 Sonnet",
- "enabled": false,
- "functionCall": true,
- "id": "anthropic/claude-3-sonnet",
- "maxTokens": 4096,
- "pricing": {
- "input": 3,
- "output": 15,
- },
- "reasoning": false,
- "releasedAt": "2024-03-05",
- "vision": true,
- },
- {
- "contextWindowTokens": 200000,
- "description": "Claude 3 Sonnet is an ideal balance of intelligence and speed for enterprise workloads. Maximum utility at a lower price, dependable, balanced for scaled deployments.
-
- See the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)
-
- #multimodal
-
- _This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-3-sonnet) variant._",
- "displayName": "Anthropic: Claude 3 Sonnet (self-moderated)",
- "enabled": false,
- "functionCall": true,
- "id": "anthropic/claude-3-sonnet:beta",
- "maxTokens": 4096,
- "pricing": {
- "input": 3,
- "output": 15,
- },
- "reasoning": false,
- "releasedAt": "2024-03-05",
- "vision": true,
- },
- {
- "contextWindowTokens": 200000,
- "description": "Claude 3 Opus is Anthropic's most powerful model for highly complex tasks. It boasts top-level performance, intelligence, fluency, and understanding.
-
- See the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)
-
- #multimodal",
- "displayName": "Anthropic: Claude 3 Opus",
- "enabled": true,
- "functionCall": true,
- "id": "anthropic/claude-3-opus",
- "maxTokens": 4096,
- "pricing": {
- "input": 15,
- "output": 75,
- },
- "reasoning": false,
- "releasedAt": "2024-03-05",
- "vision": true,
- },
- {
- "contextWindowTokens": 200000,
- "description": "Claude 3 Opus is Anthropic's most powerful model for highly complex tasks. It boasts top-level performance, intelligence, fluency, and understanding.
-
- See the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-family)
-
- #multimodal
-
- _This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-3-opus) variant._",
- "displayName": "Anthropic: Claude 3 Opus (self-moderated)",
- "enabled": false,
- "functionCall": true,
- "id": "anthropic/claude-3-opus:beta",
- "maxTokens": 4096,
- "pricing": {
- "input": 15,
- "output": 75,
- },
- "reasoning": false,
- "releasedAt": "2024-03-05",
- "vision": true,
- },
- {
- "contextWindowTokens": 128000,
- "description": "This is Mistral AI's flagship model, Mistral Large 2 (version \`mistral-large-2407\`). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/).
-
- It is fluent in English, French, Spanish, German, and Italian, with high grammatical accuracy, and its long context window allows precise information recall from large documents.",
- "displayName": "Mistral Large",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mistral-large",
- "maxTokens": undefined,
- "pricing": {
- "input": 3,
- "output": 9,
- },
- "reasoning": false,
- "releasedAt": "2024-02-26",
- "vision": false,
- },
- {
- "contextWindowTokens": 128000,
- "description": "The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Dec 2023.
-
- **Note:** heavily rate limited by OpenAI while in preview.",
- "displayName": "OpenAI: GPT-4 Turbo Preview",
- "enabled": false,
- "functionCall": true,
- "id": "openai/gpt-4-turbo-preview",
- "maxTokens": 4096,
- "pricing": {
- "input": 10,
- "output": 30,
- },
- "reasoning": false,
- "releasedAt": "2024-01-25",
- "vision": false,
- },
- {
- "contextWindowTokens": 4095,
- "description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.
-
- Training data up to Sep 2021.",
- "displayName": "OpenAI: GPT-3.5 Turbo (older v0613)",
- "enabled": false,
- "functionCall": true,
- "id": "openai/gpt-3.5-turbo-0613",
- "maxTokens": 4096,
- "pricing": {
- "input": 1,
- "output": 2,
- },
- "reasoning": false,
- "releasedAt": "2024-01-25",
- "vision": false,
- },
- {
- "contextWindowTokens": 32768,
- "description": "Nous Hermes 2 Mixtral 8x7B DPO is the new flagship Nous Research model trained over the [Mixtral 8x7B MoE LLM](/models/mistralai/mixtral-8x7b).
-
- The model was trained on over 1,000,000 entries of primarily [GPT-4](/models/openai/gpt-4) generated data, as well as other high quality data from open datasets across the AI landscape, achieving state of the art performance on a variety of tasks.
-
- #moe",
- "displayName": "Nous: Hermes 2 Mixtral 8x7B DPO",
- "enabled": false,
- "functionCall": false,
- "id": "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.45,
- "output": 0.45,
- },
- "reasoning": false,
- "releasedAt": "2024-01-16",
- "vision": false,
- },
- {
- "contextWindowTokens": 32000,
- "description": "This is Mistral AI's closed-source, medium-sized model. It's powered by a closed-source prototype and excels at reasoning, code, JSON, chat, and more. In benchmarks, it compares with many of the flagship models of other companies.",
- "displayName": "Mistral Medium",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mistral-medium",
- "maxTokens": undefined,
- "pricing": {
- "input": 2.7,
- "output": 8.1,
- },
- "reasoning": false,
- "releasedAt": "2024-01-10",
- "vision": false,
- },
- {
- "contextWindowTokens": 32000,
- "description": "This model is currently powered by Mixtral-8X7B-v0.1, a sparse mixture of experts model with 12B active parameters. It has better reasoning, exhibits more capabilities, can produce and reason about code, and is multilingual, supporting English, French, German, Italian, and Spanish.
- #moe",
- "displayName": "Mistral Small",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mistral-small",
- "maxTokens": undefined,
- "pricing": {
- "input": 2,
- "output": 6,
- },
- "reasoning": false,
- "releasedAt": "2024-01-10",
- "vision": false,
- },
- {
- "contextWindowTokens": 32000,
- "description": "This model is currently powered by Mistral-7B-v0.2, and incorporates a "better" fine-tuning than [Mistral 7B](/models/mistralai/mistral-7b-instruct-v0.1), inspired by community work. It's best used for large batch processing tasks where cost is a significant factor but reasoning capabilities are not crucial.",
- "displayName": "Mistral Tiny",
- "enabled": false,
- "functionCall": true,
- "id": "mistralai/mistral-tiny",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.25,
- "output": 0.25,
- },
- "reasoning": false,
- "releasedAt": "2024-01-10",
- "vision": false,
- },
- {
- "contextWindowTokens": 4096,
- "description": "A 75/25 merge of [Chronos 13b v2](https://huggingface.co/elinas/chronos-13b-v2) and [Nous Hermes Llama2 13b](/models/nousresearch/nous-hermes-llama2-13b). This offers the imaginative writing style of Chronos while retaining coherency. Outputs are long and use exceptional prose. #merge",
- "displayName": "Chronos Hermes 13B v2",
- "enabled": false,
- "functionCall": false,
- "id": "austism/chronos-hermes-13b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.13,
- "output": 0.13,
- },
- "reasoning": false,
- "releasedAt": "2024-01-05",
- "vision": false,
- },
- {
- "contextWindowTokens": 4096,
- "description": "Nous Hermes 2 Yi 34B was trained on 1,000,000 entries of primarily GPT-4 generated data, as well as other high quality data from open datasets across the AI landscape.
-
- Nous-Hermes 2 on Yi 34B outperforms all Nous-Hermes & Open-Hermes models of the past, achieving new heights in all benchmarks for a Nous Research LLM as well as surpassing many popular finetunes.",
- "displayName": "Nous: Hermes 2 Yi 34B",
- "enabled": false,
- "functionCall": false,
- "id": "nousresearch/nous-hermes-yi-34b",
- "maxTokens": undefined,
- "pricing": {
- "input": 0.72,
- "output": 0.72,
- },
- "reasoning": false,
- "releasedAt": "2024-01-02",
- "vision": false,
- },
{
|
2215
|
-
"contextWindowTokens": 32768,
|
2216
|
-
"description": "A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
|
2217
|
-
|
2218
|
-
An improved version of [Mistral 7B Instruct](/modelsmistralai/mistral-7b-instruct-v0.1), with the following changes:
|
2219
|
-
|
2220
|
-
- 32k context window (vs 8k context in v0.1)
|
2221
|
-
- Rope-theta = 1e6
|
2222
|
-
- No Sliding-Window Attention",
|
2223
|
-
"displayName": "Mistral: Mistral 7B Instruct v0.2",
|
2224
|
-
"enabled": false,
|
2225
|
-
"functionCall": false,
|
2226
|
-
"id": "mistralai/mistral-7b-instruct-v0.2",
|
2227
|
-
"maxTokens": undefined,
|
2228
|
-
"pricing": {
|
2229
|
-
"input": 0.055,
|
2230
|
-
"output": 0.055,
|
2231
|
-
},
|
2232
|
-
"reasoning": false,
|
2233
|
-
"releasedAt": "2023-12-28",
|
2234
|
-
"vision": false,
|
2235
|
-
},
|
2236
|
-
{
|
2237
|
-
"contextWindowTokens": 32768,
|
2238
|
-
"description": "This is a 16k context fine-tune of [Mixtral-8x7b](/models/mistralai/mixtral-8x7b). It excels in coding tasks due to extensive training with coding data and is known for its obedience, although it lacks DPO tuning.
|
2239
|
-
|
2240
|
-
The model is uncensored and is stripped of alignment and bias. It requires an external alignment layer for ethical use. Users are cautioned to use this highly compliant model responsibly, as detailed in a blog post about uncensored models at [erichartford.com/uncensored-models](https://erichartford.com/uncensored-models).
|
2241
|
-
|
2242
|
-
#moe #uncensored",
|
2243
|
-
"displayName": "Dolphin 2.6 Mixtral 8x7B 🐬",
|
2244
|
-
"enabled": false,
|
2245
|
-
"functionCall": false,
|
2246
|
-
"id": "cognitivecomputations/dolphin-mixtral-8x7b",
|
2247
|
-
"maxTokens": undefined,
|
2248
|
-
"pricing": {
|
2249
|
-
"input": 0.5,
|
2250
|
-
"output": 0.5,
|
2251
|
-
},
|
2252
|
-
"reasoning": false,
|
2253
|
-
"releasedAt": "2023-12-21",
|
2254
|
-
"vision": false,
|
2255
|
-
},
|
2256
|
-
{
|
2257
|
-
"contextWindowTokens": 65536,
|
2258
|
-
"description": "Google's flagship multimodal model, supporting image and video in text or chat prompts for a text or code response.
|
2259
|
-
|
2260
|
-
See the benchmarks and prompting guidelines from [Deepmind](https://deepmind.google/technologies/gemini/).
|
2261
|
-
|
2262
|
-
Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).
|
2263
|
-
|
2264
|
-
#multimodal",
|
2265
|
-
"displayName": "Google: Gemini Pro Vision 1.0",
|
2266
|
-
"enabled": false,
|
2267
|
-
"functionCall": true,
|
2268
|
-
"id": "google/gemini-pro-vision",
|
2269
|
-
"maxTokens": 8192,
|
2270
|
-
"pricing": {
|
2271
|
-
"input": 0.125,
|
2272
|
-
"output": 0.375,
|
2273
|
-
},
|
2274
|
-
"reasoning": false,
|
2275
|
-
"releasedAt": "2023-12-13",
|
2276
|
-
"vision": true,
|
2277
|
-
},
|
2278
|
-
{
|
2279
|
-
"contextWindowTokens": 131040,
|
2280
|
-
"description": "Google's flagship text generation model. Designed to handle natural language tasks, multiturn text and code chat, and code generation.
|
2281
|
-
|
2282
|
-
See the benchmarks and prompting guidelines from [Deepmind](https://deepmind.google/technologies/gemini/).
|
2283
|
-
|
2284
|
-
Usage of Gemini is subject to Google's [Gemini Terms of Use](https://ai.google.dev/terms).",
|
2285
|
-
"displayName": "Google: Gemini Pro 1.0",
|
2286
|
-
"enabled": false,
|
2287
|
-
"functionCall": true,
|
2288
|
-
"id": "google/gemini-pro",
|
2289
|
-
"maxTokens": 32768,
|
2290
|
-
"pricing": {
|
2291
|
-
"input": 0.125,
|
2292
|
-
"output": 0.375,
|
2293
|
-
},
|
2294
|
-
"reasoning": false,
|
2295
|
-
"releasedAt": "2023-12-13",
|
2296
|
-
"vision": false,
|
2297
|
-
},
|
2298
|
-
{
|
2299
|
-
"contextWindowTokens": 32768,
|
2300
|
-
"description": "A pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion parameters.
|
2301
|
-
|
2302
|
-
Instruct model fine-tuned by Mistral. #moe",
|
2303
|
-
"displayName": "Mixtral 8x7B Instruct",
|
2304
|
-
"enabled": false,
|
2305
|
-
"functionCall": true,
|
2306
|
-
"id": "mistralai/mixtral-8x7b-instruct",
|
2307
|
-
"maxTokens": undefined,
|
2308
|
-
"pricing": {
|
2309
|
-
"input": 0.24,
|
2310
|
-
"output": 0.24,
|
2311
|
-
},
|
2312
|
-
"reasoning": false,
|
2313
|
-
"releasedAt": "2023-12-10",
|
2314
|
-
"vision": false,
|
2315
|
-
},
|
2316
|
-
{
|
2317
|
-
"contextWindowTokens": 32768,
|
2318
|
-
"description": "A pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion parameters.
|
2319
|
-
|
2320
|
-
Instruct model fine-tuned by Mistral. #moe
|
2321
|
-
|
2322
|
-
_These are higher-throughput endpoints for [Mixtral 8x7B Instruct](/models/mistralai/mixtral-8x7b-instruct). They may have higher prices._",
|
2323
|
-
"displayName": "Mixtral 8x7B Instruct (nitro)",
|
2324
|
-
"enabled": false,
|
2325
|
-
"functionCall": false,
|
2326
|
-
"id": "mistralai/mixtral-8x7b-instruct:nitro",
|
2327
|
-
"maxTokens": undefined,
|
2328
|
-
"pricing": {
|
2329
|
-
"input": 0.54,
|
2330
|
-
"output": 0.54,
|
2331
|
-
},
|
2332
|
-
"reasoning": false,
|
2333
|
-
"releasedAt": "2023-12-10",
|
2334
|
-
"vision": false,
|
2335
|
-
},
|
2336
|
-
{
|
2337
|
-
"contextWindowTokens": 32768,
|
2338
|
-
"description": "A pretrained generative Sparse Mixture of Experts, by Mistral AI. Incorporates 8 experts (feed-forward networks) for a total of 47B parameters. Base model (not fine-tuned for instructions) - see [Mixtral 8x7B Instruct](/models/mistralai/mixtral-8x7b-instruct) for an instruct-tuned model.
|
2339
|
-
|
2340
|
-
#moe",
|
2341
|
-
"displayName": "Mixtral 8x7B (base)",
|
2342
|
-
"enabled": false,
|
2343
|
-
"functionCall": false,
|
2344
|
-
"id": "mistralai/mixtral-8x7b",
|
2345
|
-
"maxTokens": undefined,
|
2346
|
-
"pricing": {
|
2347
|
-
"input": 0.54,
|
2348
|
-
"output": 0.54,
|
2349
|
-
},
|
2350
|
-
"reasoning": false,
|
2351
|
-
"releasedAt": "2023-12-10",
|
2352
|
-
"vision": false,
|
2353
|
-
},
|
2354
|
-
{
|
2355
|
-
"contextWindowTokens": 32768,
|
2356
|
-
"description": "This is the chat model variant of the [StripedHyena series](/models?q=stripedhyena) developed by Together in collaboration with Nous Research.
|
2357
|
-
|
2358
|
-
StripedHyena uses a new architecture that competes with traditional Transformers, particularly in long-context data processing. It combines attention mechanisms with gated convolutions for improved speed, efficiency, and scaling. This model marks a significant advancement in AI architecture for sequence modeling tasks.",
|
2359
|
-
"displayName": "StripedHyena Nous 7B",
|
2360
|
-
"enabled": false,
|
2361
|
-
"functionCall": false,
|
2362
|
-
"id": "togethercomputer/stripedhyena-nous-7b",
|
2363
|
-
"maxTokens": undefined,
|
2364
|
-
"pricing": {
|
2365
|
-
"input": 0.18,
|
2366
|
-
"output": 0.18,
|
2367
|
-
},
|
2368
|
-
"reasoning": false,
|
2369
|
-
"releasedAt": "2023-12-09",
|
2370
|
-
"vision": false,
|
2371
|
-
},
|
2372
|
-
{
|
2373
|
-
"contextWindowTokens": 32768,
|
2374
|
-
"description": "From the creator of [MythoMax](/models/gryphe/mythomax-l2-13b), merges a suite of models to reduce word anticipation, ministrations, and other undesirable words in ChatGPT roleplaying data.
|
2375
|
-
|
2376
|
-
It combines [Neural Chat 7B](/models/intel/neural-chat-7b), Airoboros 7b, [Toppy M 7B](/models/undi95/toppy-m-7b), [Zepher 7b beta](/models/huggingfaceh4/zephyr-7b-beta), [Nous Capybara 34B](/models/nousresearch/nous-capybara-34b), [OpenHeremes 2.5](/models/teknium/openhermes-2.5-mistral-7b), and many others.
|
2377
|
-
|
2378
|
-
#merge
|
2379
|
-
|
2380
|
-
_These are free, rate-limited endpoints for [MythoMist 7B](/models/gryphe/mythomist-7b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
2381
|
-
"displayName": "MythoMist 7B (free)",
|
2382
|
-
"enabled": false,
|
2383
|
-
"functionCall": false,
|
2384
|
-
"id": "gryphe/mythomist-7b:free",
|
2385
|
-
"maxTokens": 4096,
|
2386
|
-
"pricing": {
|
2387
|
-
"input": 0,
|
2388
|
-
"output": 0,
|
2389
|
-
},
|
2390
|
-
"reasoning": false,
|
2391
|
-
"releasedAt": "2023-12-07",
|
2392
|
-
"vision": false,
|
2393
|
-
},
|
2394
|
-
{
|
2395
|
-
"contextWindowTokens": 32768,
|
2396
|
-
"description": "From the creator of [MythoMax](/models/gryphe/mythomax-l2-13b), merges a suite of models to reduce word anticipation, ministrations, and other undesirable words in ChatGPT roleplaying data.
|
2397
|
-
|
2398
|
-
It combines [Neural Chat 7B](/models/intel/neural-chat-7b), Airoboros 7b, [Toppy M 7B](/models/undi95/toppy-m-7b), [Zepher 7b beta](/models/huggingfaceh4/zephyr-7b-beta), [Nous Capybara 34B](/models/nousresearch/nous-capybara-34b), [OpenHeremes 2.5](/models/teknium/openhermes-2.5-mistral-7b), and many others.
|
2399
|
-
|
2400
|
-
#merge",
|
2401
|
-
"displayName": "MythoMist 7B",
|
2402
|
-
"enabled": false,
|
2403
|
-
"functionCall": false,
|
2404
|
-
"id": "gryphe/mythomist-7b",
|
2405
|
-
"maxTokens": 2048,
|
2406
|
-
"pricing": {
|
2407
|
-
"input": 0.375,
|
2408
|
-
"output": 0.375,
|
2409
|
-
},
|
2410
|
-
"reasoning": false,
|
2411
|
-
"releasedAt": "2023-12-07",
|
2412
|
-
"vision": false,
|
2413
|
-
},
|
2414
|
-
{
|
2415
|
-
"contextWindowTokens": 8192,
|
2416
|
-
"description": "OpenChat 7B is a library of open-source language models, fine-tuned with "C-RLFT (Conditioned Reinforcement Learning Fine-Tuning)" - a strategy inspired by offline reinforcement learning. It has been trained on mixed-quality data without preference labels.
|
2417
|
-
|
2418
|
-
- For OpenChat fine-tuned on Mistral 7B, check out [OpenChat 7B](/models/openchat/openchat-7b).
|
2419
|
-
- For OpenChat fine-tuned on Llama 8B, check out [OpenChat 8B](/models/openchat/openchat-8b).
|
2420
|
-
|
2421
|
-
#open-source
|
2422
|
-
|
2423
|
-
_These are free, rate-limited endpoints for [OpenChat 3.5 7B](/models/openchat/openchat-7b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
2424
|
-
"displayName": "OpenChat 3.5 7B (free)",
|
2425
|
-
"enabled": false,
|
2426
|
-
"functionCall": false,
|
2427
|
-
"id": "openchat/openchat-7b:free",
|
2428
|
-
"maxTokens": 4096,
|
2429
|
-
"pricing": {
|
2430
|
-
"input": 0,
|
2431
|
-
"output": 0,
|
2432
|
-
},
|
2433
|
-
"reasoning": false,
|
2434
|
-
"releasedAt": "2023-11-28",
|
2435
|
-
"vision": false,
|
2436
|
-
},
|
2437
|
-
{
|
2438
|
-
"contextWindowTokens": 8192,
|
2439
|
-
"description": "OpenChat 7B is a library of open-source language models, fine-tuned with "C-RLFT (Conditioned Reinforcement Learning Fine-Tuning)" - a strategy inspired by offline reinforcement learning. It has been trained on mixed-quality data without preference labels.
|
2440
|
-
|
2441
|
-
- For OpenChat fine-tuned on Mistral 7B, check out [OpenChat 7B](/models/openchat/openchat-7b).
|
2442
|
-
- For OpenChat fine-tuned on Llama 8B, check out [OpenChat 8B](/models/openchat/openchat-8b).
|
2443
|
-
|
2444
|
-
#open-source",
|
2445
|
-
"displayName": "OpenChat 3.5 7B",
|
2446
|
-
"enabled": false,
|
2447
|
-
"functionCall": false,
|
2448
|
-
"id": "openchat/openchat-7b",
|
2449
|
-
"maxTokens": undefined,
|
2450
|
-
"pricing": {
|
2451
|
-
"input": 0.055,
|
2452
|
-
"output": 0.055,
|
2453
|
-
},
|
2454
|
-
"reasoning": false,
|
2455
|
-
"releasedAt": "2023-11-28",
|
2456
|
-
"vision": false,
|
2457
|
-
},
|
2458
|
-
{
|
2459
|
-
"contextWindowTokens": 8192,
|
2460
|
-
"description": "A collab between IkariDev and Undi. This merge is suitable for RP, ERP, and general knowledge.
|
2461
|
-
|
2462
|
-
#merge #uncensored",
|
2463
|
-
"displayName": "Noromaid 20B",
|
2464
|
-
"enabled": false,
|
2465
|
-
"functionCall": false,
|
2466
|
-
"id": "neversleep/noromaid-20b",
|
2467
|
-
"maxTokens": 2048,
|
2468
|
-
"pricing": {
|
2469
|
-
"input": 1.5,
|
2470
|
-
"output": 2.25,
|
2471
|
-
},
|
2472
|
-
"reasoning": false,
|
2473
|
-
"releasedAt": "2023-11-26",
|
2474
|
-
"vision": false,
|
2475
|
-
},
|
2476
|
-
{
|
2477
|
-
"contextWindowTokens": 100000,
|
2478
|
-
"description": "Anthropic's model for low-latency, high throughput text generation. Supports hundreds of pages of text.",
|
2479
|
-
"displayName": "Anthropic: Claude Instant v1.1",
|
2480
|
-
"enabled": false,
|
2481
|
-
"functionCall": false,
|
2482
|
-
"id": "anthropic/claude-instant-1.1",
|
2483
|
-
"maxTokens": 2048,
|
2484
|
-
"pricing": {
|
2485
|
-
"input": 0.8,
|
2486
|
-
"output": 2.4,
|
2487
|
-
},
|
2488
|
-
"reasoning": false,
|
2489
|
-
"releasedAt": "2023-11-22",
|
2490
|
-
"vision": false,
|
2491
|
-
},
|
2492
|
-
{
|
2493
|
-
"contextWindowTokens": 200000,
|
2494
|
-
"description": "Claude 2 delivers advancements in key capabilities for enterprises—including an industry-leading 200K token context window, significant reductions in rates of model hallucination, system prompts and a new beta feature: tool use.",
|
2495
|
-
"displayName": "Anthropic: Claude v2.1",
|
2496
|
-
"enabled": false,
|
2497
|
-
"functionCall": false,
|
2498
|
-
"id": "anthropic/claude-2.1",
|
2499
|
-
"maxTokens": 4096,
|
2500
|
-
"pricing": {
|
2501
|
-
"input": 8,
|
2502
|
-
"output": 24,
|
2503
|
-
},
|
2504
|
-
"reasoning": false,
|
2505
|
-
"releasedAt": "2023-11-22",
|
2506
|
-
"vision": false,
|
2507
|
-
},
|
2508
|
-
{
|
2509
|
-
"contextWindowTokens": 200000,
|
2510
|
-
"description": "Claude 2 delivers advancements in key capabilities for enterprises—including an industry-leading 200K token context window, significant reductions in rates of model hallucination, system prompts and a new beta feature: tool use.
|
2511
|
-
|
2512
|
-
_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-2.1) variant._",
|
2513
|
-
"displayName": "Anthropic: Claude v2.1 (self-moderated)",
|
2514
|
-
"enabled": false,
|
2515
|
-
"functionCall": false,
|
2516
|
-
"id": "anthropic/claude-2.1:beta",
|
2517
|
-
"maxTokens": 4096,
|
2518
|
-
"pricing": {
|
2519
|
-
"input": 8,
|
2520
|
-
"output": 24,
|
2521
|
-
},
|
2522
|
-
"reasoning": false,
|
2523
|
-
"releasedAt": "2023-11-22",
|
2524
|
-
"vision": false,
|
2525
|
-
},
|
2526
|
-
{
|
2527
|
-
"contextWindowTokens": 200000,
|
2528
|
-
"description": "Claude 2 delivers advancements in key capabilities for enterprises—including an industry-leading 200K token context window, significant reductions in rates of model hallucination, system prompts and a new beta feature: tool use.",
|
2529
|
-
"displayName": "Anthropic: Claude v2",
|
2530
|
-
"enabled": false,
|
2531
|
-
"functionCall": false,
|
2532
|
-
"id": "anthropic/claude-2",
|
2533
|
-
"maxTokens": 4096,
|
2534
|
-
"pricing": {
|
2535
|
-
"input": 8,
|
2536
|
-
"output": 24,
|
2537
|
-
},
|
2538
|
-
"reasoning": false,
|
2539
|
-
"releasedAt": "2023-11-22",
|
2540
|
-
"vision": false,
|
2541
|
-
},
|
2542
|
-
{
|
2543
|
-
"contextWindowTokens": 200000,
|
2544
|
-
"description": "Claude 2 delivers advancements in key capabilities for enterprises—including an industry-leading 200K token context window, significant reductions in rates of model hallucination, system prompts and a new beta feature: tool use.
|
2545
|
-
|
2546
|
-
_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-2) variant._",
|
2547
|
-
"displayName": "Anthropic: Claude v2 (self-moderated)",
|
2548
|
-
"enabled": false,
|
2549
|
-
"functionCall": false,
|
2550
|
-
"id": "anthropic/claude-2:beta",
|
2551
|
-
"maxTokens": 4096,
|
2552
|
-
"pricing": {
|
2553
|
-
"input": 8,
|
2554
|
-
"output": 24,
|
2555
|
-
},
|
2556
|
-
"reasoning": false,
|
2557
|
-
"releasedAt": "2023-11-22",
|
2558
|
-
"vision": false,
|
2559
|
-
},
|
2560
|
-
{
|
2561
|
-
"contextWindowTokens": 4096,
|
2562
|
-
"description": "A continuation of [OpenHermes 2 model](/models/teknium/openhermes-2-mistral-7b), trained on additional code datasets.
|
2563
|
-
Potentially the most interesting finding from training on a good ratio (est. of around 7-14% of the total dataset) of code instruction was that it has boosted several non-code benchmarks, including TruthfulQA, AGIEval, and GPT4All suite. It did however reduce BigBench benchmark score, but the net gain overall is significant.",
|
2564
|
-
"displayName": "OpenHermes 2.5 Mistral 7B",
|
2565
|
-
"enabled": false,
|
2566
|
-
"functionCall": false,
|
2567
|
-
"id": "teknium/openhermes-2.5-mistral-7b",
|
2568
|
-
"maxTokens": undefined,
|
2569
|
-
"pricing": {
|
2570
|
-
"input": 0.17,
|
2571
|
-
"output": 0.17,
|
2572
|
-
},
|
2573
|
-
"reasoning": false,
|
2574
|
-
"releasedAt": "2023-11-20",
|
2575
|
-
"vision": false,
|
2576
|
-
},
|
2577
|
-
{
|
2578
|
-
"contextWindowTokens": 128000,
|
2579
|
-
"description": "Ability to understand images, in addition to all other [GPT-4 Turbo capabilties](/models/openai/gpt-4-turbo). Training data: up to Apr 2023.
|
2580
|
-
|
2581
|
-
**Note:** heavily rate limited by OpenAI while in preview.
|
2582
|
-
|
2583
|
-
#multimodal",
|
2584
|
-
"displayName": "OpenAI: GPT-4 Vision",
|
2585
|
-
"enabled": false,
|
2586
|
-
"functionCall": false,
|
2587
|
-
"id": "openai/gpt-4-vision-preview",
|
2588
|
-
"maxTokens": 4096,
|
2589
|
-
"pricing": {
|
2590
|
-
"input": 10,
|
2591
|
-
"output": 30,
|
2592
|
-
},
|
2593
|
-
"reasoning": false,
|
2594
|
-
"releasedAt": "2023-11-13",
|
2595
|
-
"vision": true,
|
2596
|
-
},
|
2597
|
-
{
|
2598
|
-
"contextWindowTokens": 4096,
|
2599
|
-
"description": "A Mythomax/MLewd_13B-style merge of selected 70B models.
|
2600
|
-
A multi-model merge of several LLaMA2 70B finetunes for roleplaying and creative work. The goal was to create a model that combines creativity with intelligence for an enhanced experience.
|
2601
|
-
|
2602
|
-
#merge #uncensored",
|
2603
|
-
"displayName": "lzlv 70B",
|
2604
|
-
"enabled": false,
|
2605
|
-
"functionCall": false,
|
2606
|
-
"id": "lizpreciatior/lzlv-70b-fp16-hf",
|
2607
|
-
"maxTokens": undefined,
|
2608
|
-
"pricing": {
|
2609
|
-
"input": 0.35,
|
2610
|
-
"output": 0.4,
|
2611
|
-
},
|
2612
|
-
"reasoning": false,
|
2613
|
-
"releasedAt": "2023-11-12",
|
2614
|
-
"vision": false,
|
2615
|
-
},
|
2616
|
-
{
|
2617
|
-
"contextWindowTokens": 6144,
|
2618
|
-
"description": "A large LLM created by combining two fine-tuned Llama 70B models into one 120B model. Combines Xwin and Euryale.
|
2619
|
-
|
2620
|
-
Credits to
|
2621
|
-
- [@chargoddard](https://huggingface.co/chargoddard) for developing the framework used to merge the model - [mergekit](https://github.com/cg123/mergekit).
|
2622
|
-
- [@Undi95](https://huggingface.co/Undi95) for helping with the merge ratios.
|
2623
|
-
|
2624
|
-
#merge",
|
2625
|
-
"displayName": "Goliath 120B",
|
2626
|
-
"enabled": false,
|
2627
|
-
"functionCall": false,
|
2628
|
-
"id": "alpindale/goliath-120b",
|
2629
|
-
"maxTokens": 400,
|
2630
|
-
"pricing": {
|
2631
|
-
"input": 9.375,
|
2632
|
-
"output": 9.375,
|
2633
|
-
},
|
2634
|
-
"reasoning": false,
|
2635
|
-
"releasedAt": "2023-11-10",
|
2636
|
-
"vision": false,
|
2637
|
-
},
|
2638
|
-
{
|
2639
|
-
"contextWindowTokens": 4096,
|
2640
|
-
"description": "A wild 7B parameter model that merges several models using the new task_arithmetic merge method from mergekit.
|
2641
|
-
List of merged models:
|
2642
|
-
- NousResearch/Nous-Capybara-7B-V1.9
|
2643
|
-
- [HuggingFaceH4/zephyr-7b-beta](/models/huggingfaceh4/zephyr-7b-beta)
|
2644
|
-
- lemonilia/AshhLimaRP-Mistral-7B
|
2645
|
-
- Vulkane/120-Days-of-Sodom-LoRA-Mistral-7b
|
2646
|
-
- Undi95/Mistral-pippa-sharegpt-7b-qlora
|
2647
|
-
|
2648
|
-
#merge #uncensored
|
2649
|
-
|
2650
|
-
_These are free, rate-limited endpoints for [Toppy M 7B](/models/undi95/toppy-m-7b). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
2651
|
-
"displayName": "Toppy M 7B (free)",
|
2652
|
-
"enabled": false,
|
2653
|
-
"functionCall": false,
|
2654
|
-
"id": "undi95/toppy-m-7b:free",
|
2655
|
-
"maxTokens": 2048,
|
2656
|
-
"pricing": {
|
2657
|
-
"input": 0,
|
2658
|
-
"output": 0,
|
2659
|
-
},
|
2660
|
-
"reasoning": false,
|
2661
|
-
"releasedAt": "2023-11-10",
|
2662
|
-
"vision": false,
|
2663
|
-
},
|
2664
|
-
{
|
2665
|
-
"contextWindowTokens": 4096,
|
2666
|
-
"description": "A wild 7B parameter model that merges several models using the new task_arithmetic merge method from mergekit.
|
2667
|
-
List of merged models:
|
2668
|
-
- NousResearch/Nous-Capybara-7B-V1.9
|
2669
|
-
- [HuggingFaceH4/zephyr-7b-beta](/models/huggingfaceh4/zephyr-7b-beta)
|
2670
|
-
- lemonilia/AshhLimaRP-Mistral-7B
|
2671
|
-
- Vulkane/120-Days-of-Sodom-LoRA-Mistral-7b
|
2672
|
-
- Undi95/Mistral-pippa-sharegpt-7b-qlora
|
2673
|
-
|
2674
|
-
#merge #uncensored",
|
2675
|
-
"displayName": "Toppy M 7B",
|
2676
|
-
"enabled": false,
|
2677
|
-
"functionCall": false,
|
2678
|
-
"id": "undi95/toppy-m-7b",
|
2679
|
-
"maxTokens": undefined,
|
2680
|
-
"pricing": {
|
2681
|
-
"input": 0.07,
|
2682
|
-
"output": 0.07,
|
2683
|
-
},
|
2684
|
-
"reasoning": false,
|
2685
|
-
"releasedAt": "2023-11-10",
|
2686
|
-
"vision": false,
|
2687
|
-
},
|
2688
|
-
{
|
2689
|
-
"contextWindowTokens": 4096,
|
2690
|
-
"description": "A wild 7B parameter model that merges several models using the new task_arithmetic merge method from mergekit.
|
2691
|
-
List of merged models:
|
2692
|
-
- NousResearch/Nous-Capybara-7B-V1.9
|
2693
|
-
- [HuggingFaceH4/zephyr-7b-beta](/models/huggingfaceh4/zephyr-7b-beta)
|
2694
|
-
- lemonilia/AshhLimaRP-Mistral-7B
|
2695
|
-
- Vulkane/120-Days-of-Sodom-LoRA-Mistral-7b
|
2696
|
-
- Undi95/Mistral-pippa-sharegpt-7b-qlora
|
2697
|
-
|
2698
|
-
#merge #uncensored
|
2699
|
-
|
2700
|
-
_These are higher-throughput endpoints for [Toppy M 7B](/models/undi95/toppy-m-7b). They may have higher prices._",
|
2701
|
-
"displayName": "Toppy M 7B (nitro)",
|
2702
|
-
"enabled": false,
|
2703
|
-
"functionCall": false,
|
2704
|
-
"id": "undi95/toppy-m-7b:nitro",
|
2705
|
-
"maxTokens": undefined,
|
2706
|
-
"pricing": {
|
2707
|
-
"input": 0.07,
|
2708
|
-
"output": 0.07,
|
2709
|
-
},
|
2710
|
-
"reasoning": false,
|
2711
|
-
"releasedAt": "2023-11-10",
|
2712
|
-
"vision": false,
|
2713
|
-
},
|
2714
|
-
{
|
2715
|
-
"contextWindowTokens": 200000,
|
2716
|
-
"description": "Depending on their size, subject, and complexity, your prompts will be sent to [Llama 3 70B Instruct](/models/meta-llama/llama-3-70b-instruct), [Claude 3.5 Sonnet (self-moderated)](/models/anthropic/claude-3.5-sonnet:beta) or [GPT-4o](/models/openai/gpt-4o). To see which model was used, visit [Activity](/activity).
|
2717
|
-
|
2718
|
-
A major redesign of this router is coming soon. Stay tuned on [Discord](https://discord.gg/fVyRaUDgxW) for updates.",
|
2719
|
-
"displayName": "Auto (best for prompt)",
|
2720
|
-
"enabled": true,
|
2721
|
-
"functionCall": false,
|
2722
|
-
"id": "openrouter/auto",
|
2723
|
-
"maxTokens": undefined,
|
2724
|
-
"pricing": {
|
2725
|
-
"input": undefined,
|
2726
|
-
"output": undefined,
|
2727
|
-
},
|
2728
|
-
"reasoning": false,
|
2729
|
-
"releasedAt": "2023-11-08",
|
2730
|
-
"vision": false,
|
2731
|
-
},
|
2732
|
-
{
|
2733
|
-
"contextWindowTokens": 128000,
|
2734
|
-
"description": "The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.
|
2735
|
-
|
2736
|
-
Training data: up to April 2023.",
|
2737
|
-
"displayName": "OpenAI: GPT-4 Turbo (older v1106)",
|
2738
|
-
"enabled": false,
|
2739
|
-
"functionCall": true,
|
2740
|
-
"id": "openai/gpt-4-1106-preview",
|
2741
|
-
"maxTokens": 4096,
|
2742
|
-
"pricing": {
|
2743
|
-
"input": 10,
|
2744
|
-
"output": 30,
|
2745
|
-
},
|
2746
|
-
"reasoning": false,
|
2747
|
-
"releasedAt": "2023-11-06",
|
2748
|
-
"vision": false,
|
2749
|
-
},
|
2750
|
-
{
|
2751
|
-
"contextWindowTokens": 16385,
|
2752
|
-
"description": "An older GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Sep 2021.",
|
2753
|
-
"displayName": "OpenAI: GPT-3.5 Turbo 16k (older v1106)",
|
2754
|
-
"enabled": false,
|
2755
|
-
"functionCall": true,
|
2756
|
-
"id": "openai/gpt-3.5-turbo-1106",
|
2757
|
-
"maxTokens": 4096,
|
2758
|
-
"pricing": {
|
2759
|
-
"input": 1,
|
2760
|
-
"output": 2,
|
2761
|
-
},
|
2762
|
-
"reasoning": false,
|
2763
|
-
"releasedAt": "2023-11-06",
|
2764
|
-
"vision": false,
|
2765
|
-
},
|
2766
|
-
{
|
2767
|
-
"contextWindowTokens": 131040,
|
2768
|
-
"description": "PaLM 2 fine-tuned for chatbot conversations that help with code-related questions.",
|
2769
|
-
"displayName": "Google: PaLM 2 Code Chat 32k",
|
2770
|
-
"enabled": false,
|
2771
|
-
"functionCall": false,
|
2772
|
-
"id": "google/palm-2-codechat-bison-32k",
|
2773
|
-
"maxTokens": 32768,
|
2774
|
-
"pricing": {
|
2775
|
-
"input": 0.25,
|
2776
|
-
"output": 0.5,
|
2777
|
-
},
|
2778
|
-
"reasoning": false,
|
2779
|
-
"releasedAt": "2023-11-03",
|
2780
|
-
"vision": false,
|
2781
|
-
},
|
2782
|
-
{
|
2783
|
-
"contextWindowTokens": 131040,
|
2784
|
-
"description": "PaLM 2 is a language model by Google with improved multilingual, reasoning and coding capabilities.",
|
2785
|
-
"displayName": "Google: PaLM 2 Chat 32k",
|
2786
|
-
"enabled": false,
|
2787
|
-
"functionCall": false,
|
2788
|
-
"id": "google/palm-2-chat-bison-32k",
|
2789
|
-
"maxTokens": 32768,
|
2790
|
-
"pricing": {
|
2791
|
-
"input": 0.25,
|
2792
|
-
"output": 0.5,
|
2793
|
-
},
|
2794
|
-
"reasoning": false,
|
2795
|
-
"releasedAt": "2023-11-03",
|
2796
|
-
"vision": false,
|
2797
|
-
},
|
2798
|
-
{
|
2799
|
-
"contextWindowTokens": 4096,
|
2800
|
-
"description": "A Llama 2 70B fine-tune using synthetic data (the Airoboros dataset).
|
2801
|
-
|
2802
|
-
Currently based on [jondurbin/airoboros-l2-70b](https://huggingface.co/jondurbin/airoboros-l2-70b-2.2.1), but might get updated in the future.",
|
2803
|
-
"displayName": "Airoboros 70B",
|
2804
|
-
"enabled": false,
|
2805
|
-
"functionCall": false,
|
2806
|
-
"id": "jondurbin/airoboros-l2-70b",
|
2807
|
-
"maxTokens": undefined,
|
2808
|
-
"pricing": {
|
2809
|
-
"input": 0.5,
|
2810
|
-
"output": 0.5,
|
2811
|
-
},
|
2812
|
-
"reasoning": false,
|
2813
|
-
"releasedAt": "2023-10-29",
|
2814
|
-
"vision": false,
|
2815
|
-
},
|
2816
|
-
{
|
2817
|
-
"contextWindowTokens": 8192,
|
2818
|
-
"description": "Xwin-LM aims to develop and open-source alignment tech for LLMs. Our first release, built-upon on the [Llama2](/models/\${Model.Llama_2_13B_Chat}) base models, ranked TOP-1 on AlpacaEval. Notably, it's the first to surpass [GPT-4](/models/\${Model.GPT_4}) on this benchmark. The project will be continuously updated.",
|
2819
|
-
"displayName": "Xwin 70B",
|
2820
|
-
"enabled": false,
|
2821
|
-
"functionCall": false,
|
2822
|
-
"id": "xwin-lm/xwin-lm-70b",
|
2823
|
-
"maxTokens": 400,
|
2824
|
-
"pricing": {
|
2825
|
-
"input": 3.75,
|
2826
|
-
"output": 3.75,
|
2827
|
-
},
|
2828
|
-
"reasoning": false,
|
2829
|
-
"releasedAt": "2023-10-15",
|
2830
|
-
"vision": false,
|
2831
|
-
},
|
2832
|
-
{
|
2833
|
-
"contextWindowTokens": 4096,
|
2834
|
-
"description": "A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.",
|
2835
|
-
"displayName": "Mistral: Mistral 7B Instruct v0.1",
|
2836
|
-
"enabled": false,
|
2837
|
-
"functionCall": true,
|
2838
|
-
"id": "mistralai/mistral-7b-instruct-v0.1",
|
2839
|
-
"maxTokens": undefined,
|
2840
|
-
"pricing": {
|
2841
|
-
"input": 0.055,
|
2842
|
-
"output": 0.055,
|
2843
|
-
},
|
2844
|
-
"reasoning": false,
|
2845
|
-
"releasedAt": "2023-09-28",
|
2846
|
-
"vision": false,
|
2847
|
-
},
|
2848
|
-
{
|
2849
|
-
"contextWindowTokens": 4095,
|
2850
|
-
"description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. Training data: up to Sep 2021.",
|
2851
|
-
"displayName": "OpenAI: GPT-3.5 Turbo Instruct",
|
2852
|
-
"enabled": false,
|
2853
|
-
"functionCall": false,
|
2854
|
-
"id": "openai/gpt-3.5-turbo-instruct",
|
2855
|
-
"maxTokens": 4096,
|
2856
|
-
"pricing": {
|
2857
|
-
"input": 1.5,
|
2858
|
-
"output": 2,
|
2859
|
-
},
|
2860
|
-
"reasoning": false,
|
2861
|
-
"releasedAt": "2023-09-28",
|
2862
|
-
"vision": false,
|
2863
|
-
},
|
2864
|
-
{
|
2865
|
-
"contextWindowTokens": 8192,
|
2866
|
-
"description": "A blend of the new Pygmalion-13b and MythoMax. #merge",
|
2867
|
-
"displayName": "Pygmalion: Mythalion 13B",
|
2868
|
-
"enabled": false,
|
2869
|
-
"functionCall": false,
|
2870
|
-
"id": "pygmalionai/mythalion-13b",
|
2871
|
-
"maxTokens": 400,
|
2872
|
-
"pricing": {
|
2873
|
-
"input": 1.125,
|
2874
|
-
"output": 1.125,
|
2875
|
-
},
|
2876
|
-
"reasoning": false,
|
2877
|
-
"releasedAt": "2023-09-02",
|
2878
|
-
"vision": false,
|
2879
|
-
},
|
2880
|
-
{
|
2881
|
-
"contextWindowTokens": 32767,
|
2882
|
-
"description": "GPT-4-32k is an extended version of GPT-4, with the same capabilities but quadrupled context length, allowing for processing up to 40 pages of text in a single pass. This is particularly beneficial for handling longer content like interacting with PDFs without an external vector database. Training data: up to Sep 2021.",
|
2883
|
-
"displayName": "OpenAI: GPT-4 32k (older v0314)",
|
2884
|
-
"enabled": false,
|
2885
|
-
"functionCall": true,
|
2886
|
-
"id": "openai/gpt-4-32k-0314",
|
2887
|
-
"maxTokens": 4096,
|
2888
|
-
"pricing": {
|
2889
|
-
"input": 60,
|
2890
|
-
"output": 120,
|
2891
|
-
},
|
2892
|
-
"reasoning": false,
|
2893
|
-
"releasedAt": "2023-08-28",
|
2894
|
-
"vision": false,
|
2895
|
-
},
|
2896
|
-
{
|
2897
|
-
"contextWindowTokens": 32767,
|
2898
|
-
"description": "GPT-4-32k is an extended version of GPT-4, with the same capabilities but quadrupled context length, allowing for processing up to 40 pages of text in a single pass. This is particularly beneficial for handling longer content like interacting with PDFs without an external vector database. Training data: up to Sep 2021.",
|
2899
|
-
"displayName": "OpenAI: GPT-4 32k",
|
2900
|
-
"enabled": false,
|
2901
|
-
"functionCall": true,
|
2902
|
-
"id": "openai/gpt-4-32k",
|
2903
|
-
"maxTokens": 4096,
|
2904
|
-
"pricing": {
|
2905
|
-
"input": 60,
|
2906
|
-
"output": 120,
|
2907
|
-
},
|
2908
|
-
"reasoning": false,
|
2909
|
-
"releasedAt": "2023-08-28",
|
2910
|
-
"vision": false,
|
2911
|
-
},
|
2912
|
-
{
|
2913
|
-
"contextWindowTokens": 16385,
|
2914
|
-
"description": "This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost. Training data: up to Sep 2021.",
|
2915
|
-
"displayName": "OpenAI: GPT-3.5 Turbo 16k",
|
2916
|
-
"enabled": false,
|
2917
|
-
"functionCall": true,
|
2918
|
-
"id": "openai/gpt-3.5-turbo-16k",
|
2919
|
-
"maxTokens": 4096,
|
2920
|
-
"pricing": {
|
2921
|
-
"input": 3,
|
2922
|
-
"output": 4,
|
2923
|
-
},
|
2924
|
-
"reasoning": false,
|
2925
|
-
"releasedAt": "2023-08-28",
|
2926
|
-
"vision": false,
|
2927
|
-
},
|
2928
|
-
{
|
2929
|
-
"contextWindowTokens": 4096,
|
2930
|
-
"description": "A state-of-the-art language model fine-tuned on over 300k instructions by Nous Research, with Teknium and Emozilla leading the fine tuning process.",
|
2931
|
-
"displayName": "Nous: Hermes 13B",
|
2932
|
-
"enabled": false,
|
2933
|
-
"functionCall": false,
|
2934
|
-
"id": "nousresearch/nous-hermes-llama2-13b",
|
2935
|
-
"maxTokens": undefined,
|
2936
|
-
"pricing": {
|
2937
|
-
"input": 0.17,
|
2938
|
-
"output": 0.17,
|
2939
|
-
},
|
2940
|
-
"reasoning": false,
|
2941
|
-
"releasedAt": "2023-08-20",
|
2942
|
-
"vision": false,
|
2943
|
-
},
|
2944
|
-
{
|
2945
|
-
"contextWindowTokens": 4096,
|
2946
|
-
"description": "Zephyr is a series of language models that are trained to act as helpful assistants. Zephyr-7B-β is the second model in the series, and is a fine-tuned version of [mistralai/Mistral-7B-v0.1](/models/mistralai/mistral-7b-instruct-v0.1) that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO).
|
2947
|
-
|
2948
|
-
_These are free, rate-limited endpoints for [Zephyr 7B](/models/huggingfaceh4/zephyr-7b-beta). Outputs may be cached. Read about rate limits [here](/docs/limits)._",
|
2949
|
-
"displayName": "Hugging Face: Zephyr 7B (free)",
|
2950
|
-
"enabled": false,
|
2951
|
-
"functionCall": false,
|
2952
|
-
"id": "huggingfaceh4/zephyr-7b-beta:free",
|
2953
|
-
"maxTokens": 2048,
|
2954
|
-
"pricing": {
|
2955
|
-
"input": 0,
|
2956
|
-
"output": 0,
|
2957
|
-
},
|
2958
|
-
"reasoning": false,
|
2959
|
-
"releasedAt": "2023-08-02",
|
2960
|
-
"vision": false,
|
2961
|
-
},
|
2962
|
-
{
|
2963
|
-
"contextWindowTokens": 8000,
|
2964
|
-
"description": "An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. Meant for use in roleplay/narrative situations.",
|
2965
|
-
"displayName": "Mancer: Weaver (alpha)",
|
2966
|
-
"enabled": false,
|
2967
|
-
"functionCall": false,
|
2968
|
-
"id": "mancer/weaver",
|
2969
|
-
"maxTokens": 1000,
|
2970
|
-
"pricing": {
|
2971
|
-
"input": 1.875,
|
2972
|
-
"output": 2.25,
|
2973
|
-
},
|
2974
|
-
"reasoning": false,
|
2975
|
-
"releasedAt": "2023-08-02",
|
2976
|
-
"vision": false,
|
2977
|
-
},
|
2978
|
-
{
|
2979
|
-
"contextWindowTokens": 100000,
|
2980
|
-
"description": "Anthropic's model for low-latency, high throughput text generation. Supports hundreds of pages of text.",
|
2981
|
-
"displayName": "Anthropic: Claude Instant v1.0",
|
2982
|
-
"enabled": false,
|
2983
|
-
"functionCall": false,
|
2984
|
-
"id": "anthropic/claude-instant-1.0",
|
2985
|
-
"maxTokens": 4096,
|
2986
|
-
"pricing": {
|
2987
|
-
"input": 0.8,
|
2988
|
-
"output": 2.4,
|
2989
|
-
},
|
2990
|
-
"reasoning": false,
|
2991
|
-
"releasedAt": "2023-07-28",
|
2992
|
-
"vision": false,
|
2993
|
-
},
|
2994
|
-
{
|
2995
|
-
"contextWindowTokens": 100000,
|
2996
|
-
"description": "Anthropic's model for low-latency, high throughput text generation. Supports hundreds of pages of text.",
|
2997
|
-
"displayName": "Anthropic: Claude v1.2",
|
2998
|
-
"enabled": false,
|
2999
|
-
"functionCall": false,
|
3000
|
-
"id": "anthropic/claude-1.2",
|
3001
|
-
"maxTokens": 4096,
|
3002
|
-
"pricing": {
|
3003
|
-
"input": 8,
|
3004
|
-
"output": 24,
|
3005
|
-
},
|
3006
|
-
"reasoning": false,
|
3007
|
-
"releasedAt": "2023-07-28",
|
3008
|
-
"vision": false,
|
3009
|
-
},
|
3010
|
-
{
|
3011
|
-
"contextWindowTokens": 100000,
|
3012
|
-
"description": "Anthropic's model for low-latency, high throughput text generation. Supports hundreds of pages of text.",
|
3013
|
-
"displayName": "Anthropic: Claude v1",
|
3014
|
-
"enabled": false,
|
3015
|
-
"functionCall": false,
|
3016
|
-
"id": "anthropic/claude-1",
|
3017
|
-
"maxTokens": 4096,
|
3018
|
-
"pricing": {
|
3019
|
-
"input": 8,
|
3020
|
-
"output": 24,
|
3021
|
-
},
|
3022
|
-
"reasoning": false,
|
3023
|
-
"releasedAt": "2023-07-28",
|
3024
|
-
"vision": false,
|
3025
|
-
},
|
3026
|
-
{
|
3027
|
-
"contextWindowTokens": 100000,
|
3028
|
-
"description": "Anthropic's model for low-latency, high throughput text generation. Supports hundreds of pages of text.",
|
3029
|
-
"displayName": "Anthropic: Claude Instant v1",
|
3030
|
-
"enabled": false,
|
3031
|
-
"functionCall": false,
|
3032
|
-
"id": "anthropic/claude-instant-1",
|
3033
|
-
"maxTokens": 4096,
|
3034
|
-
"pricing": {
|
3035
|
-
"input": 0.8,
|
3036
|
-
"output": 2.4,
|
3037
|
-
},
|
3038
|
-
"reasoning": false,
|
3039
|
-
"releasedAt": "2023-07-28",
|
3040
|
-
"vision": false,
|
3041
|
-
},
|
3042
|
-
{
|
3043
|
-
"contextWindowTokens": 100000,
|
3044
|
-
"description": "Anthropic's model for low-latency, high throughput text generation. Supports hundreds of pages of text.
|
3045
|
-
|
3046
|
-
_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-instant-1) variant._",
|
3047
|
-
"displayName": "Anthropic: Claude Instant v1 (self-moderated)",
|
3048
|
-
"enabled": false,
|
3049
|
-
"functionCall": false,
|
3050
|
-
"id": "anthropic/claude-instant-1:beta",
|
3051
|
-
"maxTokens": 4096,
|
3052
|
-
"pricing": {
|
3053
|
-
"input": 0.8,
|
3054
|
-
"output": 2.4,
|
3055
|
-
},
|
3056
|
-
"reasoning": false,
|
3057
|
-
"releasedAt": "2023-07-28",
|
3058
|
-
"vision": false,
|
3059
|
-
},
|
3060
|
-
{
|
3061
|
-
"contextWindowTokens": 100000,
|
3062
|
-
"description": "Anthropic's flagship model. Superior performance on tasks that require complex reasoning. Supports hundreds of pages of text.",
|
3063
|
-
"displayName": "Anthropic: Claude v2.0",
|
3064
|
-
"enabled": false,
|
3065
|
-
"functionCall": false,
|
3066
|
-
"id": "anthropic/claude-2.0",
|
3067
|
-
"maxTokens": 4096,
|
3068
|
-
"pricing": {
|
3069
|
-
"input": 8,
|
3070
|
-
"output": 24,
|
3071
|
-
},
|
3072
|
-
"reasoning": false,
|
3073
|
-
"releasedAt": "2023-07-28",
|
3074
|
-
"vision": false,
|
3075
|
-
},
|
3076
|
-
{
|
3077
|
-
"contextWindowTokens": 100000,
|
3078
|
-
"description": "Anthropic's flagship model. Superior performance on tasks that require complex reasoning. Supports hundreds of pages of text.
|
3079
|
-
|
3080
|
-
_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/models/anthropic/claude-2.0) variant._",
|
3081
|
-
"displayName": "Anthropic: Claude v2.0 (self-moderated)",
|
3082
|
-
"enabled": false,
|
3083
|
-
"functionCall": false,
|
3084
|
-
"id": "anthropic/claude-2.0:beta",
|
3085
|
-
"maxTokens": 4096,
|
3086
|
-
"pricing": {
|
3087
|
-
"input": 8,
|
3088
|
-
"output": 24,
|
3089
|
-
},
|
3090
|
-
"reasoning": false,
|
3091
|
-
"releasedAt": "2023-07-28",
|
3092
|
-
"vision": false,
|
3093
|
-
},
|
3094
|
-
{
|
3095
|
-
"contextWindowTokens": 4096,
|
3096
|
-
"description": "A recreation trial of the original MythoMax-L2-B13 but with updated models. #merge",
|
3097
|
-
"displayName": "ReMM SLERP 13B",
|
3098
|
-
"enabled": false,
|
3099
|
-
"functionCall": false,
|
3100
|
-
"id": "undi95/remm-slerp-l2-13b",
|
3101
|
-
"maxTokens": 400,
|
3102
|
-
"pricing": {
|
3103
|
-
"input": 1.125,
|
3104
|
-
"output": 1.125,
|
3105
|
-
},
|
3106
|
-
"reasoning": false,
|
3107
|
-
"releasedAt": "2023-07-22",
|
3108
|
-
"vision": false,
|
3109
|
-
},
|
3110
|
-
{
|
3111
|
-
"contextWindowTokens": 6144,
|
3112
|
-
"description": "A recreation trial of the original MythoMax-L2-B13 but with updated models. #merge
|
3113
|
-
|
3114
|
-
_These are extended-context endpoints for [ReMM SLERP 13B](/models/undi95/remm-slerp-l2-13b). They may have higher prices._",
|
3115
|
-
"displayName": "ReMM SLERP 13B (extended)",
|
3116
|
-
"enabled": false,
|
3117
|
-
"functionCall": false,
|
3118
|
-
"id": "undi95/remm-slerp-l2-13b:extended",
|
3119
|
-
"maxTokens": 400,
|
3120
|
-
"pricing": {
|
3121
|
-
"input": 1.125,
|
3122
|
-
"output": 1.125,
|
3123
|
-
},
|
3124
|
-
"reasoning": false,
|
3125
|
-
"releasedAt": "2023-07-22",
|
3126
|
-
"vision": false,
|
3127
|
-
},
|
3128
|
-
{
|
3129
|
-
"contextWindowTokens": 28672,
|
3130
|
-
"description": "PaLM 2 fine-tuned for chatbot conversations that help with code-related questions.",
|
3131
|
-
"displayName": "Google: PaLM 2 Code Chat",
|
3132
|
-
"enabled": false,
|
3133
|
-
"functionCall": false,
|
3134
|
-
"id": "google/palm-2-codechat-bison",
|
3135
|
-
"maxTokens": 4096,
|
3136
|
-
"pricing": {
|
3137
|
-
"input": 0.25,
|
3138
|
-
"output": 0.5,
|
3139
|
-
},
|
3140
|
-
"reasoning": false,
|
3141
|
-
"releasedAt": "2023-07-20",
|
3142
|
-
"vision": false,
|
3143
|
-
},
|
3144
|
-
{
|
3145
|
-
"contextWindowTokens": 36864,
|
3146
|
-
"description": "PaLM 2 is a language model by Google with improved multilingual, reasoning and coding capabilities.",
|
3147
|
-
"displayName": "Google: PaLM 2 Chat",
|
3148
|
-
"enabled": false,
|
3149
|
-
"functionCall": false,
|
3150
|
-
"id": "google/palm-2-chat-bison",
|
3151
|
-
"maxTokens": 4096,
|
3152
|
-
"pricing": {
|
3153
|
-
"input": 0.25,
|
3154
|
-
"output": 0.5,
|
3155
|
-
},
|
3156
|
-
"reasoning": false,
|
3157
|
-
"releasedAt": "2023-07-20",
|
3158
|
-
"vision": false,
|
3159
|
-
},
|
3160
|
-
{
|
3161
|
-
"contextWindowTokens": 4096,
|
3162
|
-
"description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge",
|
3163
|
-
"displayName": "MythoMax 13B",
|
3164
|
-
"enabled": false,
|
3165
|
-
"functionCall": false,
|
3166
|
-
"id": "gryphe/mythomax-l2-13b",
|
3167
|
-
"maxTokens": undefined,
|
3168
|
-
"pricing": {
|
3169
|
-
"input": 0.1,
|
3170
|
-
"output": 0.1,
|
3171
|
-
},
|
3172
|
-
"reasoning": false,
|
3173
|
-
"releasedAt": "2023-07-02",
|
3174
|
-
"vision": false,
|
3175
|
-
},
|
3176
|
-
{
|
3177
|
-
"contextWindowTokens": 4096,
|
3178
|
-
"description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge
|
3179
|
-
|
3180
|
-
_These are higher-throughput endpoints for [MythoMax 13B](/models/gryphe/mythomax-l2-13b). They may have higher prices._",
|
3181
|
-
"displayName": "MythoMax 13B (nitro)",
|
3182
|
-
"enabled": false,
|
3183
|
-
"functionCall": false,
|
3184
|
-
"id": "gryphe/mythomax-l2-13b:nitro",
|
3185
|
-
"maxTokens": undefined,
|
3186
|
-
"pricing": {
|
3187
|
-
"input": 0.2,
|
3188
|
-
"output": 0.2,
|
3189
|
-
},
|
3190
|
-
"reasoning": false,
|
3191
|
-
"releasedAt": "2023-07-02",
|
3192
|
-
"vision": false,
|
3193
|
-
},
|
3194
|
-
{
|
3195
|
-
"contextWindowTokens": 8192,
|
3196
|
-
"description": "One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge
|
3197
|
-
|
3198
|
-
_These are extended-context endpoints for [MythoMax 13B](/models/gryphe/mythomax-l2-13b). They may have higher prices._",
|
3199
|
-
"displayName": "MythoMax 13B (extended)",
|
3200
|
-
"enabled": false,
|
3201
|
-
"functionCall": false,
|
3202
|
-
"id": "gryphe/mythomax-l2-13b:extended",
|
3203
|
-
"maxTokens": 400,
|
3204
|
-
"pricing": {
|
3205
|
-
"input": 1.125,
|
3206
|
-
"output": 1.125,
|
3207
|
-
},
|
3208
|
-
"reasoning": false,
|
3209
|
-
"releasedAt": "2023-07-02",
|
3210
|
-
"vision": false,
|
3211
|
-
},
|
3212
|
-
{
|
3213
|
-
"contextWindowTokens": 4096,
|
3214
|
-
"description": "A 13 billion parameter language model from Meta, fine tuned for chat completions",
|
3215
|
-
"displayName": "Meta: Llama v2 13B Chat",
|
3216
|
-
"enabled": false,
|
3217
|
-
"functionCall": false,
|
3218
|
-
"id": "meta-llama/llama-2-13b-chat",
|
3219
|
-
"maxTokens": undefined,
|
3220
|
-
"pricing": {
|
3221
|
-
"input": 0.27,
|
3222
|
-
"output": 0.27,
|
3223
|
-
},
|
3224
|
-
"reasoning": false,
|
3225
|
-
"releasedAt": "2023-06-20",
|
3226
|
-
"vision": false,
|
3227
|
-
},
|
3228
|
-
{
|
3229
|
-
"contextWindowTokens": 8191,
|
3230
|
-
"description": "GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14. Training data: up to Sep 2021.",
|
3231
|
-
"displayName": "OpenAI: GPT-4 (older v0314)",
|
3232
|
-
"enabled": false,
|
3233
|
-
"functionCall": true,
|
3234
|
-
"id": "openai/gpt-4-0314",
|
3235
|
-
"maxTokens": 4096,
|
3236
|
-
"pricing": {
|
3237
|
-
"input": 30,
|
3238
|
-
"output": 60,
|
3239
|
-
},
|
3240
|
-
"reasoning": false,
|
3241
|
-
"releasedAt": "2023-05-28",
|
3242
|
-
"vision": false,
|
3243
|
-
},
|
3244
|
-
{
|
3245
|
-
"contextWindowTokens": 8191,
|
3246
|
-
"description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning capabilities. Training data: up to Sep 2021.",
|
3247
|
-
"displayName": "OpenAI: GPT-4",
|
3248
|
-
"enabled": false,
|
3249
|
-
"functionCall": true,
|
3250
|
-
"id": "openai/gpt-4",
|
3251
|
-
"maxTokens": 4096,
|
3252
|
-
"pricing": {
|
3253
|
-
"input": 30,
|
3254
|
-
"output": 60,
|
3255
|
-
},
|
3256
|
-
"reasoning": false,
|
3257
|
-
"releasedAt": "2023-05-28",
|
3258
|
-
"vision": false,
|
3259
|
-
},
|
3260
|
-
{
|
3261
|
-
"contextWindowTokens": 4095,
|
3262
|
-
"description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.
|
3263
|
-
|
3264
|
-
Training data up to Sep 2021.",
|
3265
|
-
"displayName": "OpenAI: GPT-3.5 Turbo (older v0301)",
|
3266
|
-
"enabled": false,
|
3267
|
-
"functionCall": false,
|
3268
|
-
"id": "openai/gpt-3.5-turbo-0301",
|
3269
|
-
"maxTokens": 4096,
|
3270
|
-
"pricing": {
|
3271
|
-
"input": 1,
|
3272
|
-
"output": 2,
|
3273
|
-
},
|
3274
|
-
"reasoning": false,
|
3275
|
-
"releasedAt": "2023-05-28",
|
3276
|
-
"vision": false,
|
3277
|
-
},
|
3278
|
-
{
|
3279
|
-
"contextWindowTokens": 16385,
|
3280
|
-
"description": "The latest GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Sep 2021.
|
3281
|
-
|
3282
|
-
This version has a higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls.",
|
3283
|
-
"displayName": "OpenAI: GPT-3.5 Turbo 16k",
|
3284
|
-
"enabled": false,
|
3285
|
-
"functionCall": true,
|
3286
|
-
"id": "openai/gpt-3.5-turbo-0125",
|
3287
|
-
"maxTokens": 4096,
|
3288
|
-
"pricing": {
|
3289
|
-
"input": 0.5,
|
3290
|
-
"output": 1.5,
|
3291
|
-
},
|
3292
|
-
"reasoning": false,
|
3293
|
-
"releasedAt": "2023-05-28",
|
3294
|
-
"vision": false,
|
3295
|
-
},
|
3296
|
-
{
|
3297
|
-
"contextWindowTokens": 16385,
|
3298
|
-
"description": "GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.
|
3299
|
-
|
3300
|
-
Training data up to Sep 2021.",
|
3301
|
-
"displayName": "OpenAI: GPT-3.5 Turbo",
|
3302
|
-
"enabled": false,
|
3303
|
-
"functionCall": true,
|
3304
|
-
"id": "openai/gpt-3.5-turbo",
|
3305
|
-
"maxTokens": 4096,
|
3306
|
-
"pricing": {
|
3307
|
-
"input": 0.5,
|
3308
|
-
"output": 1.5,
|
3309
|
-
},
|
3310
|
-
"reasoning": false,
|
3311
|
-
"releasedAt": "2023-05-28",
|
72
|
+
"releasedAt": "2024-09-06",
|
3312
73
|
"vision": false,
|
3313
74
|
},
|
3314
75
|
]
|