@lobehub/chat 1.111.1 → 1.111.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/rules/code-review.mdc +2 -19
- package/.cursor/rules/cursor-ux.mdc +0 -72
- package/.cursor/rules/project-introduce.mdc +5 -5
- package/.cursor/rules/react-component.mdc +92 -73
- package/.cursor/rules/rules-attach.mdc +28 -61
- package/.cursor/rules/system-role.mdc +8 -20
- package/.cursor/rules/typescript.mdc +55 -14
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +5 -0
- package/package.json +1 -1
- package/packages/types/src/aiModel.ts +67 -46
- package/packages/types/src/llm.ts +3 -3
- package/src/app/[variants]/(main)/discover/(detail)/model/[...slugs]/features/Details/Overview/ProviderList/index.tsx +23 -12
- package/src/app/[variants]/(main)/discover/(detail)/provider/[...slugs]/features/Details/Overview/ModelList/index.tsx +23 -10
- package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelItem.tsx +21 -12
- package/src/config/aiModels/ai21.ts +8 -4
- package/src/config/aiModels/ai360.ts +28 -14
- package/src/config/aiModels/aihubmix.ts +174 -86
- package/src/config/aiModels/anthropic.ts +97 -38
- package/src/config/aiModels/azure.ts +54 -32
- package/src/config/aiModels/azureai.ts +63 -37
- package/src/config/aiModels/baichuan.ts +24 -12
- package/src/config/aiModels/bedrock.ts +60 -30
- package/src/config/aiModels/cohere.ts +60 -30
- package/src/config/aiModels/deepseek.ts +10 -6
- package/src/config/aiModels/fireworksai.ts +88 -44
- package/src/config/aiModels/giteeai.ts +1 -1
- package/src/config/aiModels/github.ts +44 -26
- package/src/config/aiModels/google.ts +119 -68
- package/src/config/aiModels/groq.ts +48 -24
- package/src/config/aiModels/higress.ts +617 -310
- package/src/config/aiModels/hunyuan.ts +105 -54
- package/src/config/aiModels/infiniai.ts +104 -52
- package/src/config/aiModels/internlm.ts +16 -8
- package/src/config/aiModels/jina.ts +4 -2
- package/src/config/aiModels/minimax.ts +11 -10
- package/src/config/aiModels/mistral.ts +40 -20
- package/src/config/aiModels/moonshot.ts +42 -22
- package/src/config/aiModels/novita.ts +196 -98
- package/src/config/aiModels/openai.ts +270 -137
- package/src/config/aiModels/openrouter.ts +205 -100
- package/src/config/aiModels/perplexity.ts +36 -6
- package/src/config/aiModels/ppio.ts +76 -38
- package/src/config/aiModels/qwen.ts +257 -133
- package/src/config/aiModels/sambanova.ts +56 -28
- package/src/config/aiModels/sensenova.ts +100 -50
- package/src/config/aiModels/siliconcloud.ts +224 -112
- package/src/config/aiModels/stepfun.ts +44 -22
- package/src/config/aiModels/taichu.ts +8 -4
- package/src/config/aiModels/tencentcloud.ts +12 -6
- package/src/config/aiModels/upstage.ts +8 -4
- package/src/config/aiModels/v0.ts +15 -12
- package/src/config/aiModels/vertexai.ts +49 -27
- package/src/config/aiModels/volcengine.ts +110 -51
- package/src/config/aiModels/wenxin.ts +179 -73
- package/src/config/aiModels/xai.ts +33 -19
- package/src/config/aiModels/zeroone.ts +48 -24
- package/src/config/aiModels/zhipu.ts +118 -69
- package/src/config/modelProviders/ai21.ts +0 -8
- package/src/config/modelProviders/ai360.ts +0 -20
- package/src/config/modelProviders/anthropic.ts +0 -56
- package/src/config/modelProviders/baichuan.ts +0 -30
- package/src/config/modelProviders/bedrock.ts +0 -74
- package/src/config/modelProviders/deepseek.ts +0 -13
- package/src/config/modelProviders/fireworksai.ts +0 -88
- package/src/config/modelProviders/google.ts +0 -59
- package/src/config/modelProviders/groq.ts +0 -48
- package/src/config/modelProviders/higress.ts +0 -727
- package/src/config/modelProviders/hunyuan.ts +0 -45
- package/src/config/modelProviders/infiniai.ts +0 -60
- package/src/config/modelProviders/internlm.ts +0 -8
- package/src/config/modelProviders/mistral.ts +0 -48
- package/src/config/modelProviders/modelscope.ts +2 -1
- package/src/config/modelProviders/openai.ts +5 -100
- package/src/config/modelProviders/openrouter.ts +0 -77
- package/src/config/modelProviders/ppio.ts +0 -95
- package/src/config/modelProviders/qwen.ts +0 -165
- package/src/config/modelProviders/sensenova.ts +0 -45
- package/src/config/modelProviders/siliconcloud.ts +0 -266
- package/src/config/modelProviders/stepfun.ts +0 -60
- package/src/config/modelProviders/taichu.ts +0 -10
- package/src/config/modelProviders/wenxin.ts +0 -90
- package/src/config/modelProviders/xai.ts +0 -16
- package/src/config/modelProviders/zeroone.ts +0 -60
- package/src/config/modelProviders/zhipu.ts +0 -80
- package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +4 -3
- package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +25 -15
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +7 -5
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +6 -5
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +54 -8
- package/src/server/routers/lambda/agent.ts +2 -2
- package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap +0 -28
- package/src/server/services/discover/index.ts +7 -6
- package/src/server/services/user/index.ts +1 -2
- package/src/utils/__snapshots__/parseModels.test.ts.snap +28 -4
- package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap +0 -8
- package/src/utils/parseModels.test.ts +60 -9
- package/src/utils/pricing.test.ts +183 -0
- package/src/utils/pricing.ts +90 -0
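
Nearly all of the churn in this release comes from migrating every provider's model pricing in `src/config/aiModels/*.ts` from flat per-token fields to a structured `units` array, together with updated types in `packages/types/src/aiModel.ts` and a new `src/utils/pricing.ts` helper plus tests. The real type definitions are not visible in this diff; the TypeScript sketch below only approximates the shape the new config entries imply, with `PricingUnit` and `Pricing` as placeholder names.

```ts
// Approximate shape inferred from the config entries in this diff; the real
// definitions live in packages/types/src/aiModel.ts and may differ.
type PricingUnitName = 'textInput' | 'textOutput' | 'textInput_cacheRead';

interface PricingUnit {
  name: PricingUnitName; // which token stream the rate applies to
  rate: number; // price in USD per `unit`
  strategy: 'fixed'; // every entry in this diff uses a fixed rate
  unit: 'millionTokens'; // rate is quoted per one million tokens
}

interface Pricing {
  units: PricingUnit[];
}

// Mirrors the fireworksai deepseek-r1 entry below:
const deepseekR1Pricing: Pricing = {
  units: [
    { name: 'textInput', rate: 8, strategy: 'fixed', unit: 'millionTokens' },
    { name: 'textOutput', rate: 8, strategy: 'fixed', unit: 'millionTokens' },
  ],
};
```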
package/src/config/aiModels/fireworksai.ts

@@ -9,8 +9,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/llama-v3p3-70b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -22,8 +24,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/llama-v3p2-3b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -38,8 +42,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/llama-v3p2-11b-vision-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -54,8 +60,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/llama-v3p2-90b-vision-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -66,8 +74,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Llama 3.1 8B Instruct',
     id: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -81,8 +91,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Llama 3.1 70B Instruct',
     id: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -96,8 +108,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Llama 3.1 405B Instruct',
     id: 'accounts/fireworks/models/llama-v3p1-405b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -108,8 +122,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Llama 3 8B Instruct',
     id: 'accounts/fireworks/models/llama-v3-8b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -120,8 +136,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Llama 3 70B Instruct',
     id: 'accounts/fireworks/models/llama-v3-70b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -132,8 +150,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Llama 3 8B Instruct (HF version)',
     id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -144,8 +164,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/mistral-small-24b-instruct-2501',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -156,8 +178,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Mixtral MoE 8x7B Instruct',
     id: 'accounts/fireworks/models/mixtral-8x7b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -171,8 +195,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'Mixtral MoE 8x22B Instruct',
     id: 'accounts/fireworks/models/mixtral-8x22b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 1.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 1.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -187,8 +213,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/phi-3-vision-128k-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -199,8 +227,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     displayName: 'MythoMax L2 13b',
     id: 'accounts/fireworks/models/mythomax-l2-13b',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.2, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -212,8 +242,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/deepseek-v3',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -228,8 +260,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/deepseek-r1',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 8, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 8, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -244,8 +278,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/qwen-qwq-32b-preview',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -257,8 +293,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/qwen2p5-72b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -272,8 +310,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/qwen2-vl-72b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -285,8 +325,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/fireworks/models/qwen2p5-coder-32b-instruct',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.9, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
@@ -298,8 +340,10 @@ const fireworksaiChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'accounts/yi-01-ai/models/yi-large',
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 3, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     type: 'chat',
   },
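Every Fireworks entry above follows the same pattern: identical fixed input and output rates quoted per million tokens. The new `src/utils/pricing.ts` listed in the file summary presumably turns these units into a request cost; its actual API is not shown in this diff, but under the fixed/millionTokens convention the arithmetic reduces to a one-liner, sketched below with a hypothetical `costFor` helper.

```ts
// Hypothetical helper, not the actual src/utils/pricing.ts API:
// USD cost of `tokens` tokens at a fixed rate quoted per million tokens.
const costFor = (ratePerMillion: number, tokens: number): number =>
  (ratePerMillion * tokens) / 1_000_000;

// Example with the deepseek-r1 rates above (8 in / 8 out per million tokens),
// for a request using 2,000 input and 500 output tokens:
const inputCost = costFor(8, 2_000); // 0.016 USD
const outputCost = costFor(8, 500); // 0.004 USD
const totalCost = inputCost + outputCost; // 0.02 USD
```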
package/src/config/aiModels/github.ts

@@ -14,9 +14,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/o3',
     maxOutput: 100_000,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 2.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 10, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 40, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2025-04-17',
     type: 'chat',
@@ -35,9 +37,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/o4-mini',
     maxOutput: 100_000,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.275, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 1.1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 4.4, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2025-04-17',
     type: 'chat',
@@ -54,9 +58,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/gpt-4.1',
     maxOutput: 32_768,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 2, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 8, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2025-04-14',
     type: 'chat',
@@ -74,9 +80,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/gpt-4.1-mini',
     maxOutput: 32_768,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 1.6, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2025-04-14',
     type: 'chat',
@@ -92,9 +100,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/gpt-4.1-nano',
     maxOutput: 32_768,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.025, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2025-04-14',
     type: 'chat',
@@ -111,9 +121,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/o3-mini',
     maxOutput: 100_000,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.55, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 1.1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 4.4, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2025-01-31',
     type: 'chat',
@@ -129,9 +141,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/o1-mini',
     maxOutput: 65_536,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 0.55, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 1.1, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 4.4, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2024-09-12',
     type: 'chat',
@@ -148,9 +162,11 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/o1',
     maxOutput: 100_000,
     pricing: {
-
-
-
+      units: [
+        { name: 'textInput_cacheRead', rate: 7.5, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textInput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 60, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2024-12-17',
     type: 'chat',
@@ -166,8 +182,10 @@ const githubChatModels: AIChatModelCard[] = [
     id: 'openai/o1-preview',
     maxOutput: 32_768,
     pricing: {
-
-
+      units: [
+        { name: 'textInput', rate: 15, strategy: 'fixed', unit: 'millionTokens' },
+        { name: 'textOutput', rate: 60, strategy: 'fixed', unit: 'millionTokens' },
+      ],
     },
     releasedAt: '2024-09-12',
     type: 'chat',
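
The GitHub entries additionally gain a `textInput_cacheRead` unit alongside `textInput` and `textOutput`. Assuming it prices the portion of prompt tokens served from cache (the diff only shows the rates, not the semantics), a cost breakdown for `openai/gpt-4.1` would look roughly like this:

```ts
// Sketch only; assumes textInput_cacheRead prices the cached share of the prompt.
// Rates taken from the openai/gpt-4.1 entry above (USD per million tokens).
const rates = { textInput: 2, textInput_cacheRead: 0.5, textOutput: 8 };

const perMillion = (rate: number, tokens: number): number => (rate * tokens) / 1_000_000;

// 10,000 prompt tokens of which 6,000 are cache hits, plus 1,000 output tokens:
const cost =
  perMillion(rates.textInput, 10_000 - 6_000) + // 0.008
  perMillion(rates.textInput_cacheRead, 6_000) + // 0.003
  perMillion(rates.textOutput, 1_000); // 0.008
// ≈ 0.019 USD total
```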