@lobehub/chat 1.108.2 → 1.109.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/models.json +6 -0
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/models.json +6 -0
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/models.json +6 -0
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/models.json +6 -0
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/models.json +6 -0
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/models.json +6 -0
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/models.json +6 -0
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/models.json +6 -0
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/models.json +6 -0
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/models.json +6 -0
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/models.json +6 -0
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/models.json +6 -0
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/models.json +6 -0
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/models.json +6 -0
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/models.json +6 -0
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/models.json +6 -0
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/models.json +6 -0
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/models.json +6 -0
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +1 -1
- package/src/config/aiModels/aihubmix.ts +465 -30
- package/src/config/aiModels/anthropic.ts +27 -1
- package/src/config/aiModels/groq.ts +40 -4
- package/src/config/aiModels/ollama.ts +27 -0
- package/src/config/aiModels/qwen.ts +24 -2
- package/src/libs/model-runtime/anthropic/index.ts +15 -2
- package/src/libs/model-runtime/utils/modelParse.ts +2 -2
- package/src/libs/model-runtime/utils/streams/ollama.test.ts +97 -51
- package/src/libs/model-runtime/utils/streams/ollama.ts +4 -0
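
Most of the substantive changes are new entries in the provider model catalogs under package/src/config/aiModels/. Each entry in the hunks below is an AIChatModelCard object. The following sketch is illustrative only: the field names are taken from what appears in this diff, while the optionality and exact value unions are assumptions — the real type lives in @/types/aiModel and may be stricter or wider.

// Illustrative sketch of the card shape used in the config diffs below.
// Field names come from the hunks in this diff; optionality and unions are
// assumptions, not the actual @/types/aiModel definition.
interface AIChatModelCardSketch {
  abilities?: {
    functionCall?: boolean;
    reasoning?: boolean;
    search?: boolean;
    vision?: boolean;
  };
  contextWindowTokens?: number;
  description?: string;
  displayName?: string;
  enabled?: boolean;
  id: string;
  maxOutput?: number;
  organization?: string;
  pricing?: {
    cachedInput?: number;
    currency?: string; // e.g. 'CNY'; entries without it appear to be USD
    input?: number;
    output?: number;
    writeCacheInput?: number;
  };
  releasedAt?: string; // ISO date string, e.g. '2025-08-05'
  settings?: {
    extendParams?: string[]; // e.g. ['reasoningEffort'] or ['thinkingBudget']
    searchImpl?: string; // 'params' in every entry added here
    searchProvider?: string; // e.g. 'google'
  };
  type: 'chat';
}

// Example mirroring the o4-mini card added to aihubmix.ts in this release.
const o4Mini: AIChatModelCardSketch = {
  abilities: { functionCall: true, reasoning: true, search: true, vision: true },
  contextWindowTokens: 200_000,
  displayName: 'o4-mini',
  enabled: true,
  id: 'o4-mini',
  maxOutput: 100_000,
  pricing: { cachedInput: 0.275, input: 1.1, output: 4.4 },
  releasedAt: '2025-04-17',
  settings: { extendParams: ['reasoningEffort'], searchImpl: 'params' },
  type: 'chat',
};
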
package/src/config/aiModels/aihubmix.ts
@@ -5,53 +5,97 @@ const aihubmixModels: AIChatModelCard[] = [
     abilities: {
       functionCall: true,
       reasoning: true,
+      search: true,
+      vision: true,
     },
-    contextWindowTokens:
-    description:
-
+    contextWindowTokens: 200_000,
+    description:
+      'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
+    displayName: 'o4-mini',
     enabled: true,
-    id: '
+    id: 'o4-mini',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.275,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2025-04-17',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
       reasoning: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 200_000,
     description:
-      '
-    displayName: '
-
-
+      'o4-mini-deep-research 是我们更快速、更实惠的深度研究模型——非常适合处理复杂的多步骤研究任务。它可以从互联网上搜索和综合信息,也可以通过 MCP 连接器访问并利用你的自有数据。',
+    displayName: 'o4-mini Deep Research',
+    id: 'o4-mini-deep-research',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
+    },
+    releasedAt: '2025-06-26',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
       reasoning: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 200_000,
     description:
-      '
-    displayName: '
-
-
+      'o3-pro 模型使用更多的计算来更深入地思考并始终提供更好的答案,仅支持 Responses API 下使用。',
+    displayName: 'o3-pro',
+    id: 'o3-pro',
+    maxOutput: 100_000,
+    pricing: {
+      input: 20,
+      output: 80,
+    },
+    releasedAt: '2025-06-10',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
       reasoning: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 200_000,
-    description:
+    description:
+      'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
     displayName: 'o3',
     enabled: true,
     id: 'o3',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
+    },
+    releasedAt: '2025-04-16',
     settings: {
       extendParams: ['reasoningEffort'],
       searchImpl: 'params',
@@ -62,13 +106,21 @@ const aihubmixModels: AIChatModelCard[] = [
     abilities: {
       functionCall: true,
       reasoning: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 200_000,
-    description:
-
-
-    id: '
+    description:
+      'o3-deep-research 是我们最先进的深度研究模型,专为处理复杂的多步骤研究任务而设计。它可以从互联网上搜索和综合信息,也可以通过 MCP 连接器访问并利用你的自有数据。',
+    displayName: 'o3 Deep Research',
+    id: 'o3-deep-research',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 2.5,
+      input: 10,
+      output: 40,
+    },
+    releasedAt: '2025-06-26',
     settings: {
       extendParams: ['reasoningEffort'],
       searchImpl: 'params',
@@ -78,25 +130,318 @@ const aihubmixModels: AIChatModelCard[] = [
   {
     abilities: {
       functionCall: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 1_047_576,
-    description: 'GPT-4.1
+    description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
     displayName: 'GPT-4.1',
     enabled: true,
     id: 'gpt-4.1',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
+    },
+    releasedAt: '2025-04-14',
+    settings: {
+      searchImpl: 'params',
+    },
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
+      search: true,
       vision: true,
     },
     contextWindowTokens: 1_047_576,
-    description:
+    description:
+      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
     displayName: 'GPT-4.1 mini',
-    enabled: true,
     id: 'gpt-4.1-mini',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.1,
+      input: 0.4,
+      output: 1.6,
+    },
+    releasedAt: '2025-04-14',
+    settings: {
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description: 'GPT-4.1 nano 是最快,最具成本效益的GPT-4.1模型。',
+    displayName: 'GPT-4.1 nano',
+    id: 'gpt-4.1-nano',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.025,
+      input: 0.1,
+      output: 0.4,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
+    displayName: 'ChatGPT-4o',
+    enabled: true,
+    id: 'chatgpt-4o-latest',
+    pricing: {
+      input: 5,
+      output: 15,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      search: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      '旗舰级模型,擅长数据提取、编程和文本摘要等企业级应用,拥有金融、医疗、法律和科学等领域的深厚知识。',
+    displayName: 'Grok 3',
+    enabled: true,
+    id: 'grok-3',
+    pricing: {
+      cachedInput: 0.75,
+      input: 3,
+      output: 15,
+    },
+    releasedAt: '2025-04-03',
+    settings: {
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      '轻量级模型,回话前会先思考。运行快速、智能,适用于不需要深层领域知识的逻辑任务,并能获取原始的思维轨迹。',
+    displayName: 'Grok 3 Mini',
+    enabled: true,
+    id: 'grok-3-mini',
+    pricing: {
+      input: 0.3,
+      output: 0.5,
+    },
+    releasedAt: '2025-04-03',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'Claude Opus 4.1 是 Anthropic 最新的用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。',
+    displayName: 'Claude Opus 4.1',
+    enabled: true,
+    id: 'claude-opus-4-1-20250805',
+    maxOutput: 32_000,
+    pricing: {
+      cachedInput: 1.5,
+      input: 16.5,
+      output: 82.5,
+      writeCacheInput: 18.75,
+    },
+    releasedAt: '2025-08-05',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'Claude Opus 4 是 Anthropic 用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。',
+    displayName: 'Claude Opus 4',
+    id: 'claude-opus-4-20250514',
+    maxOutput: 32_000,
+    pricing: {
+      cachedInput: 1.5,
+      input: 16.8,
+      output: 84,
+      writeCacheInput: 18.75,
+    },
+    releasedAt: '2025-05-23',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'Claude Sonnet 4 可以产生近乎即时的响应或延长的逐步思考,用户可以清晰地看到这些过程。API 用户还可以对模型思考的时间进行细致的控制',
+    displayName: 'Claude Sonnet 4',
+    enabled: true,
+    id: 'claude-sonnet-4-20250514',
+    maxOutput: 64_000,
+    pricing: {
+      cachedInput: 0.3,
+      input: 3.3,
+      output: 16.5,
+      writeCacheInput: 3.75,
+    },
+    releasedAt: '2025-05-23',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'Claude 3.7 Sonnet 是 Anthropic 迄今为止最智能的模型,也是市场上首个混合推理模型。Claude 3.7 Sonnet 可以产生近乎即时的响应或延长的逐步思考,用户可以清晰地看到这些过程。Sonnet 特别擅长编程、数据科学、视觉处理、代理任务。',
+    displayName: 'Claude 3.7 Sonnet',
+    id: 'claude-3-7-sonnet-20250219',
+    maxOutput: 64_000,
+    pricing: {
+      cachedInput: 0.3,
+      input: 3.3,
+      output: 16.5,
+      writeCacheInput: 3.75,
+    },
+    releasedAt: '2025-02-24',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'Claude 3.5 Haiku 是 Anthropic 最快的下一代模型。与 Claude 3 Haiku 相比,Claude 3.5 Haiku 在各项技能上都有所提升,并在许多智力基准测试中超越了上一代最大的模型 Claude 3 Opus。',
+    displayName: 'Claude 3.5 Haiku',
+    id: 'claude-3-5-haiku-20241022',
+    maxOutput: 8192,
+    pricing: {
+      cachedInput: 0.1,
+      input: 1.1,
+      output: 5.5,
+      writeCacheInput: 1.25,
+    },
+    releasedAt: '2024-11-05',
+    settings: {
+      extendParams: ['disableContextCaching'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      '已升级至最新版本250528;字节火山云开源部署的满血 R1,总参数量 671B,输入最高 64k。目前最稳定,推荐用这个。',
+    displayName: 'DeepSeek R1',
+    enabled: true,
+    id: 'DeepSeek-R1',
+    pricing: {
+      input: 0.546,
+      output: 2.184,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      '由微软部署提供; DeepSeek R1型号已进行小版本升级,当前版本为DeepSeek-R1-0528。在最新的更新中,DeepSeek R1通过增加计算资源和引入后训练阶段的算法优化机制,大幅提升了推理深度和推断能力。该模型在数学、编程和通用逻辑等多个基准测试中表现出色,其整体性能已接近领先模型,如O3和Gemini 2.5 Pro 。',
+    displayName: 'DeepSeek R1 0528 (Azure)',
+    id: 'azure-DeepSeek-R1-0528',
+    pricing: {
+      input: 0.4,
+      output: 1.6,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 65_536,
+    description: '字节火山云开源部署目前最稳定,推荐用这个。已经自动升级为最新发布的版本 250324 。',
+    displayName: 'DeepSeek V3',
+    enabled: true,
+    id: 'DeepSeek-V3',
+    pricing: {
+      input: 0.272,
+      output: 1.088,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      '模型供应商为:sophnet平台。DeepSeek V3 Fast 是 DeepSeek V3 0324 版本的高TPS极速版,满血非量化,代码与数学能力更强,响应更快!',
+    displayName: 'DeepSeek V3 (Fast)',
+    id: 'DeepSeek-V3-Fast',
+    pricing: {
+      input: 0.55,
+      output: 2.2,
+    },
     type: 'chat',
   },
   {
@@ -132,11 +477,17 @@ const aihubmixModels: AIChatModelCard[] = [
       search: true,
       vision: true,
     },
-    contextWindowTokens:
-    description: 'Gemini 2.5 Flash
+    contextWindowTokens: 1_048_576 + 65_536,
+    description: 'Gemini 2.5 Flash 是 Google 性价比最高的模型,提供全面的功能。',
     displayName: 'Gemini 2.5 Flash',
     enabled: true,
     id: 'gemini-2.5-flash',
+    maxOutput: 65_536,
+    pricing: {
+      cachedInput: 0.075,
+      input: 0.3,
+      output: 2.5,
+    },
     releasedAt: '2025-06-17',
     settings: {
       extendParams: ['thinkingBudget'],
@@ -148,23 +499,107 @@ const aihubmixModels: AIChatModelCard[] = [
   {
     abilities: {
       functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_048_576 + 65_536,
+    description: 'Gemini 2.5 Flash-Lite 是 Google 最小、性价比最高的模型,专为大规模使用而设计。',
+    displayName: 'Gemini 2.5 Flash-Lite',
+    id: 'gemini-2.5-flash-lite',
+    maxOutput: 65_536,
+    pricing: {
+      cachedInput: 0.025,
+      input: 0.1,
+      output: 0.4,
+    },
+    releasedAt: '2025-07-22',
+    settings: {
+      extendParams: ['thinkingBudget'],
+      searchImpl: 'params',
+      searchProvider: 'google',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      '基于Qwen3的思考模式开源模型,相较上一版本(通义千问3-235B-A22B)逻辑能力、通用能力、知识增强及创作能力均有大幅提升,适用于高难度强推理场景。',
+    displayName: 'Qwen3 235B A22B Thinking 2507',
+    enabled: true,
+    id: 'qwen3-235b-a22b-thinking-2507',
+    maxOutput: 32_768,
+    organization: 'Qwen',
+    pricing: {
+      currency: 'CNY',
+      input: 0.28,
+      output: 2.8,
+    },
+    releasedAt: '2025-07-25',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      '基于Qwen3的非思考模式开源模型,相较上一版本(通义千问3-235B-A22B)主观创作能力与模型安全性均有小幅度提升。',
+    displayName: 'Qwen3 235B A22B Instruct 2507',
+    enabled: true,
+    id: 'qwen3-235b-a22b-instruct-2507',
+    maxOutput: 32_768,
+    organization: 'Qwen',
+    pricing: {
+      currency: 'CNY',
+      input: 0.28,
+      output: 1.12,
+    },
+    releasedAt: '2025-07-22',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
     },
-    contextWindowTokens:
-    description:
-
+    contextWindowTokens: 131_072,
+    description:
+      '基于Qwen3的思考模式开源模型,相较上一版本(通义千问3-30B-A3B)逻辑能力、通用能力、知识增强及创作能力均有大幅提升,适用于高难度强推理场景。',
+    displayName: 'Qwen3 30B A3B Thinking 2507',
     enabled: true,
-    id: '
+    id: 'qwen3-30b-a3b-thinking-2507',
+    maxOutput: 32_768,
+    organization: 'Qwen',
+    pricing: {
+      currency: 'CNY',
+      input: 0.12,
+      output: 1.2,
+    },
+    releasedAt: '2025-07-30',
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
     },
-    contextWindowTokens:
-    description:
-
+    contextWindowTokens: 131_072,
+    description:
+      '相较上一版本(Qwen3-30B-A3B)中英文和多语言整体通用能力有大幅提升。主观开放类任务专项优化,显著更加符合用户偏好,能够提供更有帮助性的回复。',
+    displayName: 'Qwen3 30B A3B Instruct 2507',
     enabled: true,
-    id: '
+    id: 'qwen3-30b-a3b-instruct-2507',
+    maxOutput: 32_768,
+    organization: 'Qwen',
+    pricing: {
+      input: 0.12,
+      output: 0.48,
+    },
+    releasedAt: '2025-07-29',
     type: 'chat',
   },
 ];
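
Since aihubmixModels is plain data, downstream code can query it directly. The sketch below is purely illustrative of that idea, not how lobe-chat itself consumes these cards; it also assumes pricing values are per one million tokens, which this diff does not state explicitly.

// Purely illustrative: look up a card by id and estimate a request cost.
// Assumes pricing values are per 1M tokens; the unit is not stated in the diff.
type PricingSketch = { cachedInput?: number; input?: number; output?: number };
type CardSketch = { id: string; pricing?: PricingSketch };

function estimateCost(
  catalog: CardSketch[],
  modelId: string,
  inputTokens: number,
  outputTokens: number,
): number | undefined {
  const pricing = catalog.find((m) => m.id === modelId)?.pricing;
  if (!pricing || pricing.input === undefined || pricing.output === undefined) return undefined;
  return (
    (inputTokens / 1_000_000) * pricing.input +
    (outputTokens / 1_000_000) * pricing.output
  );
}

// e.g. estimateCost(aihubmixModels, 'o3', 12_000, 2_000) → 0.04 with the prices above
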
package/src/config/aiModels/anthropic.ts
@@ -1,6 +1,33 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
 const anthropicChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'Claude Opus 4.1 是 Anthropic 最新的用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。',
+    displayName: 'Claude Opus 4.1',
+    enabled: true,
+    id: 'claude-opus-4-1-20250805',
+    maxOutput: 32_000,
+    pricing: {
+      cachedInput: 1.5,
+      input: 15,
+      output: 75,
+      writeCacheInput: 18.75,
+    },
+    releasedAt: '2025-08-05',
+    settings: {
+      extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -12,7 +39,6 @@ const anthropicChatModels: AIChatModelCard[] = [
     description:
       'Claude Opus 4 是 Anthropic 用于处理高度复杂任务的最强大模型。它在性能、智能、流畅性和理解力方面表现卓越。',
     displayName: 'Claude Opus 4',
-    enabled: true,
     id: 'claude-opus-4-20250514',
     maxOutput: 32_000,
     pricing: {