@lobehub/chat 1.50.4 → 1.51.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +1 -0
- package/CHANGELOG.md +60 -0
- package/changelog/v1.json +21 -0
- package/docs/changelog/2025-01-22-new-ai-provider.mdx +2 -2
- package/docs/changelog/2025-02-02-deepseek-r1.mdx +33 -0
- package/docs/changelog/2025-02-02-deepseek-r1.zh-CN.mdx +29 -0
- package/docs/changelog/index.json +6 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +7 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +7 -0
- package/locales/ar/modelProvider.json +4 -0
- package/locales/bg-BG/modelProvider.json +4 -0
- package/locales/de-DE/modelProvider.json +4 -0
- package/locales/en-US/modelProvider.json +4 -0
- package/locales/es-ES/modelProvider.json +4 -0
- package/locales/fa-IR/modelProvider.json +4 -0
- package/locales/fr-FR/modelProvider.json +4 -0
- package/locales/it-IT/modelProvider.json +4 -0
- package/locales/ja-JP/modelProvider.json +4 -0
- package/locales/ko-KR/modelProvider.json +4 -0
- package/locales/nl-NL/modelProvider.json +4 -0
- package/locales/pl-PL/modelProvider.json +4 -0
- package/locales/pt-BR/modelProvider.json +4 -0
- package/locales/ru-RU/modelProvider.json +4 -0
- package/locales/tr-TR/modelProvider.json +4 -0
- package/locales/vi-VN/modelProvider.json +4 -0
- package/locales/zh-CN/modelProvider.json +4 -0
- package/locales/zh-TW/modelProvider.json +4 -0
- package/package.json +1 -1
- package/src/app/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +8 -0
- package/src/config/aiModels/github.ts +30 -2
- package/src/config/aiModels/qwen.ts +139 -10
- package/src/config/modelProviders/github.ts +27 -3
- package/src/config/modelProviders/qwen.ts +90 -12
- package/src/hooks/useModelSupportReasoning.ts +15 -0
- package/src/libs/agent-runtime/siliconcloud/index.ts +3 -2
- package/src/locales/default/modelProvider.ts +5 -0
- package/src/store/aiInfra/slices/aiModel/action.ts +1 -0
- package/src/store/aiInfra/slices/aiModel/selectors.ts +7 -0
- package/src/store/user/slices/modelList/selectors/modelProvider.ts +4 -0
- package/src/types/aiModel.ts +5 -0
- package/src/types/llm.ts +9 -0
- package/src/utils/_deprecated/parseModels.test.ts +11 -0
- package/src/utils/_deprecated/parseModels.ts +4 -0
- package/src/utils/merge.test.ts +56 -0
- package/src/utils/merge.ts +3 -2
- package/src/utils/parseModels.test.ts +14 -0
- package/src/utils/parseModels.ts +4 -0

package/src/config/aiModels/qwen.ts
CHANGED
@@ -5,11 +5,12 @@ const qwenChatModels: AIChatModelCard[] = [
     abilities: {
       functionCall: true,
     },
-    contextWindowTokens:
+    contextWindowTokens: 1_000_000,
     description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入。',
     displayName: 'Qwen Turbo',
     enabled: true,
     id: 'qwen-turbo-latest',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 0.3,
@@ -26,6 +27,7 @@ const qwenChatModels: AIChatModelCard[] = [
     displayName: 'Qwen Plus',
     enabled: true,
     id: 'qwen-plus-latest',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 0.8,
@@ -43,6 +45,7 @@ const qwenChatModels: AIChatModelCard[] = [
     displayName: 'Qwen Max',
     enabled: true,
     id: 'qwen-max-latest',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 20,
@@ -56,6 +59,7 @@ const qwenChatModels: AIChatModelCard[] = [
       '通义千问超大规模语言模型,支持长文本上下文,以及基于长文档、多文档等多个场景的对话功能。',
     displayName: 'Qwen Long',
     id: 'qwen-long',
+    maxOutput: 6000,
     pricing: {
       currency: 'CNY',
       input: 0.5,
@@ -73,10 +77,11 @@ const qwenChatModels: AIChatModelCard[] = [
     displayName: 'Qwen VL Plus',
     enabled: true,
     id: 'qwen-vl-plus-latest',
+    maxOutput: 2048,
     pricing: {
       currency: 'CNY',
-      input:
-      output:
+      input: 1.5,
+      output: 4.5,
     },
     type: 'chat',
   },
@@ -84,16 +89,34 @@ const qwenChatModels: AIChatModelCard[] = [
     abilities: {
       vision: true,
     },
-    contextWindowTokens:
+    contextWindowTokens: 32_768,
     description:
       '通义千问超大规模视觉语言模型。相比增强版,再次提升视觉推理能力和指令遵循能力,提供更高的视觉感知和认知水平。',
     displayName: 'Qwen VL Max',
     enabled: true,
     id: 'qwen-vl-max-latest',
+    maxOutput: 2048,
     pricing: {
       currency: 'CNY',
-      input:
-      output:
+      input: 3,
+      output: 9,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 34_096,
+    description:
+      '通义千问OCR是文字提取专有模型,专注于文档、表格、试题、手写体文字等类型图像的文字提取能力。它能够识别多种文字,目前支持的语言有:汉语、英语、法语、日语、韩语、德语、俄语、意大利语、越南语、阿拉伯语。',
+    displayName: 'Qwen VL OCR',
+    id: 'qwen-vl-ocr-latest',
+    maxOutput: 4096,
+    pricing: {
+      currency: 'CNY',
+      input: 5,
+      output: 5,
     },
     type: 'chat',
   },
@@ -102,6 +125,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问数学模型是专门用于数学解题的语言模型。',
     displayName: 'Qwen Math Turbo',
     id: 'qwen-math-turbo-latest',
+    maxOutput: 3072,
     pricing: {
       currency: 'CNY',
       input: 2,
@@ -114,6 +138,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问数学模型是专门用于数学解题的语言模型。',
     displayName: 'Qwen Math Plus',
     id: 'qwen-math-plus-latest',
+    maxOutput: 3072,
     pricing: {
       currency: 'CNY',
       input: 4,
@@ -126,6 +151,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问代码模型。',
     displayName: 'Qwen Coder Turbo',
     id: 'qwen-coder-turbo-latest',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 2,
@@ -138,6 +164,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问代码模型。',
     displayName: 'Qwen Coder Plus',
     id: 'qwen-coder-plus-latest',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 3.5,
@@ -146,10 +173,14 @@ const qwenChatModels: AIChatModelCard[] = [
     type: 'chat',
   },
   {
+    abilities: {
+      functionCall: true,
+    },
     contextWindowTokens: 32_768,
     description: 'QwQ模型是由 Qwen 团队开发的实验性研究模型,专注于增强 AI 推理能力。',
     displayName: 'QwQ 32B Preview',
     id: 'qwq-32b-preview',
+    maxOutput: 16_384,
     pricing: {
       currency: 'CNY',
       input: 3.5,
@@ -166,6 +197,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: 'QVQ模型是由 Qwen 团队开发的实验性研究模型,专注于提升视觉推理能力,尤其在数学推理领域。',
     displayName: 'QVQ 72B Preview',
     id: 'qvq-72b-preview',
+    maxOutput: 16_384,
     pricing: {
       currency: 'CNY',
       input: 12,
@@ -182,10 +214,11 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问2.5对外开源的7B规模的模型。',
     displayName: 'Qwen2.5 7B',
     id: 'qwen2.5-7b-instruct',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
-      input:
-      output:
+      input: 0.5,
+      output: 1,
     },
     type: 'chat',
   },
@@ -197,10 +230,11 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问2.5对外开源的14B规模的模型。',
     displayName: 'Qwen2.5 14B',
     id: 'qwen2.5-14b-instruct',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
-      input:
-      output:
+      input: 1,
+      output: 3,
     },
     type: 'chat',
   },
@@ -212,6 +246,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问2.5对外开源的32B规模的模型。',
     displayName: 'Qwen2.5 32B',
     id: 'qwen2.5-32b-instruct',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 3.5,
@@ -227,6 +262,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问2.5对外开源的72B规模的模型。',
     displayName: 'Qwen2.5 72B',
     id: 'qwen2.5-72b-instruct',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 4,
@@ -234,11 +270,29 @@ const qwenChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 1_000_000,
+    description: '通义千问2.5对外开源的72B规模的模型。',
+    displayName: 'Qwen2.5 14B 1M',
+    id: 'qwen2.5-14b-instruct-1m',
+    maxOutput: 8192,
+    pricing: {
+      currency: 'CNY',
+      input: 1,
+      output: 3,
+    },
+    releasedAt: '2025-01-27',
+    type: 'chat',
+  },
   {
     contextWindowTokens: 4096,
     description: 'Qwen-Math 模型具有强大的数学解题能力。',
     displayName: 'Qwen2.5 Math 7B',
     id: 'qwen2.5-math-7b-instruct',
+    maxOutput: 3072,
     pricing: {
       currency: 'CNY',
       input: 1,
@@ -251,6 +305,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: 'Qwen-Math 模型具有强大的数学解题能力。',
     displayName: 'Qwen2.5 Math 72B',
     id: 'qwen2.5-math-72b-instruct',
+    maxOutput: 3072,
     pricing: {
       currency: 'CNY',
       input: 4,
@@ -263,6 +318,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问代码模型开源版。',
     displayName: 'Qwen2.5 Coder 7B',
     id: 'qwen2.5-coder-7b-instruct',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 1,
@@ -275,6 +331,7 @@ const qwenChatModels: AIChatModelCard[] = [
     description: '通义千问代码模型开源版。',
     displayName: 'Qwen2.5 Coder 32B',
     id: 'qwen2.5-coder-32b-instruct',
+    maxOutput: 8192,
     pricing: {
       currency: 'CNY',
       input: 3.5,
@@ -312,6 +369,78 @@ const qwenChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      '指令跟随、数学、解题、代码整体提升,万物识别能力提升,支持多样格式直接精准定位视觉元素,支持对长视频文件(最长10分钟)进行理解和秒级别的事件时刻定位,能理解时间先后和快慢,基于解析和定位能力支持操控OS或Mobile的Agent,关键信息抽取能力和Json格式输出能力强,此版本为72B版本,本系列能力最强的版本。',
+    displayName: 'Qwen2.5 VL 72B',
+    id: 'qwen2.5-vl-72b-instruct',
+    maxOutput: 2048,
+    pricing: {
+      currency: 'CNY',
+      input: 16,
+      output: 48,
+    },
+    releasedAt: '2025-01-27',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      '指令跟随、数学、解题、代码整体提升,万物识别能力提升,支持多样格式直接精准定位视觉元素,支持对长视频文件(最长10分钟)进行理解和秒级别的事件时刻定位,能理解时间先后和快慢,基于解析和定位能力支持操控OS或Mobile的Agent,关键信息抽取能力和Json格式输出能力强,此版本为72B版本,本系列能力最强的版本。',
+    displayName: 'Qwen2.5 VL 7B',
+    id: 'qwen2.5-vl-7b-instruct',
+    maxOutput: 2048,
+    pricing: {
+      currency: 'CNY',
+      input: 2,
+      output: 5,
+    },
+    releasedAt: '2025-01-27',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek-R1 在后训练阶段大规模使用了强化学习技术,在仅有极少标注数据的情况下,极大提升了模型推理能力,尤其在数学、代码、自然语言推理等任务上。',
+    displayName: 'DeepSeek R1',
+    id: 'deepseek-r1',
+    maxOutput: 8192,
+    pricing: {
+      currency: 'CNY',
+      input: 0,
+      output: 0,
+    },
+    releasedAt: '2025-01-27',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek-V3 为自研 MoE 模型,671B 参数,激活 37B,在 14.8T token 上进行了预训练,在长文本、代码、数学、百科、中文能力上表现优秀。',
+    displayName: 'DeepSeek V3',
+    id: 'deepseek-v3',
+    maxOutput: 8192,
+    pricing: {
+      currency: 'CNY',
+      input: 0,
+      output: 0,
+    },
+    releasedAt: '2025-01-27',
+    type: 'chat',
+  },
 ];
 
 export const allModels = [...qwenChatModels];

package/src/config/modelProviders/github.ts
CHANGED
@@ -37,7 +37,7 @@ const Github: ModelProviderCard = {
       vision: true,
     },
     {
-      contextWindowTokens:
+      contextWindowTokens: 134_144,
       description: '一种经济高效的AI解决方案,适用于多种文本和图像任务。',
       displayName: 'OpenAI GPT-4o mini',
       enabled: true,
@@ -47,15 +47,21 @@ const Github: ModelProviderCard = {
       vision: true,
     },
     {
-      contextWindowTokens:
+      contextWindowTokens: 134_144,
      description: 'OpenAI GPT-4系列中最先进的多模态模型,可以处理文本和图像输入。',
       displayName: 'OpenAI GPT-4o',
       enabled: true,
       functionCall: true,
       id: 'gpt-4o',
-      maxOutput:
+      maxOutput: 16_384,
       vision: true,
     },
+    {
+      contextWindowTokens: 128_000,
+      displayName: 'DeepSeek R1',
+      id: 'DeepSeek-R1',
+      maxOutput: 4096,
+    },
     {
       contextWindowTokens: 262_144,
       description:
@@ -112,6 +118,12 @@ const Github: ModelProviderCard = {
       id: 'mistral-large',
       maxOutput: 4096,
     },
+    {
+      contextWindowTokens: 262_144,
+      displayName: 'Codestral',
+      id: 'Codestral-2501',
+      maxOutput: 4096,
+    },
     {
       contextWindowTokens: 131_072,
       description: '在高分辨率图像上表现出色的图像推理能力,适用于视觉理解应用。',
@@ -166,6 +178,18 @@ const Github: ModelProviderCard = {
       id: 'meta-llama-3-70b-instruct',
       maxOutput: 4096,
     },
+    {
+      contextWindowTokens: 16_384,
+      displayName: 'Phi 4',
+      id: 'Phi-4',
+      maxOutput: 16_384,
+    },
+    {
+      contextWindowTokens: 131_072,
+      displayName: 'Phi 3.5 MoE',
+      id: 'Phi-3.5-MoE-instruct',
+      maxOutput: 4096,
+    },
     {
       contextWindowTokens: 131_072,
       description: 'Phi-3-mini模型的更新版。',

package/src/config/modelProviders/qwen.ts
CHANGED
@@ -4,7 +4,7 @@ import { ModelProviderCard } from '@/types/llm';
 const Qwen: ModelProviderCard = {
   chatModels: [
     {
-      contextWindowTokens:
+      contextWindowTokens: 1_000_000,
       description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入。',
       displayName: 'Qwen Turbo',
       enabled: true,
@@ -64,13 +64,13 @@ const Qwen: ModelProviderCard = {
       id: 'qwen-vl-plus-latest',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 1.5,
+        output: 4.5,
       },
       vision: true,
     },
     {
-      contextWindowTokens:
+      contextWindowTokens: 32_768,
       description:
         '通义千问超大规模视觉语言模型。相比增强版,再次提升视觉推理能力和指令遵循能力,提供更高的视觉感知和认知水平。',
       displayName: 'Qwen VL Max',
@@ -78,8 +78,21 @@ const Qwen: ModelProviderCard = {
       id: 'qwen-vl-max-latest',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 3,
+        output: 9,
+      },
+      vision: true,
+    },
+    {
+      contextWindowTokens: 34_096,
+      description:
+        '通义千问OCR是文字提取专有模型,专注于文档、表格、试题、手写体文字等类型图像的文字提取能力。它能够识别多种文字,目前支持的语言有:汉语、英语、法语、日语、韩语、德语、俄语、意大利语、越南语、阿拉伯语。',
+      displayName: 'Qwen VL OCR',
+      id: 'qwen-vl-ocr-latest',
+      pricing: {
+        currency: 'CNY',
+        input: 5,
+        output: 5,
       },
       vision: true,
     },
@@ -134,9 +147,22 @@ const Qwen: ModelProviderCard = {
       id: 'qwq-32b-preview',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 3.5,
+        output: 7,
+      },
+    },
+    {
+      contextWindowTokens: 32_768,
+      description: 'QVQ模型是由 Qwen 团队开发的实验性研究模型,专注于提升视觉推理能力,尤其在数学推理领域。',
+      displayName: 'QVQ 72B Preview',
+      id: 'qvq-72b-preview',
+      pricing: {
+        currency: 'CNY',
+        input: 12,
+        output: 36,
       },
+      releasedAt: '2024-12-25',
+      vision: true,
     },
     {
       contextWindowTokens: 131_072,
@@ -146,8 +172,8 @@ const Qwen: ModelProviderCard = {
       id: 'qwen2.5-7b-instruct',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 0.5,
+        output: 1,
       },
     },
     {
@@ -158,8 +184,8 @@ const Qwen: ModelProviderCard = {
       id: 'qwen2.5-14b-instruct',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 1,
+        output: 3,
       },
     },
     {
@@ -186,6 +212,18 @@ const Qwen: ModelProviderCard = {
         output: 12,
       },
     },
+    {
+      contextWindowTokens: 1_000_000,
+      description: '通义千问2.5对外开源的72B规模的模型。',
+      displayName: 'Qwen2.5 14B 1M',
+      functionCall: true,
+      id: 'qwen2.5-14b-instruct-1m',
+      pricing: {
+        currency: 'CNY',
+        input: 1,
+        output: 3,
+      },
+    },
     {
       contextWindowTokens: 4096,
       description: 'Qwen-Math 模型具有强大的数学解题能力。',
@@ -254,6 +292,46 @@ const Qwen: ModelProviderCard = {
       },
       vision: true,
     },
+    {
+      contextWindowTokens: 128_000,
+      description:
+        '指令跟随、数学、解题、代码整体提升,万物识别能力提升,支持多样格式直接精准定位视觉元素,支持对长视频文件(最长10分钟)进行理解和秒级别的事件时刻定位,能理解时间先后和快慢,基于解析和定位能力支持操控OS或Mobile的Agent,关键信息抽取能力和Json格式输出能力强,此版本为72B版本,本系列能力最强的版本。',
+      displayName: 'Qwen2.5 VL 72B',
+      id: 'qwen2.5-vl-72b-instruct',
+      pricing: {
+        currency: 'CNY',
+        input: 16,
+        output: 48,
+      },
+      releasedAt: '2025-01-26',
+      vision: true,
+    },
+    {
+      contextWindowTokens: 65_536,
+      description:
+        'DeepSeek-R1 在后训练阶段大规模使用了强化学习技术,在仅有极少标注数据的情况下,极大提升了模型推理能力,尤其在数学、代码、自然语言推理等任务上。',
+      displayName: 'DeepSeek R1',
+      id: 'deepseek-r1',
+      pricing: {
+        currency: 'CNY',
+        input: 0,
+        output: 0,
+      },
+      releasedAt: '2025-01-27',
+    },
+    {
+      contextWindowTokens: 65_536,
+      description:
+        'DeepSeek-V3 为自研 MoE 模型,671B 参数,激活 37B,在 14.8T token 上进行了预训练,在长文本、代码、数学、百科、中文能力上表现优秀。',
+      displayName: 'DeepSeek V3',
+      id: 'deepseek-v3',
+      pricing: {
+        currency: 'CNY',
+        input: 0,
+        output: 0,
+      },
+      releasedAt: '2025-01-27',
+    },
   ],
   checkModel: 'qwen-turbo-latest',
   description:

package/src/hooks/useModelSupportReasoning.ts
ADDED
@@ -0,0 +1,15 @@
+import { isDeprecatedEdition } from '@/const/version';
+import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
+import { useUserStore } from '@/store/user';
+import { modelProviderSelectors } from '@/store/user/selectors';
+
+export const useModelSupportReasoning = (model: string, provider: string) => {
+  const newValue = useAiInfraStore(aiModelSelectors.isModelSupportReasoning(model, provider));
+
+  // TODO: remove this in V2.0
+  const oldValue = useUserStore(modelProviderSelectors.isModelEnabledReasoning(model));
+  if (isDeprecatedEdition) return oldValue;
+  //
+
+  return newValue;
+};
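
The new `useModelSupportReasoning` hook reads reasoning support from the aiInfra store and, in deprecated editions, falls back to the legacy user-store selector. A minimal consumption sketch, assuming a hypothetical `ReasoningBadge` component (only `useModelSupportReasoning` itself comes from this release):

```tsx
import { memo } from 'react';

import { useModelSupportReasoning } from '@/hooks/useModelSupportReasoning';

interface ReasoningBadgeProps {
  model: string; // e.g. 'deepseek-r1'
  provider: string; // e.g. 'qwen'
}

// Hypothetical consumer: renders a marker only when the resolved model card
// declares reasoning support (abilities.reasoning in the new model config).
const ReasoningBadge = memo<ReasoningBadgeProps>(({ model, provider }) => {
  const supportsReasoning = useModelSupportReasoning(model, provider);

  if (!supportsReasoning) return null;

  return <span title="supports deep thinking">R1</span>;
});

export default ReasoningBadge;
```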

package/src/libs/agent-runtime/siliconcloud/index.ts
CHANGED
@@ -57,7 +57,7 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
       const functionCallKeywords = [
         'qwen/qwen2.5',
         'thudm/glm-4',
-        'deepseek-ai/
+        'deepseek-ai/deepseek',
         'internlm/internlm2_5',
         'meta-llama/meta-llama-3.1',
         'meta-llama/meta-llama-3.3',
@@ -65,6 +65,7 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
 
       const visionKeywords = [
         'opengvlab/internvl',
+        'qwen/qvq',
         'qwen/qwen2-vl',
         'teleai/telemm',
         'deepseek-ai/deepseek-vl',
@@ -74,7 +75,7 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
 
       return {
         enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
-        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('deepseek-r1'),
         id: model.id,
         vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
       };
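
The SiliconCloud runtime derives abilities from each fetched model id by keyword matching. After this change, ids matching `'deepseek-ai/deepseek'` count as function-calling models unless they contain `deepseek-r1`, and `'qwen/qvq'` is treated as a vision model. A standalone sketch of that matching logic (keyword lists abbreviated; the model ids are illustrative):

```ts
// Abbreviated keyword lists mirroring the diff above.
const functionCallKeywords = ['qwen/qwen2.5', 'thudm/glm-4', 'deepseek-ai/deepseek'];
const visionKeywords = ['opengvlab/internvl', 'qwen/qvq', 'qwen/qwen2-vl'];

const detectAbilities = (id: string) => {
  const lower = id.toLowerCase();

  return {
    // DeepSeek ids match the functionCall keyword, but R1 is explicitly excluded.
    functionCall:
      functionCallKeywords.some((keyword) => lower.includes(keyword)) &&
      !lower.includes('deepseek-r1'),
    vision: visionKeywords.some((keyword) => lower.includes(keyword)),
  };
};

detectAbilities('deepseek-ai/DeepSeek-V3'); // { functionCall: true,  vision: false }
detectAbilities('deepseek-ai/DeepSeek-R1'); // { functionCall: false, vision: false }
detectAbilities('Qwen/QVQ-72B-Preview');    // { functionCall: false, vision: true }
```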

package/src/locales/default/modelProvider.ts
CHANGED
@@ -231,6 +231,11 @@ export default {
       title: '模型 ID',
     },
     modalTitle: '自定义模型配置',
+    reasoning: {
+      extra:
+        '此配置将仅开启模型深度思考的能力,具体效果完全取决于模型本身,请自行测试该模型是否具备可用的深度思考能力',
+      title: '支持深度思考',
+    },
     tokens: {
       extra: '设置模型支持的最大 Token 数',
       title: '最大上下文窗口',

package/src/store/aiInfra/slices/aiModel/selectors.ts
CHANGED
@@ -48,6 +48,12 @@ const isModelSupportVision = (id: string, provider: string) => (s: AIProviderSto
   return model?.abilities?.vision;
 };
 
+const isModelSupportReasoning = (id: string, provider: string) => (s: AIProviderStoreState) => {
+  const model = getEnabledModelById(id, provider)(s);
+
+  return model?.abilities?.reasoning;
+};
+
 const isModelHasContextWindowToken =
   (id: string, provider: string) => (s: AIProviderStoreState) => {
     const model = getEnabledModelById(id, provider)(s);
@@ -71,6 +77,7 @@ export const aiModelSelectors = {
   isModelEnabled,
   isModelHasContextWindowToken,
   isModelLoading,
+  isModelSupportReasoning,
   isModelSupportToolUse,
   isModelSupportVision,
   modelContextWindowTokens,

package/src/store/user/slices/modelList/selectors/modelProvider.ts
CHANGED
@@ -122,6 +122,9 @@ const isModelEnabledFunctionCall = (id: string) => (s: UserStore) =>
 const isModelEnabledVision = (id: string) => (s: UserStore) =>
   getModelCardById(id)(s)?.vision || id.includes('vision');
 
+const isModelEnabledReasoning = (id: string) => (s: UserStore) =>
+  getModelCardById(id)(s)?.reasoning || false;
+
 const isModelEnabledFiles = (id: string) => (s: UserStore) => getModelCardById(id)(s)?.files;
 
 const isModelEnabledUpload = (id: string) => (s: UserStore) =>
@@ -144,6 +147,7 @@ export const modelProviderSelectors = {
   getModelCardsById,
   isModelEnabledFiles,
   isModelEnabledFunctionCall,
+  isModelEnabledReasoning,
   isModelEnabledUpload,
   isModelEnabledVision,
   isModelHasMaxToken,
package/src/types/aiModel.ts
CHANGED
@@ -43,6 +43,7 @@ export interface ModelAbilities {
 const AiModelAbilitiesSchema = z.object({
   // files: z.boolean().optional(),
   functionCall: z.boolean().optional(),
+  reasoning: z.boolean().optional(),
   vision: z.boolean().optional(),
 });
 
@@ -205,6 +206,10 @@ export interface AIRealtimeModelCard extends AIBaseModelCard {
    * whether model supports function call
    */
   functionCall?: boolean;
+  /**
+   * whether model supports reasoning
+   */
+  reasoning?: boolean;
   /**
    * whether model supports vision
    */
package/src/types/llm.ts
CHANGED
@@ -42,6 +42,15 @@ export interface ChatModelCard {
   legacy?: boolean;
   maxOutput?: number;
   pricing?: ChatModelPricing;
+
+  /**
+   * whether model supports reasoning
+   */
+  reasoning?: boolean;
+
+  /**
+   * whether model is legacy (deprecated but not removed yet)
+   */
   releasedAt?: string;
 
   /**
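
With `reasoning?: boolean` added to `ChatModelCard`, legacy-format model cards can flag deep-thinking support directly, mirroring the `reasoning` ability added to `ModelAbilities` above. A hypothetical card using the new field; the values echo the DeepSeek R1 entry added to the Qwen provider earlier in this diff, with `reasoning: true` included purely for illustration:

```ts
import { ChatModelCard } from '@/types/llm';

// Legacy-format model card flagging deep-thinking support via the new field.
const deepseekR1Card: ChatModelCard = {
  contextWindowTokens: 65_536,
  displayName: 'DeepSeek R1',
  id: 'deepseek-r1',
  pricing: {
    currency: 'CNY',
    input: 0,
    output: 0,
  },
  reasoning: true,
  releasedAt: '2025-01-27',
};
```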