@lobehub/chat 1.97.12 → 1.97.14
This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registries.
- package/CHANGELOG.md +59 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/chat.json +3 -0
- package/locales/ar/models.json +8 -8
- package/locales/bg-BG/chat.json +3 -0
- package/locales/bg-BG/models.json +6 -6
- package/locales/de-DE/chat.json +3 -0
- package/locales/de-DE/models.json +4 -4
- package/locales/en-US/chat.json +3 -0
- package/locales/en-US/models.json +4 -4
- package/locales/es-ES/chat.json +3 -0
- package/locales/es-ES/models.json +5 -5
- package/locales/fa-IR/chat.json +3 -0
- package/locales/fa-IR/models.json +6 -6
- package/locales/fr-FR/chat.json +3 -0
- package/locales/fr-FR/models.json +3 -3
- package/locales/it-IT/chat.json +3 -0
- package/locales/it-IT/models.json +3 -3
- package/locales/ja-JP/chat.json +3 -0
- package/locales/ja-JP/models.json +6 -6
- package/locales/ko-KR/chat.json +3 -0
- package/locales/ko-KR/models.json +7 -7
- package/locales/nl-NL/chat.json +3 -0
- package/locales/nl-NL/models.json +4 -4
- package/locales/pl-PL/chat.json +3 -0
- package/locales/pl-PL/models.json +6 -6
- package/locales/pt-BR/chat.json +3 -0
- package/locales/pt-BR/models.json +2 -20
- package/locales/ru-RU/chat.json +3 -0
- package/locales/ru-RU/models.json +5 -5
- package/locales/tr-TR/chat.json +3 -0
- package/locales/tr-TR/models.json +7 -7
- package/locales/vi-VN/chat.json +3 -0
- package/locales/vi-VN/models.json +4 -4
- package/locales/zh-CN/chat.json +3 -0
- package/locales/zh-CN/models.json +1 -1
- package/locales/zh-TW/chat.json +3 -0
- package/locales/zh-TW/models.json +1 -1
- package/package.json +2 -2
- package/src/app/[variants]/(main)/chat/@session/features/SessionListContent/Inbox/index.tsx +2 -15
- package/src/config/aiModels/google.ts +5 -40
- package/src/config/aiModels/moonshot.ts +25 -2
- package/src/config/aiModels/openai.ts +50 -41
- package/src/config/aiModels/volcengine.ts +58 -53
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +11 -0
- package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx +57 -0
- package/src/libs/model-runtime/volcengine/index.ts +9 -5
- package/src/locales/default/chat.ts +3 -0
- package/src/services/chat.ts +4 -0
- package/src/store/chat/slices/message/selectors.ts +0 -6
- package/src/types/agent/chatConfig.ts +1 -0
- package/src/types/aiModel.ts +4 -0
package/src/config/aiModels/moonshot.ts
CHANGED
@@ -2,6 +2,29 @@ import { AIChatModelCard } from '@/types/aiModel';
 
 // https://platform.moonshot.cn/docs/pricing/chat
 const moonshotChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      search: true,
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'kimi-k2 是一款具备超强代码和 Agent 能力的 MoE 架构基础模型,总参数 1T,激活参数 32B。在通用知识推理、编程、数学、Agent 等主要类别的基准性能测试中,K2 模型的性能超过其他主流开源模型。',
+    displayName: 'Kimi K2',
+    enabled: true,
+    id: 'kimi-k2-0711-preview',
+    pricing: {
+      cachedInput: 1,
+      currency: 'CNY',
+      input: 4,
+      output: 16,
+    },
+    releasedAt: '2025-07-11',
+    settings: {
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -17,7 +40,7 @@ const moonshotChatModels: AIChatModelCard[] = [
     pricing: {
       cachedInput: 1,
       currency: 'CNY',
-      input: 10,
+      input: 10, // 128k 上下文时
       output: 30,
     },
     releasedAt: '2025-02-17',
@@ -56,7 +79,7 @@ const moonshotChatModels: AIChatModelCard[] = [
     id: 'moonshot-v1-auto',
     pricing: {
       currency: 'CNY',
-      input: 10,
+      input: 10, // 128k 上下文时
       output: 30,
     },
     settings: {
package/src/config/aiModels/openai.ts
CHANGED
@@ -186,7 +186,7 @@ export const openaiChatModels: AIChatModelCard[] = [
     description:
       'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
     displayName: 'o1-mini',
-    id: 'o1-mini',
+    id: 'o1-mini', // deprecated on 2025-10-27
     maxOutput: 65_536,
     pricing: {
       cachedInput: 0.55,
@@ -490,7 +490,7 @@ export const openaiChatModels: AIChatModelCard[] = [
       input: 2.5,
       output: 10,
     },
-    releasedAt: '2024-
+    releasedAt: '2024-12-17',
     /*
     settings: {
       searchImpl: 'params',
@@ -626,23 +626,6 @@ export const openaiChatModels: AIChatModelCard[] = [
     releasedAt: '2023-06-13',
     type: 'chat',
   },
-  {
-    abilities: {
-      functionCall: true,
-    },
-    contextWindowTokens: 32_768,
-
-    description:
-      'GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。',
-    displayName: 'GPT-4 32K',
-    id: 'gpt-4-32k', // deprecated on 2025-06-06
-    legacy: true,
-    pricing: {
-      input: 60,
-      output: 120,
-    },
-    type: 'chat',
-  },
   {
     abilities: {
       functionCall: true,
@@ -713,6 +696,7 @@ export const openaiChatModels: AIChatModelCard[] = [
     id: 'codex-mini-latest',
     maxOutput: 100_000,
     pricing: {
+      cachedInput: 0.375,
       input: 1.5,
       output: 6,
     },
@@ -800,7 +784,8 @@ export const openaiTTSModels: AITTSModelCard[] = [
     displayName: 'GPT-4o Mini TTS',
     id: 'gpt-4o-mini-tts',
     pricing: {
-      input:
+      input: 0.6,
+      output: 12,
     },
     type: 'tts',
   },
@@ -809,7 +794,7 @@ export const openaiTTSModels: AITTSModelCard[] = [
 // 语音识别模型
 export const openaiSTTModels: AISTTModelCard[] = [
   {
-    description: '
+    description: '通用语音识别模型,支持多语言语音识别、语音翻译和语言识别。',
     displayName: 'Whisper',
     id: 'whisper-1',
     pricing: {
@@ -817,6 +802,30 @@ export const openaiSTTModels: AISTTModelCard[] = [
     },
     type: 'stt',
   },
+  {
+    contextWindowTokens: 16_000,
+    description: 'GPT-4o Transcribe 是一种使用 GPT-4o 转录音频的语音转文本模型。与原始 Whisper 模型相比,它提高了单词错误率,并提高了语言识别和准确性。使用它来获得更准确的转录。',
+    displayName: 'GPT-4o Transcribe',
+    id: 'gpt-4o-transcribe',
+    maxOutput: 2000,
+    pricing: {
+      input: 6, // Audio
+      output: 10,
+    },
+    type: 'stt',
+  },
+  {
+    contextWindowTokens: 16_000,
+    description: 'GPT-4o Mini Transcribe 是一种使用 GPT-4o 转录音频的语音转文本模型。与原始 Whisper 模型相比,它提高了单词错误率,并提高了语言识别和准确性。使用它来获得更准确的转录。',
+    displayName: 'GPT-4o Mini Transcribe',
+    id: 'gpt-4o-mini-transcribe',
+    maxOutput: 2000,
+    pricing: {
+      input: 3, // Audio
+      output: 5,
+    },
+    type: 'stt',
+  },
 ];
 
 // 图像生成模型
@@ -848,54 +857,54 @@ export const openaiImageModels: AIText2ImageModelCard[] = [
 // GPT-4o 和 GPT-4o-mini 实时模型
 export const openaiRealtimeModels: AIRealtimeModelCard[] = [
   {
-    contextWindowTokens:
+    contextWindowTokens: 16_000,
     description: 'GPT-4o 实时版本,支持音频和文本实时输入输出',
-    displayName: 'GPT-4o Realtime',
+    displayName: 'GPT-4o Realtime 241217',
     id: 'gpt-4o-realtime-preview',
     maxOutput: 4096,
     pricing: {
-      audioInput:
-      audioOutput:
-      cachedAudioInput:
+      audioInput: 40,
+      audioOutput: 80,
+      cachedAudioInput: 2.5,
       cachedInput: 2.5,
       input: 5,
       output: 20,
     },
-    releasedAt: '2024-
+    releasedAt: '2024-12-17',
     type: 'realtime',
   },
   {
-    contextWindowTokens:
+    contextWindowTokens: 32_000,
     description: 'GPT-4o 实时版本,支持音频和文本实时输入输出',
-    displayName: 'GPT-4o Realtime
-    id: 'gpt-4o-realtime-preview-
+    displayName: 'GPT-4o Realtime 250603',
+    id: 'gpt-4o-realtime-preview-2025-06-03',
     maxOutput: 4096,
     pricing: {
-      audioInput:
-      audioOutput:
-      cachedAudioInput:
+      audioInput: 40,
+      audioOutput: 80,
+      cachedAudioInput: 2.5,
       cachedInput: 2.5,
       input: 5,
       output: 20,
     },
-    releasedAt: '
+    releasedAt: '2025-06-03',
     type: 'realtime',
   },
   {
-    contextWindowTokens:
+    contextWindowTokens: 16_000,
     description: 'GPT-4o 实时版本,支持音频和文本实时输入输出',
-    displayName: 'GPT-4o Realtime
-    id: 'gpt-4o-realtime-preview-2024-
+    displayName: 'GPT-4o Realtime 241001',
+    id: 'gpt-4o-realtime-preview-2024-10-01', // deprecated on 2025-09-10
     maxOutput: 4096,
     pricing: {
-      audioInput:
-      audioOutput:
-      cachedAudioInput:
+      audioInput: 100,
+      audioOutput: 200,
+      cachedAudioInput: 20,
       cachedInput: 2.5,
       input: 5,
       output: 20,
     },
-    releasedAt: '2024-
+    releasedAt: '2024-10-01',
     type: 'realtime',
   },
   {
package/src/config/aiModels/volcengine.ts
CHANGED
@@ -22,8 +22,8 @@ const doubaoChatModels: AIChatModelCard[] = [
     maxOutput: 16_000,
     pricing: {
       currency: 'CNY',
-      input: 2
-      output:
+      input: 1.2, // 输入长度 (32, 128] 千 token
+      output: 16,
     },
     type: 'chat',
   },
@@ -45,8 +45,11 @@ const doubaoChatModels: AIChatModelCard[] = [
     maxOutput: 16_000,
     pricing: {
       currency: 'CNY',
-      input: 2
-      output:
+      input: 1.2, // 输入长度 (32, 128] 千 token
+      output: 16,
+    },
+    settings: {
+      extendParams: ['thinking'],
     },
     type: 'chat',
   },
@@ -68,8 +71,36 @@ const doubaoChatModels: AIChatModelCard[] = [
     maxOutput: 16_000,
     pricing: {
       currency: 'CNY',
-      input: 0.
-      output:
+      input: 0.3, // 输入长度 (32, 128] 千 token
+      output: 3,
+    },
+    settings: {
+      extendParams: ['enableReasoning'],
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'doubao-1-5-ui-tars-250428',
+    },
+    contextWindowTokens: 131_072,
+    description:
+      'Doubao-1.5-UI-TARS 是一款原生面向图形界面交互(GUI)的Agent模型。通过感知、推理和行动等类人的能力,与 GUI 进行无缝交互。',
+    displayName: 'Doubao 1.5 UI TARS',
+    id: 'doubao-1.5-ui-tars',
+    maxOutput: 16_000,
+    pricing: {
+      currency: 'CNY',
+      input: 3.5,
+      output: 12,
+    },
+    settings: {
+      extendParams: ['thinking'],
     },
     type: 'chat',
   },
@@ -86,8 +117,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       '全新视觉深度思考模型,具备更强的通用多模态理解和推理能力,在 59 个公开评测基准中的 37 个上取得 SOTA 表现。',
     displayName: 'Doubao 1.5 Thinking Vision Pro',
-
-    id: 'Doubao-1.5-thinking-vision-pro',
+    id: 'doubao-1.5-thinking-vision-pro',
     maxOutput: 16_000,
     pricing: {
       currency: 'CNY',
@@ -95,7 +125,7 @@ const doubaoChatModels: AIChatModelCard[] = [
       output: 9,
     },
     settings: {
-      extendParams: ['
+      extendParams: ['thinking'],
     },
     type: 'chat',
   },
@@ -111,7 +141,6 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       'Doubao-1.5全新深度思考模型,在数学、编程、科学推理等专业领域及创意写作等通用任务中表现突出,在AIME 2024、Codeforces、GPQA等多项权威基准上达到或接近业界第一梯队水平。支持128k上下文窗口,16k输出。',
     displayName: 'Doubao 1.5 Thinking Pro',
-    enabled: true,
     id: 'doubao-1.5-thinking-pro',
     maxOutput: 16_000,
     pricing: {
@@ -128,19 +157,22 @@ const doubaoChatModels: AIChatModelCard[] = [
       vision: true,
     },
     config: {
-      deploymentName: 'doubao-1-5-thinking-pro-m-
+      deploymentName: 'doubao-1-5-thinking-pro-m-250428',
     },
     contextWindowTokens: 131_072,
     description:
       'Doubao-1.5全新深度思考模型 (m 版本自带原生多模态深度推理能力),在数学、编程、科学推理等专业领域及创意写作等通用任务中表现突出,在AIME 2024、Codeforces、GPQA等多项权威基准上达到或接近业界第一梯队水平。支持128k上下文窗口,16k输出。',
     displayName: 'Doubao 1.5 Thinking Pro M',
-    id: '
+    id: 'doubao-1.5-thinking-pro-m',
     maxOutput: 16_000,
     pricing: {
       currency: 'CNY',
       input: 4,
       output: 16,
     },
+    settings: {
+      extendParams: ['enableReasoning'],
+    },
     type: 'chat',
   },
   {
@@ -233,13 +265,12 @@ const doubaoChatModels: AIChatModelCard[] = [
     config: {
       deploymentName: 'doubao-1-5-pro-32k-250115',
     },
-    contextWindowTokens:
+    contextWindowTokens: 128_000,
     description:
       'Doubao-1.5-pro 全新一代主力模型,性能全面升级,在知识、代码、推理、等方面表现卓越。',
     displayName: 'Doubao 1.5 Pro 32k',
-    enabled: true,
     id: 'doubao-1.5-pro-32k',
-    maxOutput:
+    maxOutput: 16_384,
     pricing: {
       currency: 'CNY',
       input: 0.8,
@@ -274,7 +305,6 @@ const doubaoChatModels: AIChatModelCard[] = [
     contextWindowTokens: 32_768,
     description: 'Doubao-1.5-lite 全新一代轻量版模型,极致响应速度,效果与时延均达到全球一流水平。',
     displayName: 'Doubao 1.5 Lite 32k',
-    enabled: true,
     id: 'doubao-1.5-lite-32k',
     maxOutput: 12_288,
     pricing: {
@@ -296,7 +326,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       'Doubao-1.5-vision-pro 全新升级的多模态大模型,支持任意分辨率和极端长宽比图像识别,增强视觉推理、文档识别、细节信息理解和指令遵循能力。',
     displayName: 'Doubao 1.5 Vision Pro 32k',
-    id: '
+    id: 'doubao-1.5-vision-pro-32k',
     maxOutput: 12_288,
     pricing: {
       currency: 'CNY',
@@ -318,7 +348,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       'Doubao-1.5-vision-pro 全新升级的多模态大模型,支持任意分辨率和极端长宽比图像识别,增强视觉推理、文档识别、细节信息理解和指令遵循能力。',
     displayName: 'Doubao 1.5 Vision Pro',
-    id: '
+    id: 'doubao-1.5-vision-pro',
     maxOutput: 16_384,
     pricing: {
       currency: 'CNY',
@@ -361,7 +391,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       'Doubao-vision 模型是豆包推出的多模态大模型,具备强大的图片理解与推理能力,以及精准的指令理解能力。模型在图像文本信息抽取、基于图像的推理任务上有展现出了强大的性能,能够应用于更复杂、更广泛的视觉问答任务。',
     displayName: 'Doubao Vision Pro 32k',
-    id: '
+    id: 'doubao-vision-pro-32k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
@@ -382,7 +412,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       'Doubao-vision 模型是豆包推出的多模态大模型,具备强大的图片理解与推理能力,以及精准的指令理解能力。模型在图像文本信息抽取、基于图像的推理任务上有展现出了强大的性能,能够应用于更复杂、更广泛的视觉问答任务。',
     displayName: 'Doubao Vision Lite 32k',
-    id: '
+    id: 'doubao-vision-lite-32k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
@@ -393,11 +423,14 @@ const doubaoChatModels: AIChatModelCard[] = [
     type: 'chat',
   },
   {
+    config: {
+      deploymentName: 'doubao-lite-4k-character-240828',
+    },
     contextWindowTokens: 4096,
     description:
       '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 4k 上下文窗口的推理和精调。',
     displayName: 'Doubao Lite 4k',
-    id: '
+    id: 'doubao-lite-4k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
@@ -414,7 +447,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 32k 上下文窗口的推理和精调。',
     displayName: 'Doubao Lite 32k',
-    id: '
+    id: 'doubao-lite-32k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
@@ -431,7 +464,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       '拥有极致的响应速度,更好的性价比,为客户不同场景提供更灵活的选择。支持 128k 上下文窗口的推理和精调。',
     displayName: 'Doubao Lite 128k',
-    id: '
+    id: 'doubao-lite-128k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
@@ -440,20 +473,6 @@ const doubaoChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    contextWindowTokens: 4096,
-    description:
-      '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 4k 上下文窗口的推理和精调。',
-    displayName: 'Doubao Pro 4k',
-    id: 'Doubao-pro-4k',
-    maxOutput: 4096,
-    pricing: {
-      currency: 'CNY',
-      input: 0.8,
-      output: 2,
-    },
-    type: 'chat',
-  },
   {
     config: {
       deploymentName: 'doubao-pro-32k-241215',
@@ -462,7 +481,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 32k 上下文窗口的推理和精调。',
     displayName: 'Doubao Pro 32k',
-    id: '
+    id: 'doubao-pro-32k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
@@ -471,20 +490,6 @@ const doubaoChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    contextWindowTokens: 128_000,
-    description:
-      '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 128k 上下文窗口的推理和精调。',
-    displayName: 'Doubao Pro 128k',
-    id: 'Doubao-pro-128k',
-    maxOutput: 4096,
-    pricing: {
-      currency: 'CNY',
-      input: 5,
-      output: 9,
-    },
-    type: 'chat',
-  },
   {
     config: {
       deploymentName: 'doubao-pro-256k-241115',
@@ -493,7 +498,7 @@ const doubaoChatModels: AIChatModelCard[] = [
     description:
       '效果最好的主力模型,适合处理复杂任务,在参考问答、总结摘要、创作、文本分类、角色扮演等场景都有很好的效果。支持 256k 上下文窗口的推理和精调。',
     displayName: 'Doubao Pro 256k',
-    id: '
+    id: 'doubao-pro-256k',
     maxOutput: 4096,
     pricing: {
       currency: 'CNY',
package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx
CHANGED
@@ -14,6 +14,7 @@ import ContextCachingSwitch from './ContextCachingSwitch';
 import ReasoningEffortSlider from './ReasoningEffortSlider';
 import ReasoningTokenSlider from './ReasoningTokenSlider';
 import ThinkingBudgetSlider from './ThinkingBudgetSlider';
+import ThinkingSlider from './ThinkingSlider';
 
 const ControlsForm = memo(() => {
   const { t } = useTranslation('chat');
@@ -105,6 +106,16 @@ const ControlsForm = memo(() => {
       },
       tag: 'thinkingBudget',
     },
+    {
+      children: <ThinkingSlider />,
+      label: t('extendParams.thinking.title'),
+      layout: 'horizontal',
+      minWidth: undefined,
+      name: 'thinking',
+      style: {
+        paddingBottom: 0,
+      },
+    },
   ].filter(Boolean) as FormItemProps[];
 
   return (
package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx
ADDED
@@ -0,0 +1,57 @@
+import { Slider } from 'antd';
+import { memo, useCallback } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import { useAgentStore } from '@/store/agent';
+import { agentChatConfigSelectors } from '@/store/agent/selectors';
+
+const ThinkingSlider = memo(() => {
+  const [config, updateAgentChatConfig] = useAgentStore((s) => [
+    agentChatConfigSelectors.currentChatConfig(s),
+    s.updateAgentChatConfig,
+  ]);
+
+  const thinking = config.thinking || 'auto'; // Default to 'auto' if not set
+
+  const marks = {
+    0: 'OFF',
+    1: 'Auto',
+    2: 'ON',
+  };
+
+  const thinkingValues = ['disabled', 'auto', 'enabled'];
+  const indexValue = thinkingValues.indexOf(thinking);
+  const currentValue = indexValue === -1 ? 1 : indexValue;
+
+  const updateThinking = useCallback(
+    (value: number) => {
+      const thinkingMode = thinkingValues[value] as 'disabled' | 'auto' | 'enabled';
+      updateAgentChatConfig({ thinking: thinkingMode });
+    },
+    [updateAgentChatConfig],
+  );
+
+  return (
+    <Flexbox
+      align={'center'}
+      gap={12}
+      horizontal
+      paddingInline={'0 20px'}
+      style={{ minWidth: 200, width: '100%' }}
+    >
+      <Flexbox flex={1}>
+        <Slider
+          marks={marks}
+          max={2}
+          min={0}
+          onChange={updateThinking}
+          step={1}
+          tooltip={{ open: false }}
+          value={currentValue}
+        />
+      </Flexbox>
+    </Flexbox>
+  );
+});
+
+export default ThinkingSlider;
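The new ThinkingSlider drives the agent chat config's `thinking` field with a three-position control. A minimal standalone sketch of the position-to-mode mapping it uses (the `ThinkingMode` alias and the helper names below are illustrative, not exports of the package):

// 0 = OFF ('disabled'), 1 = Auto ('auto'), 2 = ON ('enabled')
type ThinkingMode = 'disabled' | 'auto' | 'enabled';

const THINKING_VALUES: ThinkingMode[] = ['disabled', 'auto', 'enabled'];

// Slider index -> stored mode; out-of-range indexes fall back to 'auto'.
const indexToMode = (index: number): ThinkingMode => THINKING_VALUES[index] ?? 'auto';

// Stored mode -> slider index; missing or unknown values also land on 'auto' (1),
// matching the component's `indexValue === -1 ? 1 : indexValue` fallback.
const modeToIndex = (mode?: string): number => {
  const index = THINKING_VALUES.indexOf(mode as ThinkingMode);
  return index === -1 ? 1 : index;
};

console.log(indexToMode(2)); // 'enabled'
console.log(modeToIndex(undefined)); // 1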
package/src/libs/model-runtime/volcengine/index.ts
CHANGED
@@ -2,6 +2,13 @@ import { ModelProvider } from '../types';
 import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
 import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
 
+const THINKING_MODELS = [
+  'thinking-vision-pro',
+  'thinking-pro-m',
+  'doubao-seed-1-6',
+  'doubao-1-5-ui-tars'
+];
+
 export interface VolcengineModelCard {
   id: string;
 }
@@ -15,12 +22,9 @@ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
       return {
         ...rest,
         model,
-        ...(
+        ...(THINKING_MODELS.some((keyword) => model.toLowerCase().includes(keyword))
           ? {
-              thinking:
-                thinking !== undefined && thinking.type === 'enabled'
-                  ? { type: 'enabled' }
-                  : { type: 'disabled' },
+              thinking: { type: thinking?.type }
             }
           : {}),
       } as any;
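With this runtime change, only model ids containing one of the THINKING_MODELS keywords get a `thinking` field in the outgoing payload, and the requested type is now forwarded as-is instead of being coerced to enabled/disabled. A hedged sketch of that matching in isolation (the helper name and standalone setup are illustrative, not part of the runtime):

// Keywords copied from the diff above; the check is a case-insensitive substring match.
const THINKING_MODELS = ['thinking-vision-pro', 'thinking-pro-m', 'doubao-seed-1-6', 'doubao-1-5-ui-tars'];

interface ThinkingParam { type?: 'disabled' | 'auto' | 'enabled' }

// Returns the extra fields the payload transform would spread into the request body.
const thinkingFields = (model: string, thinking?: ThinkingParam) =>
  THINKING_MODELS.some((keyword) => model.toLowerCase().includes(keyword))
    ? { thinking: { type: thinking?.type } }
    : {};

console.log(thinkingFields('doubao-1-5-thinking-pro-m-250428', { type: 'enabled' }));
// -> { thinking: { type: 'enabled' } }
console.log(thinkingFields('doubao-1.5-lite-32k', { type: 'enabled' }));
// -> {} (non-thinking models are sent unchanged)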
package/src/services/chat.ts
CHANGED
@@ -264,6 +264,10 @@ class ChatService {
         extendParams.reasoning_effort = chatConfig.reasoningEffort;
       }
 
+      if (modelExtendParams!.includes('thinking') && chatConfig.thinking) {
+        extendParams.thinking = { type: chatConfig.thinking };
+      }
+
       if (
         modelExtendParams!.includes('thinkingBudget') &&
         chatConfig.thinkingBudget !== undefined
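With this service change, a model whose aiModel settings list 'thinking' in extendParams gets the per-session thinking mode copied from the agent chat config into the request's extended parameters, alongside the existing reasoning_effort and thinkingBudget handling. A rough sketch of the added branch in isolation (the helper below is illustrative, not an actual method of ChatService):

type ThinkingMode = 'disabled' | 'auto' | 'enabled';

// Same guard and assignment as the added lines above, extracted into a standalone helper.
const buildThinkingParam = (
  modelExtendParams: string[],
  chatConfig: { thinking?: ThinkingMode },
): Record<string, any> => {
  const extendParams: Record<string, any> = {};

  if (modelExtendParams.includes('thinking') && chatConfig.thinking) {
    extendParams.thinking = { type: chatConfig.thinking };
  }

  return extendParams;
};

console.log(buildThinkingParam(['thinking'], { thinking: 'auto' }));
// -> { thinking: { type: 'auto' } }
console.log(buildThinkingParam(['thinkingBudget'], { thinking: 'auto' }));
// -> {} (models that do not declare 'thinking' are unaffected)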
package/src/store/chat/slices/message/selectors.ts
CHANGED
@@ -201,11 +201,6 @@ const isSendButtonDisabledByMessage = (s: ChatStoreState) =>
   // 4. when the message is in RAG flow
   isInRAGFlow(s);
 
-const inboxActiveTopicMessages = (state: ChatStoreState) => {
-  const activeTopicId = state.activeTopicId;
-  return state.messagesMap[messageMapKey(INBOX_SESSION_ID, activeTopicId)] || [];
-};
-
 export const chatSelectors = {
   activeBaseChats,
   activeBaseChatsWithoutTool,
@@ -218,7 +213,6 @@ export const chatSelectors = {
   getMessageById,
   getMessageByToolCallId,
   getTraceIdByMessageId,
-  inboxActiveTopicMessages,
   isAIGenerating,
   isCreatingMessage,
   isCurrentChatLoaded,
package/src/types/aiModel.ts
CHANGED
@@ -121,6 +121,7 @@ export interface AIBaseModelCard {
    * whether model is legacy (deprecated but not removed yet)
    */
   legacy?: boolean;
+  maxOutput?: number;
   /**
    * who create this model
    */
@@ -148,6 +149,7 @@ export type ExtendParamsType =
   | 'enableReasoning'
   | 'disableContextCaching'
   | 'reasoningEffort'
+  | 'thinking'
   | 'thinkingBudget';
 
 export interface AiModelSettings {
@@ -207,6 +209,7 @@ export interface AITTSModelCard extends AIBaseModelCard {
     * the input pricing, e.g. $1 / 1M tokens
     */
    input?: number;
+    output?: number;
   };
   type: 'tts';
 }
@@ -222,6 +225,7 @@ export interface AISTTModelCard extends AIBaseModelCard {
     * the input pricing, e.g. $1 / 1M tokens
     */
    input?: number;
+    output?: number;
   };
   type: 'stt';
 }
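The type additions above (an optional maxOutput on the base card, an output price on the TTS/STT pricing blocks, and the new 'thinking' extend param) are what the model cards and UI changes earlier in this diff rely on. A hedged sketch of cards exercising them; the ids, prices, and descriptions are made up for illustration and extra required fields may apply:

import { AIChatModelCard, AISTTModelCard } from '@/types/aiModel';

// Chat card opting into the new per-session thinking control (illustrative values).
const exampleThinkingModel: AIChatModelCard = {
  abilities: { functionCall: true, reasoning: true },
  contextWindowTokens: 131_072,
  description: 'Illustrative thinking-capable chat model.',
  displayName: 'Example Thinking Model',
  id: 'example-thinking-model',
  maxOutput: 16_000,
  pricing: { currency: 'CNY', input: 4, output: 16 },
  settings: { extendParams: ['thinking'] },
  type: 'chat',
};

// STT card using the new output price field (illustrative values).
const exampleTranscribeModel: AISTTModelCard = {
  contextWindowTokens: 16_000,
  description: 'Illustrative transcription model.',
  displayName: 'Example Transcribe',
  id: 'example-transcribe',
  maxOutput: 2000,
  pricing: { input: 6, output: 10 },
  type: 'stt',
};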