@lobehub/chat 1.80.0 → 1.80.2

This diff shows the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two released versions.
Files changed (37)
  1. package/CHANGELOG.md +58 -0
  2. package/changelog/v1.json +21 -0
  3. package/docs/development/basic/feature-development.mdx +370 -619
  4. package/docs/development/basic/feature-development.zh-CN.mdx +368 -611
  5. package/package.json +1 -1
  6. package/src/app/[variants]/oauth/consent/[uid]/Client.tsx +36 -23
  7. package/src/app/[variants]/oauth/consent/[uid]/page.tsx +2 -0
  8. package/src/config/aiModels/azure.ts +79 -1
  9. package/src/config/aiModels/azureai.ts +181 -0
  10. package/src/config/aiModels/google.ts +36 -2
  11. package/src/config/aiModels/groq.ts +31 -3
  12. package/src/config/aiModels/hunyuan.ts +54 -18
  13. package/src/config/aiModels/moonshot.ts +17 -17
  14. package/src/config/aiModels/novita.ts +25 -30
  15. package/src/config/aiModels/siliconcloud.ts +80 -2
  16. package/src/config/aiModels/stepfun.ts +40 -31
  17. package/src/config/aiModels/tencentcloud.ts +7 -6
  18. package/src/config/aiModels/volcengine.ts +1 -0
  19. package/src/config/aiModels/zhipu.ts +91 -27
  20. package/src/const/settings/knowledge.ts +2 -2
  21. package/src/features/ChatInput/ActionBar/Upload/ClientMode.tsx +7 -6
  22. package/src/hooks/useModelSupportFiles.ts +15 -0
  23. package/src/libs/agent-runtime/stepfun/index.ts +7 -1
  24. package/src/libs/agent-runtime/zhipu/index.ts +17 -10
  25. package/src/libs/oidc-provider/config.ts +0 -3
  26. package/src/libs/trpc/edge/index.ts +0 -4
  27. package/src/libs/trpc/lambda/context.ts +90 -6
  28. package/src/libs/trpc/lambda/index.ts +2 -1
  29. package/src/libs/trpc/lambda/middleware/oidcAuth.ts +14 -0
  30. package/src/libs/trpc/middleware/userAuth.ts +2 -4
  31. package/src/server/services/oidc/index.ts +71 -0
  32. package/src/store/aiInfra/slices/aiModel/selectors.ts +7 -0
  33. package/src/utils/parseModels.test.ts +19 -3
  34. package/src/utils/server/__tests__/auth.test.ts +45 -1
  35. package/src/utils/server/auth.ts +26 -2
  36. package/docs/development/basic/feature-development-new.mdx +0 -465
  37. package/docs/development/basic/feature-development-new.zh-CN.mdx +0 -465
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.80.0",
+  "version": "1.80.2",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/[variants]/oauth/consent/[uid]/Client.tsx CHANGED
@@ -15,6 +15,7 @@ interface ClientProps {
   clientId: string;
   clientMetadata: {
     clientName?: string;
+    isFirstParty?: boolean;
     logo?: string;
   };
 
@@ -128,29 +129,41 @@ const ConsentClient = memo<ClientProps>(
     return (
       <Center className={styles.container} gap={16}>
         <Flexbox gap={40}>
-          <Flexbox align={'center'} gap={12} horizontal justify={'center'}>
-            <div className={styles.icon}>
-              {clientMetadata?.logo ? (
-                <Image
-                  alt={clientDisplayName}
-                  height={56}
-                  src={clientMetadata?.logo}
-                  unoptimized
-                  width={56}
-                />
-              ) : (
-                <Icon icon={ServerIcon} />
-              )}
-            </div>
-            <div className={styles.connectorLine} />
-            <Center className={styles.connector}>
-              <Icon icon={Link2Icon} style={{ color: theme.colorTextSecondary, fontSize: 20 }} />
-            </Center>
-            <div className={styles.connectorLine} />
-            <div className={styles.lobeIcon}>
-              <ProductLogo height={48} style={{ objectFit: 'cover' }} width={48} />
-            </div>
-          </Flexbox>
+          {clientMetadata.isFirstParty ? (
+            <Flexbox align={'center'} gap={12} horizontal justify={'center'}>
+              <Image
+                alt={clientDisplayName}
+                height={64}
+                src={clientMetadata.logo!}
+                unoptimized
+                width={64}
+              />
+            </Flexbox>
+          ) : (
+            <Flexbox align={'center'} gap={12} horizontal justify={'center'}>
+              <div className={styles.icon}>
+                {clientMetadata?.logo ? (
+                  <Image
+                    alt={clientDisplayName}
+                    height={56}
+                    src={clientMetadata?.logo}
+                    unoptimized
+                    width={56}
+                  />
+                ) : (
+                  <Icon icon={ServerIcon} />
+                )}
+              </div>
+              <div className={styles.connectorLine} />
+              <Center className={styles.connector}>
+                <Icon icon={Link2Icon} style={{ color: theme.colorTextSecondary, fontSize: 20 }} />
+              </Center>
+              <div className={styles.connectorLine} />
+              <div className={styles.lobeIcon}>
+                <ProductLogo height={48} style={{ objectFit: 'cover' }} width={48} />
+              </div>
+            </Flexbox>
+          )}
 
           <Title className={styles.title} level={3}>
             {t('consent.title', { clientName: clientDisplayName })}
package/src/app/[variants]/oauth/consent/[uid]/page.tsx CHANGED
@@ -1,6 +1,7 @@
 import { notFound } from 'next/navigation';
 
 import { oidcEnv } from '@/envs/oidc';
+import { defaultClients } from '@/libs/oidc-provider/config';
 import { OIDCService } from '@/server/services/oidc';
 
 import ConsentClient from './Client';
@@ -42,6 +43,7 @@ const InteractionPage = async (props: { params: Promise<{ uid: string }> }) => {
           clientId={clientId}
           clientMetadata={{
             clientName: clientDetail?.client_name,
+            isFirstParty: defaultClients.map((c) => c.client_id).includes(clientId),
             logo: clientDetail?.logo_uri,
           }}
           redirectUri={details.params.redirect_uri as string}
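The two OAuth consent changes above work together: the server page derives an `isFirstParty` flag by testing the requesting `clientId` against the statically registered OIDC clients, and the client component uses that flag to render a plain logo instead of the third-party connector layout. Below is a minimal standalone sketch of that check, assuming `defaultClients` exports objects with a `client_id` field; the actual shape of `@/libs/oidc-provider/config` is not shown in this diff, and the client IDs here are placeholders.

```ts
// Hedged sketch only — mirrors the expression used in page.tsx above.
// The real `defaultClients` lives in '@/libs/oidc-provider/config'; these
// entries are placeholder data, not the package's actual client IDs.
interface OIDCClientConfig {
  client_id: string;
}

const defaultClients: OIDCClientConfig[] = [{ client_id: 'example-first-party-client' }];

// Same check as the diff: map to IDs, then test membership.
const isFirstParty = (clientId: string): boolean =>
  defaultClients.map((c) => c.client_id).includes(clientId);

console.log(isFirstParty('example-first-party-client')); // true
console.log(isFirstParty('some-third-party-app')); // false
```

An equivalent `defaultClients.some((c) => c.client_id === clientId)` would avoid building the intermediate array, but the diff keeps the `map(...).includes(...)` form.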
package/src/config/aiModels/azure.ts CHANGED
@@ -1,6 +1,75 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
 const azureChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'gpt-4.1',
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
+    displayName: 'GPT-4.1',
+    enabled: true,
+    id: 'gpt-4.1',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'gpt-4.1-mini',
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
+    displayName: 'GPT-4.1 mini',
+    enabled: true,
+    id: 'gpt-4.1-mini',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.1,
+      input: 0.4,
+      output: 1.6,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'gpt-4.1-nano',
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
+    displayName: 'GPT-4.1 nano',
+    enabled: true,
+    id: 'gpt-4.1-nano',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.025,
+      input: 0.1,
+      output: 0.4,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -16,6 +85,7 @@ const azureChatModels: AIChatModelCard[] = [
     id: 'o3-mini',
     maxOutput: 100_000,
     pricing: {
+      cachedInput: 0.55,
       input: 1.1,
       output: 4.4,
     },
@@ -37,6 +107,7 @@ const azureChatModels: AIChatModelCard[] = [
     id: 'o1-mini',
     maxOutput: 65_536,
     pricing: {
+      cachedInput: 0.55,
       input: 1.1,
       output: 4.4,
     },
@@ -58,6 +129,7 @@ const azureChatModels: AIChatModelCard[] = [
     id: 'o1',
     maxOutput: 100_000,
     pricing: {
+      cachedInput: 7.5,
       input: 15,
       output: 60,
     },
@@ -98,14 +170,15 @@ const azureChatModels: AIChatModelCard[] = [
     displayName: 'GPT-4o',
     enabled: true,
     id: 'gpt-4o',
+    maxOutput: 4096,
     pricing: {
+      cachedInput: 1.25,
       input: 2.5,
       output: 10,
     },
     releasedAt: '2024-05-13',
     type: 'chat',
   },
-
   {
     abilities: {
       functionCall: true,
@@ -135,6 +208,11 @@ const azureChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'gpt-4o-mini',
     maxOutput: 4096,
+    pricing: {
+      cachedInput: 0.075,
+      input: 0.15,
+      output: 0.6,
+    },
     type: 'chat',
   },
 ];
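Most of the remaining provider changes in this release (azure, azureai, google, groq, hunyuan, and the other `src/config/aiModels/*` files) add or adjust entries of the same `AIChatModelCard` shape seen above. The sketch below shows one such entry using only fields that appear in this diff; the full interface in `@/types/aiModel` likely has additional optional fields, and the `config.deploymentName` field is specific to the Azure provider config.

```ts
import { AIChatModelCard } from '@/types/aiModel';

// Sketch of a single catalog entry, not an addition to the real config files.
// Field values are copied from the GPT-4.1 mini entry in the diff above.
const exampleModel: AIChatModelCard = {
  abilities: {
    functionCall: true, // supports tool / function calling
    vision: true, // accepts image input
  },
  config: {
    deploymentName: 'gpt-4.1-mini', // Azure-only: which deployment to route requests to
  },
  contextWindowTokens: 1_047_576,
  displayName: 'GPT-4.1 mini',
  enabled: true, // appears to control whether the model is shown/enabled by default
  id: 'gpt-4.1-mini',
  maxOutput: 32_768,
  pricing: {
    cachedInput: 0.1, // pricing fields; some entries elsewhere in this diff also set `currency: 'CNY'`
    input: 0.4,
    output: 1.6,
  },
  releasedAt: '2025-04-14',
  type: 'chat',
};

export const exampleModels: AIChatModelCard[] = [exampleModel];
```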
package/src/config/aiModels/azureai.ts CHANGED
@@ -8,7 +8,188 @@ const azureChatModels: AIChatModelCard[] = [
     contextWindowTokens: 128_000,
     displayName: 'DeepSeek R1',
     id: 'DeepSeek-R1',
+    pricing: {
+      input: 1.35,
+      output: 5.4,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 128_000,
+    displayName: 'DeepSeek V3',
+    id: 'DeepSeek-V3',
+    pricing: {
+      input: 1.14,
+      output: 4.56,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
+    displayName: 'GPT-4.1',
+    enabled: true,
+    id: 'gpt-4.1',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
+    displayName: 'GPT-4.1 mini',
+    enabled: true,
+    id: 'gpt-4.1-mini',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.1,
+      input: 0.4,
+      output: 1.6,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
+    displayName: 'GPT-4.1 nano',
+    enabled: true,
+    id: 'gpt-4.1-nano',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.025,
+      input: 0.1,
+      output: 0.4,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'GPT-4.5-preview 是最新的通用模型,具有深厚的世界知识和对用户意图的更好理解,擅长创意任务和代理规划。该模型的知识截止2023年10月。',
+    displayName: 'GPT 4.5 Preview',
+    id: 'gpt-4.5-preview',
+    pricing: {
+      cachedInput: 37.5,
+      input: 75,
+      output: 150,
+    },
+    releasedAt: '2025-02-27',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o3-mini 是我们最新的小型推理模型,在与 o1-mini 相同的成本和延迟目标下提供高智能。',
+    displayName: 'o3-mini',
+    id: 'o3-mini',
+    pricing: {
+      cachedInput: 0.55,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2025-01-31',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
+    displayName: 'o1-mini',
+    id: 'o1-mini',
+    pricing: {
+      cachedInput: 0.55,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2024-09-12',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。',
+    displayName: 'o1',
+    id: 'o1',
+    pricing: {
+      cachedInput: 7.5,
+      input: 15,
+      output: 60,
+    },
+    releasedAt: '2024-12-17',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description:
+      'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
+    displayName: 'GPT-4o',
+    id: 'gpt-4o',
     maxOutput: 4096,
+    pricing: {
+      cachedInput: 1.25,
+      input: 2.5,
+      output: 10,
+    },
+    releasedAt: '2024-05-13',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 128_000,
+    description: 'GPT-4o Mini,小型高效模型,具备与GPT-4o相似的卓越性能。',
+    displayName: 'GPT 4o Mini',
+    id: 'gpt-4o-mini',
+    maxOutput: 16_384,
+    pricing: {
+      cachedInput: 0.075,
+      input: 0.15,
+      output: 0.6,
+    },
     type: 'chat',
   },
 ];
package/src/config/aiModels/google.ts CHANGED
@@ -16,7 +16,6 @@ const googleChatModels: AIChatModelCard[] = [
     id: 'gemini-2.5-pro-exp-03-25',
     maxOutput: 65_536,
     pricing: {
-      cachedInput: 0,
       input: 0,
       output: 0,
     },
@@ -174,7 +173,6 @@ const googleChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
-      imageOutput: true,
       vision: true,
     },
     contextWindowTokens: 1_048_576 + 8192,
@@ -297,6 +295,42 @@ const googleChatModels: AIChatModelCard[] = [
     releasedAt: '2024-10-03',
     type: 'chat',
   },
+  {
+    contextWindowTokens: 32_768 + 8192,
+    displayName: 'Gemma 3 1B',
+    id: 'gemma-3-1b-it',
+    maxOutput: 8192,
+    pricing: {
+      cachedInput: 0,
+      input: 0,
+      output: 0,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768 + 8192,
+    displayName: 'Gemma 3 4B',
+    id: 'gemma-3-4b-it',
+    maxOutput: 8192,
+    pricing: {
+      cachedInput: 0,
+      input: 0,
+      output: 0,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 32_768 + 8192,
+    displayName: 'Gemma 3 12B',
+    id: 'gemma-3-12b-it',
+    maxOutput: 8192,
+    pricing: {
+      cachedInput: 0,
+      input: 0,
+      output: 0,
+    },
+    type: 'chat',
+  },
 ];
 
 export const allModels = [...googleChatModels];
package/src/config/aiModels/groq.ts CHANGED
@@ -4,6 +4,30 @@ import { AIChatModelCard } from '@/types/aiModel';
 // https://console.groq.com/docs/models
 
 const groqChatModels: AIChatModelCard[] = [
+  {
+    contextWindowTokens: 131_072,
+    displayName: 'Llama 4 Scout (17Bx16E)',
+    enabled: true,
+    id: 'meta-llama/llama-4-scout-17b-16e-instruct',
+    maxOutput: 8192,
+    pricing: {
+      input: 0.11,
+      output: 0.34,
+    },
+    type: 'chat',
+  },
+  {
+    contextWindowTokens: 131_072,
+    displayName: 'Llama 4 Maverick (17Bx128E)',
+    enabled: true,
+    id: 'meta-llama/llama-4-maverick-17b-128e-instruct',
+    maxOutput: 8192,
+    pricing: {
+      input: 0.5,
+      output: 0.77,
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -11,6 +35,7 @@ const groqChatModels: AIChatModelCard[] = [
     },
     contextWindowTokens: 131_072,
     displayName: 'Qwen QwQ 32B',
+    enabled: true,
     id: 'qwen-qwq-32b',
     pricing: {
       input: 0.29,
@@ -25,7 +50,6 @@ const groqChatModels: AIChatModelCard[] = [
     },
     contextWindowTokens: 131_072,
     displayName: 'DeepSeek R1 Distill Llama 70B',
-    enabled: true,
     id: 'deepseek-r1-distill-llama-70b',
     pricing: {
       input: 0.75, // 0.75 - 5.00
@@ -51,7 +75,6 @@ const groqChatModels: AIChatModelCard[] = [
     },
     contextWindowTokens: 131_072,
     displayName: 'DeepSeek R1 Distill Qwen 32B',
-    enabled: true,
     id: 'deepseek-r1-distill-qwen-32b',
     maxOutput: 16_384,
     pricing: {
@@ -159,7 +182,6 @@ const groqChatModels: AIChatModelCard[] = [
     contextWindowTokens: 131_072,
     description: 'Meta Llama 3.3 多语言大语言模型 ( LLM ) 是 70B(文本输入/文本输出)中的预训练和指令调整生成模型。 Llama 3.3 指令调整的纯文本模型针对多语言对话用例进行了优化,并且在常见行业基准上优于许多可用的开源和封闭式聊天模型。',
     displayName: 'Llama 3.3 70B Versatile',
-    enabled: true,
     id: 'llama-3.3-70b-versatile',
     maxOutput: 32_768,
     pricing: {
@@ -247,6 +269,12 @@ const groqChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    contextWindowTokens: 4096,
+    displayName: 'ALLaM 2 7B',
+    id: 'allam-2-7b',
+    type: 'chat',
+  },
 ];
 
 export const allModels = [...groqChatModels];
package/src/config/aiModels/hunyuan.ts CHANGED
@@ -20,7 +20,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
       input: 1,
       output: 4,
     },
-    releasedAt: '2025-03-21',
+    releasedAt: '2025-04-03',
     settings: {
       searchImpl: 'params',
     },
@@ -72,7 +72,6 @@ const hunyuanChatModels: AIChatModelCard[] = [
     description:
       '采用更优的路由策略,同时缓解了负载均衡和专家趋同的问题。长文方面,大海捞针指标达到99.9%。MOE-32K 性价比相对更高,在平衡效果、价格的同时,可对实现对长文本输入的处理。',
     displayName: 'Hunyuan Standard',
-    enabled: true,
     id: 'hunyuan-standard',
     maxOutput: 2000,
     pricing: {
@@ -137,7 +136,6 @@ const hunyuanChatModels: AIChatModelCard[] = [
     description:
       '擅长处理长文任务如文档摘要和文档问答等,同时也具备处理通用文本生成任务的能力。在长文本的分析和生成上表现优异,能有效应对复杂和详尽的长文内容处理需求。',
     displayName: 'Hunyuan Large Longcontext',
-    enabled: true,
     id: 'hunyuan-large-longcontext',
     maxOutput: 6000,
     pricing: {
@@ -160,7 +158,6 @@ const hunyuanChatModels: AIChatModelCard[] = [
     description:
       '通用体验优化,包括NLP理解、文本创作、闲聊、知识问答、翻译、领域等;提升拟人性,优化模型情商;提升意图模糊时模型主动澄清能力;提升字词解析类问题的处理能力;提升创作的质量和可互动性;提升多轮体验。',
     displayName: 'Hunyuan Turbo',
-    enabled: true,
     id: 'hunyuan-turbo-latest',
     maxOutput: 4000,
     pricing: {
@@ -201,19 +198,18 @@ const hunyuanChatModels: AIChatModelCard[] = [
       functionCall: true,
       search: true,
     },
-    contextWindowTokens: 32_000,
+    contextWindowTokens: 134_000,
     description:
-      'hunyuan-TurboS 混元旗舰大模型最新版本,具备更强的思考能力,更优的体验效果。',
-    displayName: 'Hunyuan TurboS',
-    enabled: true,
-    id: 'hunyuan-turbos-latest',
-    maxOutput: 8000,
+      '擅长处理长文任务如文档摘要和文档问答等,同时也具备处理通用文本生成任务的能力。在长文本的分析和生成上表现优异,能有效应对复杂和详尽的长文内容处理需求。',
+    displayName: 'Hunyuan TurboS LongText 128K',
+    id: 'hunyuan-turbos-longtext-128k-20250325',
+    maxOutput: 6000,
     pricing: {
       currency: 'CNY',
-      input: 0.8,
-      output: 2,
+      input: 1.5,
+      output: 6,
     },
-    releasedAt: '2025-03-13',
+    releasedAt: '2025-03-25',
     settings: {
       searchImpl: 'params',
     },
@@ -226,9 +222,10 @@ const hunyuanChatModels: AIChatModelCard[] = [
     },
     contextWindowTokens: 32_000,
     description:
-      '统一数学解题步骤的风格,加强数学多轮问答。文本创作优化回答风格,去除AI味,增加文采。',
-    displayName: 'Hunyuan TurboS 20250313',
-    id: 'hunyuan-turbos-20250313',
+      'hunyuan-TurboS 混元旗舰大模型最新版本,具备更强的思考能力,更优的体验效果。',
+    displayName: 'Hunyuan TurboS',
+    enabled: true,
+    id: 'hunyuan-turbos-latest',
     maxOutput: 8000,
     pricing: {
       currency: 'CNY',
@@ -241,6 +238,29 @@ const hunyuanChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  // 重定向模型先行注释,待 latest 更新后再显示
+  // {
+  //   abilities: {
+  //     functionCall: true,
+  //     search: true,
+  //   },
+  //   contextWindowTokens: 32_000,
+  //   description:
+  //     '统一数学解题步骤的风格,加强数学多轮问答。文本创作优化回答风格,去除AI味,增加文采。',
+  //   displayName: 'Hunyuan TurboS 20250313',
+  //   id: 'hunyuan-turbos-20250313',
+  //   maxOutput: 8000,
+  //   pricing: {
+  //     currency: 'CNY',
+  //     input: 0.8,
+  //     output: 2,
+  //   },
+  //   releasedAt: '2025-03-13',
+  //   settings: {
+  //     searchImpl: 'params',
+  //   },
+  //   type: 'chat',
+  // },
   {
     abilities: {
       functionCall: true,
@@ -270,7 +290,6 @@ const hunyuanChatModels: AIChatModelCard[] = [
     contextWindowTokens: 36_000,
     description: '混元最新7B多模态模型,上下文窗口32K,支持中英文场景的多模态对话、图像物体识别、文档表格理解、多模态数学等,在多个维度上评测指标优于7B竞品模型。',
     displayName: 'Hunyuan Lite Vision',
-    enabled: true,
     id: 'hunyuan-lite-vision',
     maxOutput: 4000,
     releasedAt: '2024-12-12',
@@ -296,7 +315,6 @@ const hunyuanChatModels: AIChatModelCard[] = [
     contextWindowTokens: 8000,
     description: '混元新一代视觉语言旗舰大模型,采用全新的混合专家模型(MoE)结构,在图文理解相关的基础识别、内容创作、知识问答、分析推理等能力上相比前一代模型全面提升。',
     displayName: 'Hunyuan Turbo Vision',
-    enabled: true,
     id: 'hunyuan-turbo-vision',
     maxOutput: 2000,
     pricing: {
@@ -307,6 +325,24 @@ const hunyuanChatModels: AIChatModelCard[] = [
     releasedAt: '2024-11-26',
     type: 'chat',
   },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 8000,
+    description: '此模型适用于图文理解场景,是基于混元最新 turbos 的新一代视觉语言旗舰大模型,聚焦图文理解相关任务,包括基于图片的实体识别、知识问答、文案创作、拍照解题等方面,相比前一代模型全面提升。',
+    displayName: 'Hunyuan TurboS Vision',
+    enabled: true,
+    id: 'hunyuan-turbos-vision',
+    maxOutput: 2000,
+    pricing: {
+      currency: 'CNY',
+      input: 3,
+      output: 9,
+    },
+    releasedAt: '2025-04-07',
+    type: 'chat',
+  },
   {
     abilities: {
       vision: true,