@lobehub/chat 1.47.22 → 1.47.23

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
  # Changelog
 
+ ### [Version 1.47.23](https://github.com/lobehub/lobe-chat/compare/v1.47.22...v1.47.23)
+
+ <sup>Released on **2025-01-24**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Fix model fetch match tag error & add Hunyuan model fetch support.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Fix model fetch match tag error & add Hunyuan model fetch support, closes [#5566](https://github.com/lobehub/lobe-chat/issues/5566) ([7b075ef](https://github.com/lobehub/lobe-chat/commit/7b075ef))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.47.22](https://github.com/lobehub/lobe-chat/compare/v1.47.21...v1.47.22)
 
  <sup>Released on **2025-01-24**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+ {
+ "children": {
+ "improvements": [
+ "Fix model fetch match tag error & add Hunyuan model fetch support."
+ ]
+ },
+ "date": "2025-01-24",
+ "version": "1.47.23"
+ },
  {
  "children": {
  "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.47.22",
+ "version": "1.47.23",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -14,6 +14,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
  input: 0,
  output: 0,
  },
+ releasedAt: '2024-10-30',
  type: 'chat',
  },
  {
@@ -26,9 +27,10 @@ const hunyuanChatModels: AIChatModelCard[] = [
  maxOutput: 2000,
  pricing: {
  currency: 'CNY',
- input: 4.5,
- output: 5,
+ input: 0.8,
+ output: 2,
  },
+ releasedAt: '2024-10-28',
  type: 'chat',
  },
  {
@@ -41,9 +43,10 @@ const hunyuanChatModels: AIChatModelCard[] = [
  maxOutput: 6000,
  pricing: {
  currency: 'CNY',
- input: 15,
- output: 60,
+ input: 0.5,
+ output: 2,
  },
+ releasedAt: '2024-10-28',
  type: 'chat',
  },
  {
@@ -52,9 +55,27 @@ const hunyuanChatModels: AIChatModelCard[] = [
  },
  contextWindowTokens: 32_000,
  description:
- '混元全新一代大语言模型的预览版,采用全新的混合专家模型(MoE)结构,相比hunyuan-pro推理效率更快,效果表现更强。',
+ '通用体验优化,包括NLP理解、文本创作、闲聊、知识问答、翻译、领域等;提升拟人性,优化模型情商;提升意图模糊时模型主动澄清能力;提升字词解析类问题的处理能力;提升创作的质量和可互动性;提升多轮体验。',
  displayName: 'Hunyuan Turbo',
  enabled: true,
+ id: 'hunyuan-turbo-latest',
+ maxOutput: 4000,
+ pricing: {
+ currency: 'CNY',
+ input: 15,
+ output: 50,
+ },
+ releasedAt: '2025-01-10',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ functionCall: true,
+ },
+ contextWindowTokens: 32_000,
+ description:
+ '本版本优化:数据指令scaling,大幅提升模型通用泛化能力;大幅提升数学、代码、逻辑推理能力;优化文本理解字词理解相关能力;优化文本创作内容生成质量',
+ displayName: 'Hunyuan Turbo',
  id: 'hunyuan-turbo',
  maxOutput: 4000,
  pricing: {
@@ -62,6 +83,25 @@ const hunyuanChatModels: AIChatModelCard[] = [
  input: 15,
  output: 50,
  },
+ releasedAt: '2025-01-10',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ functionCall: true,
+ },
+ contextWindowTokens: 32_000,
+ description:
+ '本版本优化:数据指令scaling,大幅提升模型通用泛化能力;大幅提升数学、代码、逻辑推理能力;优化文本理解字词理解相关能力;优化文本创作内容生成质量',
+ displayName: 'Hunyuan Turbo 20241223',
+ id: 'hunyuan-turbo-20241223',
+ maxOutput: 4000,
+ pricing: {
+ currency: 'CNY',
+ input: 15,
+ output: 50,
+ },
+ releasedAt: '2025-01-10',
  type: 'chat',
  },
  {
@@ -70,16 +110,74 @@ const hunyuanChatModels: AIChatModelCard[] = [
  },
  contextWindowTokens: 32_000,
  description:
- '万亿级参数规模 MOE-32K 长文模型。在各种 benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 functioncall,在多语言翻译、金融法律医疗等领域应用重点优化。',
- displayName: 'Hunyuan Pro',
+ 'hunyuan-turbo 2024 年 11 月 20 日固定版本,介于 hunyuan-turbo hunyuan-turbo-latest 之间的一个版本。',
+ displayName: 'Hunyuan Turbo 20241120',
+ id: 'hunyuan-turbo-20241120',
+ maxOutput: 4000,
+ pricing: {
+ currency: 'CNY',
+ input: 15,
+ output: 50,
+ },
+ releasedAt: '2024-11-20',
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 32_000,
+ description:
+ 'Hunyuan-large 模型总参数量约 389B,激活参数量约 52B,是当前业界参数规模最大、效果最好的 Transformer 架构的开源 MoE 模型。',
+ displayName: 'Hunyuan Large',
  enabled: true,
- id: 'hunyuan-pro',
+ id: 'hunyuan-large',
  maxOutput: 4000,
  pricing: {
  currency: 'CNY',
- input: 30,
- output: 100,
+ input: 4,
+ output: 12,
+ },
+ releasedAt: '2024-11-20',
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 134_000,
+ description:
+ '擅长处理长文任务如文档摘要和文档问答等,同时也具备处理通用文本生成任务的能力。在长文本的分析和生成上表现优异,能有效应对复杂和详尽的长文内容处理需求。',
+ displayName: 'Hunyuan Large Longcontext',
+ enabled: true,
+ id: 'hunyuan-large-longcontext',
+ maxOutput: 6000,
+ pricing: {
+ currency: 'CNY',
+ input: 6,
+ output: 18,
+ },
+ releasedAt: '2024-12-18',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ vision: true,
+ },
+ contextWindowTokens: 36_000,
+ description: '混元最新7B多模态模型,上下文窗口32K,支持中英文场景的多模态对话、图像物体识别、文档表格理解、多模态数学等,在多个维度上评测指标优于7B竞品模型。',
+ displayName: 'Hunyuan Lite Vision',
+ enabled: true,
+ id: 'hunyuan-lite-vision',
+ maxOutput: 4000,
+ releasedAt: '2024-12-12',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ vision: true,
  },
+ contextWindowTokens: 8000,
+ description: '混元最新多模态模型,支持多语种作答,中英文能力均衡。',
+ displayName: 'Hunyuan Standard Vision',
+ enabled: true,
+ id: 'hunyuan-standard-vision',
+ maxOutput: 2000,
+ releasedAt: '2024-12-31',
  type: 'chat',
  },
  {
@@ -87,16 +185,35 @@ const hunyuanChatModels: AIChatModelCard[] = [
  vision: true,
  },
  contextWindowTokens: 8000,
+ description: '混元新一代视觉语言旗舰大模型,采用全新的混合专家模型(MoE)结构,在图文理解相关的基础识别、内容创作、知识问答、分析推理等能力上相比前一代模型全面提升。',
+ displayName: 'Hunyuan Turbo Vision',
+ enabled: true,
+ id: 'hunyuan-turbo-vision',
+ maxOutput: 2000,
+ pricing: {
+ currency: 'CNY',
+ input: 80,
+ output: 80,
+ },
+ releasedAt: '2024-11-26',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ vision: true,
+ },
+ contextWindowTokens: 12_000,
  description: '混元最新多模态模型,支持图片+文本输入生成文本内容。',
  displayName: 'Hunyuan Vision',
  enabled: true,
  id: 'hunyuan-vision',
- maxOutput: 4000,
+ maxOutput: 6000,
  pricing: {
  currency: 'CNY',
  input: 18,
  output: 18,
  },
+ releasedAt: '2025-01-03',
  type: 'chat',
  },
  {
@@ -111,6 +228,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
  input: 4,
  output: 8,
  },
+ releasedAt: '2024-11-12',
  type: 'chat',
  },
  {
@@ -128,10 +246,11 @@ const hunyuanChatModels: AIChatModelCard[] = [
  input: 4,
  output: 8,
  },
+ releasedAt: '2024-11-15',
  type: 'chat',
  },
  {
- contextWindowTokens: 8000,
+ contextWindowTokens: 32_000,
  description:
  '混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果。',
  displayName: 'Hunyuan Role',
@@ -142,6 +261,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
  input: 4,
  output: 8,
  },
+ releasedAt: '2024-07-04',
  type: 'chat',
  },
  ];
@@ -152,9 +152,26 @@ const qwenChatModels: AIChatModelCard[] = [
  id: 'qwq-32b-preview',
  pricing: {
  currency: 'CNY',
- input: 0,
- output: 0,
+ input: 3.5,
+ output: 7,
+ },
+ releasedAt: '2024-11-28',
+ type: 'chat',
+ },
+ {
+ abilities: {
+ vision: true,
+ },
+ contextWindowTokens: 32_768,
+ description: 'QVQ模型是由 Qwen 团队开发的实验性研究模型,专注于提升视觉推理能力,尤其在数学推理领域。',
+ displayName: 'QVQ 72B Preview',
+ id: 'qvq-72b-preview',
+ pricing: {
+ currency: 'CNY',
+ input: 12,
+ output: 36,
  },
+ releasedAt: '2024-12-25',
  type: 'chat',
  },
  {
@@ -135,11 +135,13 @@ const Hunyuan: ModelProviderCard = {
  '由腾讯研发的大语言模型,具备强大的中文创作能力,复杂语境下的逻辑推理能力,以及可靠的任务执行能力',
  disableBrowserRequest: true,
  id: 'hunyuan',
+ modelList: { showModelFetcher: true },
  modelsUrl: 'https://cloud.tencent.com/document/product/1729/104753',
  name: 'Hunyuan',
  settings: {
  disableBrowserRequest: true,
  sdkType: 'openai',
+ showModelFetcher: true,
  },
  url: 'https://hunyuan.tencent.com',
  };
@@ -40,7 +40,7 @@ export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
 
  return {
  enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
- functionCall: true,
+ functionCall: !model.id.toLowerCase().includes('deepseek-reasoner'),
  id: model.id,
  };
  },
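The DeepSeek change above stops blanket-flagging every fetched model as tool-capable: the reasoner model is now excluded from function calling. A minimal sketch of the new predicate, using an illustrative helper name that is not part of the package:

```ts
// Sketch only: `supportsFunctionCall` is a hypothetical helper mirroring the
// predicate in the diff above, not an export of @lobehub/chat.
const supportsFunctionCall = (modelId: string): boolean =>
  !modelId.toLowerCase().includes('deepseek-reasoner');

console.log(supportsFunctionCall('deepseek-chat'));     // true
console.log(supportsFunctionCall('deepseek-reasoner')); // false
```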
@@ -1,10 +1,34 @@
  import { ModelProvider } from '../types';
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+ import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+ export interface HunyuanModelCard {
+ id: string;
+ }
+
  export const LobeHunyuanAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://api.hunyuan.cloud.tencent.com/v1',
  debug: {
  chatCompletion: () => process.env.DEBUG_HUNYUAN_CHAT_COMPLETION === '1',
  },
+ models: {
+ transformModel: (m) => {
+ const functionCallKeywords = [
+ 'hunyuan-functioncall',
+ 'hunyuan-turbo',
+ 'hunyuan-pro',
+ ];
+
+ const model = m as unknown as HunyuanModelCard;
+
+ return {
+ enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+ functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) && !model.id.toLowerCase().includes('vision'),
+ id: model.id,
+ vision: model.id.toLowerCase().includes('vision'),
+ };
+ },
+ },
  provider: ModelProvider.Hunyuan,
  });
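The new `models.transformModel` hook is what backs the Hunyuan model fetch support: each model ID returned by the API is mapped to capability flags by substring matching. A minimal standalone sketch of that mapping, assuming illustrative names (`HunyuanCapabilities`, `detectHunyuanCapabilities`) that are not package exports:

```ts
// Sketch only: reproduces the keyword matching applied to fetched Hunyuan model IDs.
interface HunyuanCapabilities {
  functionCall: boolean;
  vision: boolean;
}

const functionCallKeywords = ['hunyuan-functioncall', 'hunyuan-turbo', 'hunyuan-pro'];

const detectHunyuanCapabilities = (modelId: string): HunyuanCapabilities => {
  const id = modelId.toLowerCase();
  const isVision = id.includes('vision');
  return {
    // Tool calling is only flagged for the known series, and never for vision variants.
    functionCall: functionCallKeywords.some((keyword) => id.includes(keyword)) && !isVision,
    vision: isVision,
  };
};

console.log(detectHunyuanCapabilities('hunyuan-turbo-20241223')); // { functionCall: true, vision: false }
console.log(detectHunyuanCapabilities('hunyuan-turbo-vision'));   // { functionCall: false, vision: true }
```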
@@ -49,7 +49,7 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
  : undefined,
  stream: !payload.tools,
  temperature: (temperature !== undefined && temperature >= 0 && temperature < 2) ? temperature : undefined,
- ...(model.startsWith('qwen-vl') ? {
+ ...(model.startsWith('qvq') || model.startsWith('qwen-vl') ? {
  top_p: (top_p !== undefined && top_p > 0 && top_p <= 1) ? top_p : undefined,
  } : {
  top_p: (top_p !== undefined && top_p > 0 && top_p < 1) ? top_p : undefined,
@@ -67,7 +67,7 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
  debug: {
  chatCompletion: () => process.env.DEBUG_QWEN_CHAT_COMPLETION === '1',
  },
- models: {
+ models: {
  transformModel: (m) => {
  const functionCallKeywords = [
  'qwen-max',
@@ -76,13 +76,18 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
  'qwen2.5',
  ];
 
+ const visionKeywords = [
+ 'qvq',
+ 'vl',
+ ];
+
  const model = m as unknown as QwenModelCard;
 
  return {
  enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
  functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
  id: model.id,
- vision: model.id.toLowerCase().includes('vl'),
+ vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
  };
  },
  },
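For Qwen, the match-tag fix is in the vision flag: the old check keyed only on 'vl', so the newly added qvq-72b-preview card was never tagged as vision-capable; the qvq series also now gets the same top_p range as qwen-vl. A minimal sketch of the updated detection, using an illustrative helper name:

```ts
// Sketch only: `isQwenVisionModel` is a hypothetical helper mirroring the new keyword list.
const visionKeywords = ['qvq', 'vl'];

const isQwenVisionModel = (modelId: string): boolean =>
  visionKeywords.some((keyword) => modelId.toLowerCase().includes(keyword));

console.log(isQwenVisionModel('qvq-72b-preview')); // true (false before this change)
console.log(isQwenVisionModel('qwen-vl-max'));     // true
console.log(isQwenVisionModel('qwen-max'));        // false
```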
@@ -25,7 +25,13 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
  // ref: https://platform.stepfun.com/docs/llm/modeloverview
  const functionCallKeywords = [
  'step-1-',
+ 'step-1o-',
+ 'step-1v-',
  'step-2-',
+ ];
+
+ const visionKeywords = [
+ 'step-1o-',
  'step-1v-',
  ];
 
@@ -35,7 +41,7 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
  enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
  functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
  id: model.id,
- vision: model.id.toLowerCase().includes('v'),
+ vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
  };
  },
  },
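The Stepfun change replaces the single-character test `includes('v')`, which would mark any ID containing the letter 'v' as vision-capable, with an explicit keyword list, and adds the `step-1o-` series to the function-call keywords. A minimal sketch of the tightened check, using an illustrative helper name and illustrative model IDs:

```ts
// Sketch only: `isStepfunVisionModel` is a hypothetical helper mirroring the new keyword list.
const visionKeywords = ['step-1o-', 'step-1v-'];

const isStepfunVisionModel = (modelId: string): boolean =>
  visionKeywords.some((keyword) => modelId.toLowerCase().includes(keyword));

// Illustrative IDs following the keyword pattern:
console.log(isStepfunVisionModel('step-1v-8k'));  // true
console.log(isStepfunVisionModel('step-1-256k')); // false
```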