@lobehub/chat 1.87.2 → 1.87.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.desktop CHANGED
@@ -6,3 +6,4 @@ DATABASE_URL=postgresql://postgres@localhost:5432/postgres
  SEARCH_PROVIDERS=search1api
  NEXT_PUBLIC_SERVICE_MODE='server'
  NEXT_PUBLIC_IS_DESKTOP_APP=1
+ NEXT_PUBLIC_ENABLE_NEXT_AUTH=0
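The new `NEXT_PUBLIC_ENABLE_NEXT_AUTH=0` flag suggests NextAuth is switched off by default in the desktop build. A minimal sketch of how a `NEXT_PUBLIC_*` flag like this is commonly read on the client; the constant name below is an assumption, not code from the package:

```ts
// Illustrative only: reading a NEXT_PUBLIC_* flag in a Next.js client bundle.
// The constant name is an assumption, not code from @lobehub/chat.
export const enableNextAuth = process.env.NEXT_PUBLIC_ENABLE_NEXT_AUTH === '1';

// Example: skip auth-related UI when the desktop build disables NextAuth.
if (!enableNextAuth) {
  console.log('NextAuth is disabled in this desktop build.');
}
```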
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@

  # Changelog

+ ### [Version 1.87.4](https://github.com/lobehub/lobe-chat/compare/v1.87.3...v1.87.4)
+
+ <sup>Released on **2025-05-18**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Supported SenseNova v6 models correctly & update Gemini models.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Supported SenseNova v6 models correctly & update Gemini models, closes [#7778](https://github.com/lobehub/lobe-chat/issues/7778) ([e2b5ed3](https://github.com/lobehub/lobe-chat/commit/e2b5ed3))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.87.3](https://github.com/lobehub/lobe-chat/compare/v1.87.2...v1.87.3)
+
+ <sup>Released on **2025-05-17**</sup>
+
+ #### ♻ Code Refactoring
+
+ - **misc**: Clean code with new antd api.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Code refactoring
+
+ - **misc**: Clean code with new antd api, closes [#7870](https://github.com/lobehub/lobe-chat/issues/7870) ([c543884](https://github.com/lobehub/lobe-chat/commit/c543884))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.87.2](https://github.com/lobehub/lobe-chat/compare/v1.87.1...v1.87.2)

  <sup>Released on **2025-05-16**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
  [
+ {
+ "children": {
+ "fixes": [
+ "Supported SenseNova v6 models correctly & update Gemini models."
+ ]
+ },
+ "date": "2025-05-18",
+ "version": "1.87.4"
+ },
+ {
+ "children": {
+ "improvements": [
+ "Clean code with new antd api."
+ ]
+ },
+ "date": "2025-05-17",
+ "version": "1.87.3"
+ },
  {
  "children": {
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.87.2",
+ "version": "1.87.4",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -170,7 +170,7 @@
  "@xterm/xterm": "^5.5.0",
  "ahooks": "^3.8.4",
  "ai": "^3.4.33",
- "antd": "^5.24.6",
+ "antd": "^5.25.1",
  "antd-style": "^3.7.1",
  "brotli-wasm": "^3.0.1",
  "chroma-js": "^3.1.2",
@@ -28,7 +28,7 @@ const CreateGroupModal = memo<CreateGroupModalProps>(
  <div onClick={(e) => e.stopPropagation()}>
  <Modal
  allowFullscreen
- destroyOnClose
+ destroyOnHidden
  okButtonProps={{ loading }}
  onCancel={(e) => {
  setInput('');
@@ -29,7 +29,7 @@ const RenameGroupModal = memo<RenameGroupModalProps>(({ id, open, onCancel }) =>
  return (
  <Modal
  allowFullscreen
- destroyOnClose
+ destroyOnHidden
  okButtonProps={{ loading }}
  onCancel={(e) => {
  setInput(group?.name ?? '');
@@ -3,6 +3,7 @@
  import { memo } from 'react';

  import PageTitle from '@/components/PageTitle';
+ import { withSuspense } from '@/components/withSuspense';
  import { useChatStore } from '@/store/chat';
  import { topicSelectors } from '@/store/chat/selectors';
  import { useSessionStore } from '@/store/session';
@@ -15,4 +16,4 @@ const Title = memo(() => {
  return <PageTitle title={[topicTitle, agentTitle].filter(Boolean).join(' · ')} />;
  });

- export default Title;
+ export default withSuspense(Title);
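The page title component is now exported through `withSuspense`. The diff only shows the import and the wrapped export; a hypothetical sketch of what such a higher-order component typically looks like (the fallback and internals are assumptions, not the package's actual `@/components/withSuspense`):

```tsx
// Hypothetical sketch of a withSuspense HOC; the real implementation may differ
// (for example, it might render a skeleton instead of null as the fallback).
import { ComponentType, Suspense } from 'react';

export const withSuspense = <P extends object>(Component: ComponentType<P>) => {
  const WithSuspense = (props: P) => (
    // Suspend rendering of lazy children until they resolve.
    <Suspense fallback={null}>
      <Component {...props} />
    </Suspense>
  );
  return WithSuspense;
};
```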
@@ -39,7 +39,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ showAzureDeployName, pro

  return (
  <Modal
- destroyOnClose
+ destroyOnHidden
  footer={[
  <Button key="cancel" onClick={closeModal}>
  {tc('cancel')}
@@ -30,7 +30,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ open, setOpen }) => {

  return (
  <Modal
- destroyOnClose
+ destroyOnHidden
  footer={[
  <Button key="cancel" onClick={closeModal}>
  {t('cancel', { ns: 'common' })}
@@ -32,7 +32,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ id, open, setOpen }) =>

  return (
  <Modal
- destroyOnClose
+ destroyOnHidden
  footer={[
  <Button key="cancel" onClick={closeModal}>
  {t('cancel')}
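These hunks are the "clean code with new antd api" refactor paired with the `antd` bump to `^5.25.1`: the Modal and Drawer prop `destroyOnClose` is swapped for `destroyOnHidden` everywhere it appears. A minimal before/after sketch of the rename; the component and handler names below are placeholders, not code from the package:

```tsx
// Minimal migration sketch for the destroyOnClose -> destroyOnHidden rename
// shown in these hunks; ExampleModal and its props are illustrative only.
import { Modal } from 'antd';

// Before: <Modal destroyOnClose open={open}>…</Modal>
// After: same behaviour under the renamed prop.
const ExampleModal = ({ open, onClose }: { open: boolean; onClose: () => void }) => (
  <Modal destroyOnHidden onCancel={onClose} open={open}>
    <span>modal body</span>
  </Modal>
);

export default ExampleModal;
```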
@@ -1,31 +1,6 @@
  import { AIChatModelCard } from '@/types/aiModel';

  const googleChatModels: AIChatModelCard[] = [
- {
- abilities: {
- functionCall: true,
- reasoning: true,
- search: true,
- vision: true,
- },
- contextWindowTokens: 1_048_576 + 65_536,
- description:
- 'Gemini 2.5 Pro Experimental 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
- displayName: 'Gemini 2.5 Pro Experimental 03-25',
- enabled: true,
- id: 'gemini-2.5-pro-exp-03-25',
- maxOutput: 65_536,
- pricing: {
- input: 0,
- output: 0,
- },
- releasedAt: '2025-03-25',
- settings: {
- searchImpl: 'params',
- searchProvider: 'google',
- },
- type: 'chat',
- },
  {
  abilities: {
  functionCall: true,
@@ -59,13 +34,13 @@ const googleChatModels: AIChatModelCard[] = [
  },
  contextWindowTokens: 1_048_576 + 65_536,
  description:
- 'Gemini 2.5 Pro Preview 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
- displayName: 'Gemini 2.5 Pro Preview 03-25 (Paid)',
- id: 'gemini-2.5-pro-preview-03-25',
+ 'Gemini 2.5 Pro Experimental 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
+ displayName: 'Gemini 2.5 Pro Experimental 03-25',
+ id: 'gemini-2.5-pro-exp-03-25',
  maxOutput: 65_536,
  pricing: {
- input: 1.25, // prompts <= 200k tokens
- output: 10, // prompts <= 200k tokens
+ input: 0,
+ output: 0,
  },
  releasedAt: '2025-03-25',
  settings: {
@@ -98,26 +73,6 @@ const googleChatModels: AIChatModelCard[] = [
  },
  type: 'chat',
  },
- {
- abilities: {
- reasoning: true,
- vision: true,
- },
- contextWindowTokens: 1_048_576 + 65_536,
- description:
- 'Gemini 2.0 Flash Thinking Exp 是 Google 的实验性多模态推理AI模型,能对复杂问题进行推理,拥有新的思维能力。',
- displayName: 'Gemini 2.0 Flash Thinking Experimental 01-21',
- enabled: true,
- id: 'gemini-2.0-flash-thinking-exp-01-21',
- maxOutput: 65_536,
- pricing: {
- cachedInput: 0,
- input: 0,
- output: 0,
- },
- releasedAt: '2025-01-21',
- type: 'chat',
- },
  {
  abilities: {
  functionCall: true,
@@ -128,7 +83,6 @@ const googleChatModels: AIChatModelCard[] = [
  description:
  'Gemini 2.0 Flash 提供下一代功能和改进,包括卓越的速度、原生工具使用、多模态生成和1M令牌上下文窗口。',
  displayName: 'Gemini 2.0 Flash',
- enabled: true,
  id: 'gemini-2.0-flash',
  maxOutput: 8192,
  pricing: {
@@ -167,6 +121,24 @@ const googleChatModels: AIChatModelCard[] = [
  },
  type: 'chat',
  },
+ {
+ abilities: {
+ imageOutput: true,
+ vision: true,
+ },
+ contextWindowTokens: 32_768 + 8192,
+ description: 'Gemini 2.0 Flash 预览模型,支持图像生成',
+ displayName: 'Gemini 2.0 Flash Preview Image Generation',
+ enabled: true,
+ id: 'gemini-2.0-flash-preview-image-generation',
+ maxOutput: 8192,
+ pricing: {
+ input: 0.1,
+ output: 0.039, // per image
+ },
+ releasedAt: '2025-05-07',
+ type: 'chat',
+ },
  {
  abilities: {
  imageOutput: true,
@@ -175,7 +147,6 @@ const googleChatModels: AIChatModelCard[] = [
  contextWindowTokens: 1_048_576 + 8192,
  description: 'Gemini 2.0 Flash 实验模型,支持图像生成',
  displayName: 'Gemini 2.0 Flash (Image Generation) Experimental',
- enabled: true,
  id: 'gemini-2.0-flash-exp-image-generation',
  maxOutput: 8192,
  pricing: {
@@ -221,6 +192,7 @@ const googleChatModels: AIChatModelCard[] = [
  },
  {
  abilities: {
+ imageOutput: true,
  vision: true,
  },
  contextWindowTokens: 1_048_576 + 8192,
@@ -235,6 +207,23 @@ const googleChatModels: AIChatModelCard[] = [
  releasedAt: '2025-02-05',
  type: 'chat',
  },
+ {
+ abilities: {
+ vision: true,
+ },
+ contextWindowTokens: 1_048_576 + 32_768,
+ description:
+ 'LearnLM 是一个实验性的、特定于任务的语言模型,经过训练以符合学习科学原则,可在教学和学习场景中遵循系统指令,充当专家导师等。',
+ displayName: 'LearnLM 2.0 Flash Experimental',
+ id: 'learnlm-2.0-flash-experimental',
+ maxOutput: 32_768,
+ pricing: {
+ cachedInput: 0,
+ input: 0,
+ output: 0,
+ },
+ type: 'chat',
+ },
  {
  abilities: {
  vision: true,
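Every provider file touched in this release builds arrays of `AIChatModelCard` entries. A rough sketch of the fields exercised by these hunks, inferred from the diff rather than from `@/types/aiModel` itself, which may declare more members or different optionality:

```ts
// Field shapes inferred from the entries in this diff; not the package's real type.
interface AIChatModelCardSketch {
  abilities?: {
    functionCall?: boolean;
    imageOutput?: boolean;
    reasoning?: boolean;
    search?: boolean;
    vision?: boolean;
  };
  contextWindowTokens: number; // often written as input + output, e.g. 1_048_576 + 65_536
  description?: string;
  displayName: string;
  enabled?: boolean; // whether the model is switched on by default
  id: string;
  maxOutput?: number;
  pricing?: { cachedInput?: number; currency?: string; input?: number; output?: number };
  releasedAt?: string;
  settings?: { searchImpl?: string; searchProvider?: string };
  type: 'chat';
}

// Example entry in the same shape as the new Gemini preview model above.
const example: AIChatModelCardSketch = {
  abilities: { imageOutput: true, vision: true },
  contextWindowTokens: 32_768 + 8192,
  displayName: 'Gemini 2.0 Flash Preview Image Generation',
  id: 'gemini-2.0-flash-preview-image-generation',
  maxOutput: 8192,
  type: 'chat',
};
```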
@@ -179,6 +179,17 @@ const groqChatModels: AIChatModelCard[] = [
  },
  type: 'chat',
  },
+ {
+ contextWindowTokens: 131_072,
+ displayName: 'Llama Guard 4 12B',
+ id: 'meta-llama/Llama-Guard-4-12B',
+ maxOutput: 128,
+ pricing: {
+ input: 0.2,
+ output: 0.2,
+ },
+ type: 'chat',
+ },
  {
  contextWindowTokens: 8192,
  displayName: 'Llama Guard 3 8B',
@@ -1,7 +1,106 @@
  import { AIChatModelCard } from '@/types/aiModel';
-
  // https://novita.ai/pricing
  const novitaChatModels: AIChatModelCard[] = [
+ {
+ contextWindowTokens: 32_000,
+ displayName: 'Qwen3 0.6B FP8',
+ id: 'qwen/qwen3-0.6b-fp8',
+ pricing: {
+ input: 0,
+ output: 0
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 32_000,
+ displayName: 'Qwen3 1.7B FP8',
+ id: 'qwen/qwen3-1.7b-fp8',
+ pricing: {
+ input: 0,
+ output: 0
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 128_000,
+ displayName: 'Qwen3 4B FP8',
+ id: 'qwen/qwen3-4b-fp8',
+ pricing: {
+ input: 0,
+ output: 0
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 160_000,
+ displayName: 'Deepseek Prover V2 671B',
+ id: 'deepseek/deepseek-prover-v2-671b',
+ pricing: {
+ input: 0.7,
+ output: 2.5
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 40_960,
+ displayName: 'Qwen3 235B A22B FP8',
+ id: 'qwen/qwen3-235b-a22b-fp8',
+ pricing: {
+ input: 0.2,
+ output: 0.8
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 40_960,
+ displayName: 'Qwen3 30B A3B FP8',
+ id: 'qwen/qwen3-30b-a3b-fp8',
+ pricing: {
+ input: 0.1,
+ output: 0.45
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 40_960,
+ displayName: 'Qwen3 32B FP8',
+ id: 'qwen/qwen3-32b-fp8',
+ pricing: {
+ input: 0.1,
+ output: 0.45
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 131_072,
+ displayName: 'Llama 3.3 70B Instruct',
+ id: 'meta-llama/llama-3.3-70b-instruct',
+ pricing: {
+ input: 0.13,
+ output: 0.39
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 128_000,
+ displayName: 'Qwen3 8B FP8',
+ id: 'qwen/qwen3-8b-fp8',
+ pricing: {
+ input: 0.035,
+ output: 0.138
+ },
+ type: 'chat',
+ },
+ {
+ contextWindowTokens: 40_960,
+ displayName: 'Qwen3 14B FP8',
+ id: 'qwen/qwen3-14b-fp8',
+ pricing: {
+ input: 0.07,
+ output: 0.275
+ },
+ type: 'chat',
+ },
  {
  contextWindowTokens: 131_072,
  displayName: 'Llama 4 Scout 17B Instruct',
@@ -469,4 +568,4 @@ const novitaChatModels: AIChatModelCard[] = [

  export const allModels = [...novitaChatModels];

- export default allModels;
+ export default allModels;
@@ -9,7 +9,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  reasoning: true,
  vision: true,
  },
- contextWindowTokens: 131_072,
+ contextWindowTokens: 32_768,
  description: '兼顾视觉、语言深度推理,实现慢思考和深度推理,呈现完整的思维链过程。',
  displayName: 'SenseNova V6 Reasoner',
  enabled: true,
@@ -24,10 +24,9 @@ const sensenovaChatModels: AIChatModelCard[] = [
  },
  {
  abilities: {
- reasoning: true,
  vision: true,
  },
- contextWindowTokens: 131_072,
+ contextWindowTokens: 32_768,
  description:
  '实现图片、文本、视频能力的原生统一,突破传统多模态分立局限,在多模基础能力、语言基础能力等核心维度全面领先,文理兼修,在多项测评中多次位列国内外第一梯队水平。',
  displayName: 'SenseNova V6 Turbo',
@@ -45,7 +44,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  abilities: {
  vision: true,
  },
- contextWindowTokens: 131_072,
+ contextWindowTokens: 32_768,
  description:
  '实现图片、文本、视频能力的原生统一,突破传统多模态分立局限,在OpenCompass和SuperCLUE评测中斩获双冠军。',
  displayName: 'SenseNova V6 Pro',
@@ -53,21 +52,33 @@ const sensenovaChatModels: AIChatModelCard[] = [
  id: 'SenseNova-V6-Pro',
  pricing: {
  currency: 'CNY',
- input: 9,
- output: 3,
+ input: 3,
+ output: 9,
  },
  releasedAt: '2025-04-14',
  type: 'chat',
  },
+ {
+ contextWindowTokens: 32_768,
+ description: '部分性能优于 SenseCat-5-1202',
+ displayName: 'SenseChat 5.5 Beta',
+ id: 'SenseChat-5-beta',
+ pricing: {
+ currency: 'CNY',
+ input: 8,
+ output: 20,
+ },
+ type: 'chat',
+ },
  {
  abilities: {
  functionCall: true,
  },
- contextWindowTokens: 131_072,
+ contextWindowTokens: 32_768,
  description:
  '是基于V5.5的最新版本,较上版本在中英文基础能力,聊天,理科知识, 文科知识,写作,数理逻辑,字数控制 等几个维度的表现有显著提升。',
  displayName: 'SenseChat 5.5 1202',
- id: 'SenseChat-5-1202',
+ id: 'SenseCat-5-1202',
  pricing: {
  currency: 'CNY',
  input: 8,
@@ -101,10 +112,11 @@ const sensenovaChatModels: AIChatModelCard[] = [
  '最新版本模型 (V5.5),128K上下文长度,在数学推理、英文对话、指令跟随以及长文本理解等领域能力显著提升,比肩GPT-4o。',
  displayName: 'SenseChat 5.5',
  id: 'SenseChat-5',
+ maxOutput: 131_072,
  pricing: {
  currency: 'CNY',
- input: 40,
- output: 100,
+ input: 8,
+ output: 20,
  },
  type: 'chat',
  },
@@ -112,11 +124,12 @@ const sensenovaChatModels: AIChatModelCard[] = [
  abilities: {
  vision: true,
  },
- contextWindowTokens: 32_768,
+ contextWindowTokens: 16_384,
  description:
  '最新版本模型 (V5.5),支持多图的输入,全面实现模型基础能力优化,在对象属性识别、空间关系、动作事件识别、场景理解、情感识别、逻辑常识推理和文本理解生成上都实现了较大提升。',
  displayName: 'SenseChat 5.5 Vision',
  id: 'SenseChat-Vision',
+ maxOutput: 16_384,
  pricing: {
  currency: 'CNY',
  input: 10, // 限时优惠
@@ -133,10 +146,11 @@ const sensenovaChatModels: AIChatModelCard[] = [
  description: '适用于快速问答、模型微调场景',
  displayName: 'SenseChat 5.0 Turbo',
  id: 'SenseChat-Turbo',
+ maxOutput: 32_768,
  pricing: {
  currency: 'CNY',
- input: 2,
- output: 5,
+ input: 0.3,
+ output: 0.6,
  },
  type: 'chat',
  },
@@ -145,6 +159,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  description: '基础版本模型 (V4),128K上下文长度,在长文本理解及生成等任务中表现出色',
  displayName: 'SenseChat 4.0 128K',
  id: 'SenseChat-128K',
+ maxOutput: 131_072,
  pricing: {
  currency: 'CNY',
  input: 60,
@@ -157,6 +172,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  description: '基础版本模型 (V4),32K上下文长度,灵活应用于各类场景',
  displayName: 'SenseChat 4.0 32K',
  id: 'SenseChat-32K',
+ maxOutput: 32_768,
  pricing: {
  currency: 'CNY',
  input: 36,
@@ -169,6 +185,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  description: '基础版本模型 (V4),4K上下文长度,通用能力强大',
  displayName: 'SenseChat 4.0 4K',
  id: 'SenseChat',
+ maxOutput: 4096,
  pricing: {
  currency: 'CNY',
  input: 12,
@@ -182,6 +199,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  '专门为适应香港地区的对话习惯、俚语及本地知识而设计,在粤语的对话理解上超越了GPT-4,在知识、推理、数学及代码编写等多个领域均能与GPT-4 Turbo相媲美。',
  displayName: 'SenseChat 5.0 Cantonese',
  id: 'SenseChat-5-Cantonese',
+ maxOutput: 32_768,
  pricing: {
  currency: 'CNY',
  input: 27,
@@ -194,6 +212,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  description: '拟人对话标准版模型,8K上下文长度,高响应速度',
  displayName: 'SenseChat Character',
  id: 'SenseChat-Character',
+ maxOutput: 1024,
  pricing: {
  currency: 'CNY',
  input: 12,
@@ -206,6 +225,7 @@ const sensenovaChatModels: AIChatModelCard[] = [
  description: '拟人对话高级版模型,32K上下文长度,能力全面提升,支持中/英文对话',
  displayName: 'SenseChat Character Pro',
  id: 'SenseChat-Character-Pro',
+ maxOutput: 4096,
  pricing: {
  currency: 'CNY',
  input: 15,
@@ -50,7 +50,32 @@ const CommonSTT = memo<{

  return (
  <Dropdown
- dropdownRender={
+ menu={{
+ activeKey: 'time',
+ items: [
+ {
+ key: 'title',
+ label: (
+ <Flexbox>
+ <div style={{ fontWeight: 'bolder' }}>{t('stt.action')}</div>
+ </Flexbox>
+ ),
+ },
+ {
+ key: 'time',
+ label: (
+ <Flexbox align={'center'} gap={8} horizontal>
+ <div className={styles.recording} />
+ {time > 0 ? formattedTime : t(isRecording ? 'stt.loading' : 'stt.prettifying')}
+ </Flexbox>
+ ),
+ },
+ ],
+ }}
+ onOpenChange={handleDropdownVisibleChange}
+ open={dropdownOpen || !!error || isRecording || isLoading}
+ placement={mobile ? 'topRight' : 'top'}
+ popupRender={
  error
  ? () => (
  <Alert
@@ -79,31 +104,6 @@ const CommonSTT = memo<{
  )
  : undefined
  }
- menu={{
- activeKey: 'time',
- items: [
- {
- key: 'title',
- label: (
- <Flexbox>
- <div style={{ fontWeight: 'bolder' }}>{t('stt.action')}</div>
- </Flexbox>
- ),
- },
- {
- key: 'time',
- label: (
- <Flexbox align={'center'} gap={8} horizontal>
- <div className={styles.recording} />
- {time > 0 ? formattedTime : t(isRecording ? 'stt.loading' : 'stt.prettifying')}
- </Flexbox>
- ),
- },
- ],
- }}
- onOpenChange={handleDropdownVisibleChange}
- open={dropdownOpen || !!error || isRecording || isLoading}
- placement={mobile ? 'topRight' : 'top'}
  trigger={['click']}
  >
  <ActionIcon
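This hunk moves the existing `menu`, `onOpenChange`, `open`, and `placement` props above the custom renderer and swaps the Dropdown prop `dropdownRender` for `popupRender`, the name used by the newer antd API in this diff. A minimal sketch of the renamed prop; the menu items and trigger element below are placeholders, not the package's STT dropdown:

```tsx
// Sketch of the Dropdown popupRender prop as used in this hunk; items are placeholders.
import { Dropdown } from 'antd';

const Example = () => (
  <Dropdown
    menu={{ items: [{ key: 'demo', label: 'demo item' }] }}
    // popupRender replaces the deprecated dropdownRender for custom popup content.
    popupRender={(menu) => <div style={{ padding: 4 }}>{menu}</div>}
    trigger={['click']}
  >
    <button type="button">open</button>
  </Dropdown>
);

export default Example;
```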
@@ -110,7 +110,7 @@ const DevModal = memo<DevModalProps>(
  >
  <Drawer
  containerMaxWidth={'auto'}
- destroyOnClose
+ destroyOnHidden
  footer={footer}
  height={'100vh'}
  onClose={(e) => {
@@ -38,11 +38,13 @@ const modelsOffSafetySettings = new Set(['gemini-2.0-flash-exp']);
  const modelsWithModalities = new Set([
  'gemini-2.0-flash-exp',
  'gemini-2.0-flash-exp-image-generation',
+ 'gemini-2.0-flash-preview-image-generation',
  ]);

  const modelsDisableInstuction = new Set([
  'gemini-2.0-flash-exp',
  'gemini-2.0-flash-exp-image-generation',
+ 'gemini-2.0-flash-preview-image-generation',
  ]);

  export interface GoogleModelCard {
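The new preview model id is added to both allow-lists. A hypothetical sketch of how a set like `modelsWithModalities` is typically consulted when building a Gemini request; `buildGenerationConfig` and the `responseModalities` usage below are assumptions, not the package's actual request builder:

```ts
// Hypothetical gating sketch; only the model ids come from the diff above.
const modelsWithModalities = new Set([
  'gemini-2.0-flash-exp',
  'gemini-2.0-flash-exp-image-generation',
  'gemini-2.0-flash-preview-image-generation',
]);

const buildGenerationConfig = (model: string) => ({
  // Only ask for image output from models on the allow-list.
  responseModalities: modelsWithModalities.has(model) ? ['TEXT', 'IMAGE'] : undefined,
});

// Example: the new preview model gets image output enabled.
console.log(buildGenerationConfig('gemini-2.0-flash-preview-image-generation'));
```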
@@ -12,7 +12,8 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://api.sensenova.cn/compatible-mode/v1',
  chatCompletion: {
  handlePayload: (payload) => {
- const { frequency_penalty, messages, model, temperature, top_p, ...rest } = payload;
+ const { frequency_penalty, max_tokens, messages, model, temperature, top_p, ...rest } =
+ payload;

  return {
  ...rest,
@@ -20,8 +21,9 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
  frequency_penalty !== undefined && frequency_penalty > 0 && frequency_penalty <= 2
  ? frequency_penalty
  : undefined,
+ max_new_tokens: max_tokens !== undefined && max_tokens > 0 ? max_tokens : undefined,
  messages: messages.map((message) =>
- message.role !== 'user' || !/^Sense(Nova-V6|Chat-Vision)/.test(model)
+ message.role !== 'user' || !model || !/^Sense(Nova-V6|Chat-Vision)/.test(model)
  ? message
  : { ...message, content: convertSenseNovaMessage(message.content) },
  ) as any[],
@@ -41,11 +43,11 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
  models: async ({ client }) => {
  const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

- const functionCallKeywords = ['sensechat-5'];
+ const functionCallKeywords = ['1202'];

  const visionKeywords = ['vision', 'sensenova-v6'];

- const reasoningKeywords = ['deepseek-r1', 'reasoner'];
+ const reasoningKeywords = ['deepseek-r1', 'reasoner'];

  client.baseURL = 'https://api.sensenova.cn/v1/llm';

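The payload handler now pulls `max_tokens` out of the OpenAI-style request and forwards it to SenseNova as `max_new_tokens`, and it guards the model regex against an undefined `model`. A condensed sketch of just that mapping, lifted out of the factory wiring; the payload type below is simplified and assumed:

```ts
// Standalone sketch of the max_tokens -> max_new_tokens mapping from this hunk.
// The payload shape is simplified; the real handler runs inside LobeOpenAICompatibleFactory.
interface SketchPayload {
  max_tokens?: number;
  model?: string;
  [key: string]: unknown;
}

const mapMaxTokens = ({ max_tokens, model, ...rest }: SketchPayload) => ({
  ...rest,
  // SenseNova expects max_new_tokens; only forward a positive value.
  max_new_tokens: max_tokens !== undefined && max_tokens > 0 ? max_tokens : undefined,
  model,
});

// Example result: { temperature: 0.7, max_new_tokens: 1024, model: 'SenseNova-V6-Turbo' }
console.log(mapMaxTokens({ max_tokens: 1024, model: 'SenseNova-V6-Turbo', temperature: 0.7 }));
```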
@@ -4,18 +4,29 @@ export const convertSenseNovaMessage = (content: any) => {
  return [{ text: content, type: 'text' }];
  }

+ // 如果内容为空或不是数组,返回空数组避免后续错误
+ if (!Array.isArray(content)) {
+ return [];
+ }
+
  // 如果内容包含图片内容,则需要对 array 类 content,进行格式转换
  return content
- ?.map((item: any) => {
+ .map((item: any) => {
+ // 如果项为空,跳过处理
+ if (!item) return null;
+
  // 如果为 content,则格式转换为 text 类
  if (item.type === 'text') return item;

  // 如果为 image_url,则格式转换为 image_url 类
- if (item.type === 'image_url' && item.image_url?.url) {
+ if (item.type === 'image_url' && item.image_url) {
  const url = item.image_url.url;

+ // 确保 URL 存在且为字符串
+ if (!url || typeof url !== 'string') return null;
+
  // 如果 image_url 为 base64 格式,则返回 image_base64 类,否则返回 image_url 类
- return url.startsWith('data:image/jpeg;base64')
+ return url.startsWith('data:image/jpeg;base64') || url.startsWith('data:image/png;base64')
  ? {
  image_base64: url.split(',')[1],
  type: 'image_base64',
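The converter now tolerates non-array content, null items, and URL-less `image_url` objects, and it recognises PNG data URLs alongside JPEG. A hedged usage sketch of the behaviour these branches imply; the expected outputs are inferred from the visible part of the hunk, and the declaration merely stands in for the helper, whose file path is not shown in this diff:

```ts
// Hedged usage sketch: expected behaviour of convertSenseNovaMessage per the branches above.
declare const convertSenseNovaMessage: (content: any) => any;

// Plain string -> single text part: [{ text: 'hello', type: 'text' }]
convertSenseNovaMessage('hello');

// Non-array content now short-circuits to [] instead of throwing.
convertSenseNovaMessage({ unexpected: true });

// PNG data URLs are now recognised alongside JPEG and mapped to image_base64 parts:
// [{ text: '…', type: 'text' }, { image_base64: 'iVBORw0KGgo', type: 'image_base64' }]
convertSenseNovaMessage([
  { text: 'describe this image', type: 'text' },
  { image_url: { url: 'data:image/png;base64,iVBORw0KGgo' }, type: 'image_url' },
]);
```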