@lobehub/chat 1.87.7 → 1.87.9

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@

  # Changelog

+ ### [Version 1.87.9](https://github.com/lobehub/lobe-chat/compare/v1.87.8...v1.87.9)
+
+ <sup>Released on **2025-05-23**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Resolve InputNumber display overlap issue.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Resolve InputNumber display overlap issue, closes [#7892](https://github.com/lobehub/lobe-chat/issues/7892) ([5486663](https://github.com/lobehub/lobe-chat/commit/5486663))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.87.8](https://github.com/lobehub/lobe-chat/compare/v1.87.7...v1.87.8)
+
+ <sup>Released on **2025-05-22**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: 'top_p' is not supported with o4-mini, pin zustand version to avoid type error.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: 'top_p' is not supported with o4-mini, closes [#7747](https://github.com/lobehub/lobe-chat/issues/7747) ([4e04399](https://github.com/lobehub/lobe-chat/commit/4e04399))
+ - **misc**: Pin zustand version to avoid type error, closes [#7929](https://github.com/lobehub/lobe-chat/issues/7929) ([4f6e286](https://github.com/lobehub/lobe-chat/commit/4f6e286))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.87.7](https://github.com/lobehub/lobe-chat/compare/v1.87.6...v1.87.7)

  <sup>Released on **2025-05-21**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
  [
+   {
+     "children": {
+       "improvements": [
+         "Resolve InputNumber display overlap issue."
+       ]
+     },
+     "date": "2025-05-23",
+     "version": "1.87.9"
+   },
+   {
+     "children": {
+       "fixes": [
+         "'top_p' is not supported with o4-mini, pin zustand version to avoid type error."
+       ]
+     },
+     "date": "2025-05-22",
+     "version": "1.87.8"
+   },
    {
      "children": {
        "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.87.7",
+   "version": "1.87.9",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -268,7 +268,7 @@
      "yaml": "^2.8.0",
      "yjs": "^13.6.27",
      "zod": "^3.25.7",
-     "zustand": "^5.0.4",
+     "zustand": "5.0.4",
      "zustand-utils": "^2.1.0"
    },
    "devDependencies": {
@@ -256,6 +256,16 @@ const novitaChatModels: AIChatModelCard[] = [
      },
      type: 'chat',
    },
+   {
+     contextWindowTokens: 160_000,
+     displayName: 'Deepseek Prover V2 671B',
+     id: 'deepseek/deepseek-prover-v2-671b',
+     pricing: {
+       input: 0.7,
+       output: 2.5,
+     },
+     type: 'chat',
+   },
    {
      contextWindowTokens: 64_000,
      displayName: 'Deepseek V3 Turbo',
@@ -299,7 +309,7 @@ const novitaChatModels: AIChatModelCard[] = [
      enabled: true,
      id: 'deepseek/deepseek-v3-0324',
      pricing: {
-       input: 0.37,
+       input: 0.33,
        output: 1.3,
      },
      type: 'chat',
@@ -479,7 +479,7 @@ export const openaiChatModels: AIChatModelCard[] = [
    {
      contextWindowTokens: 4096,
      description:
-       'GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125',
+       'GPT 3.5 Turbo,适用于各种文本生成和理解任务,对指令遵循的优化',
      displayName: 'GPT-3.5 Turbo Instruct',
      id: 'gpt-3.5-turbo-instruct',
      pricing: {
@@ -16,8 +16,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
      organization: 'Qwen',
      pricing: {
        currency: 'CNY',
-       input: 1.25,
-       output: 5,
+       input: 2.5,
+       output: 10,
      },
      releasedAt: '2025-04-28',
      settings: {
@@ -38,8 +38,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
      organization: 'Qwen',
      pricing: {
        currency: 'CNY',
-       input: 0.5,
-       output: 2,
+       input: 1,
+       output: 4,
      },
      releasedAt: '2025-04-28',
      settings: {
@@ -60,8 +60,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
      organization: 'Qwen',
      pricing: {
        currency: 'CNY',
-       input: 0.35,
-       output: 1.4,
+       input: 0.7,
+       output: 2.8,
      },
      releasedAt: '2025-04-28',
      settings: {
@@ -82,8 +82,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
      organization: 'Qwen',
      pricing: {
        currency: 'CNY',
-       input: 0.25,
-       output: 1,
+       input: 0.5,
+       output: 2,
      },
      releasedAt: '2025-04-28',
      settings: {
@@ -40,6 +40,11 @@ const Controls = memo<ControlsProps>(({ updating, setUpdating }) => {
          size={'small'}
          step={1}
          style={{ marginBlock: 8, paddingLeft: 4 }}
+         styles={{
+           input: {
+             maxWidth: 64,
+           },
+         }}
        />
      ),
      name: 'historyCount',
@@ -29,6 +29,11 @@ const FrequencyPenalty = memo<FrequencyPenaltyProps>(({ value, onChange }) => {
        onChange={onChange}
        size={'small'}
        step={0.1}
+       styles={{
+         input: {
+           maxWidth: 64,
+         },
+       }}
        value={value}
      />
    </Flexbox>
@@ -27,6 +27,11 @@ const PresencePenalty = memo<PresencePenaltyProps>(({ value, onChange }) => {
        onChange={onChange}
        size={'small'}
        step={0.1}
+       styles={{
+         input: {
+           maxWidth: 64,
+         },
+       }}
        value={value}
      />
    </Flexbox>
@@ -61,6 +61,11 @@ const Temperature = memo<TemperatureProps>(({ value, onChange }) => {
        size={'small'}
        step={0.1}
        style={{ height: 48 }}
+       styles={{
+         input: {
+           maxWidth: 64,
+         },
+       }}
        value={value}
      />
      <Warning />
@@ -31,6 +31,11 @@ const TopP = memo<TopPProps>(({ value, onChange }) => {
        onChange={onChange}
        size={'small'}
        step={0.1}
+       styles={{
+         input: {
+           maxWidth: 64,
+         },
+       }}
        value={value}
      />
    </Flexbox>
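
The five hunks above apply one identical fix for the InputNumber overlap reported in #7892: capping the control's inner input at 64px via a `styles.input` slot. A minimal plain-React sketch of the underlying idea (generic stand-in markup, not the project's actual components):

```tsx
import React from 'react';

// Minimal repro of the bug: in a tight flex row, an unconstrained number
// input can expand over the slider next to it. Capping its width, as the
// diff's `styles={{ input: { maxWidth: 64 } }}` does, restores the layout.
export const ParamControl = ({ value }: { value: number }) => (
  <div style={{ alignItems: 'center', display: 'flex', gap: 8, width: 200 }}>
    <input style={{ flex: 1, minWidth: 0 }} type="range" />
    <input
      defaultValue={value}
      step={0.1}
      style={{ maxWidth: 64 }} // the width cap added throughout the diff
      type="number"
    />
  </div>
);
```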
@@ -56,9 +56,9 @@ export class LobeAzureAI implements LobeRuntimeAI {
          model,
          ...params,
          stream: enableStreaming,
-         temperature: model.includes('o3') ? undefined : temperature,
+         temperature: (model.includes('o3') || model.includes('o4')) ? undefined : temperature,
          tool_choice: params.tools ? 'auto' : undefined,
-         top_p: model.includes('o3') ? undefined : top_p,
+         top_p: (model.includes('o3') || model.includes('o4')) ? undefined : top_p,
        },
      });

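This hunk widens the existing o3 guard to o4-family deployments, since those reasoning models reject the `temperature` and `top_p` sampling parameters (the o4-mini failure behind #7747). A standalone sketch of the pattern; the helper name is mine, the project inlines the checks as shown above:

```ts
// Model families that reject sampling parameters, per the guard above.
const REASONING_MARKERS = ['o3', 'o4'];

const isReasoningModel = (model: string) =>
  REASONING_MARKERS.some((marker) => model.includes(marker));

// Send `undefined` for temperature/top_p on reasoning models so the fields
// are omitted from the request; pass them through for everything else.
const buildSamplingParams = (model: string, temperature?: number, top_p?: number) => ({
  temperature: isReasoningModel(model) ? undefined : temperature,
  top_p: isReasoningModel(model) ? undefined : top_p,
});

// buildSamplingParams('o4-mini', 0.7, 0.9) -> both undefined
// buildSamplingParams('gpt-4.1', 0.7, 0.9) -> { temperature: 0.7, top_p: 0.9 }
```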
@@ -42,7 +42,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": undefined,
      "displayName": undefined,
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-3.5-turbo-16k",
      "reasoning": false,
      "vision": false,
@@ -69,7 +69,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": undefined,
      "displayName": undefined,
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-3.5-turbo-16k-0613",
      "reasoning": false,
      "vision": false,
@@ -87,16 +87,16 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": undefined,
      "displayName": undefined,
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-4-1106-vision-preview",
      "reasoning": false,
-     "vision": true,
+     "vision": false,
    },
    {
      "contextWindowTokens": undefined,
      "displayName": undefined,
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-3.5-turbo-instruct-0914",
      "reasoning": false,
      "vision": false,
@@ -123,7 +123,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": 4096,
      "displayName": "GPT-3.5 Turbo Instruct",
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-3.5-turbo-instruct",
      "reasoning": false,
      "vision": false,
@@ -132,7 +132,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": undefined,
      "displayName": undefined,
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-3.5-turbo-0301",
      "reasoning": false,
      "vision": false,
@@ -141,7 +141,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": undefined,
      "displayName": undefined,
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-3.5-turbo-0613",
      "reasoning": false,
      "vision": false,
@@ -204,7 +204,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "contextWindowTokens": 128000,
      "displayName": "GPT 4 Turbo with Vision Preview",
      "enabled": false,
-     "functionCall": true,
+     "functionCall": false,
      "id": "gpt-4-vision-preview",
      "reasoning": false,
      "vision": true,
@@ -8,13 +8,15 @@ export interface OpenAIModelCard {
    id: string;
  }

+ const prunePrefixes = ['o1', 'o3', 'o4'];
+
  export const LobeOpenAI = LobeOpenAICompatibleFactory({
    baseURL: 'https://api.openai.com/v1',
    chatCompletion: {
      handlePayload: (payload) => {
        const { model } = payload;

-       if (model.startsWith('o1') || model.startsWith('o3')) {
+       if (prunePrefixes.some(prefix => model.startsWith(prefix))) {
          return pruneReasoningPayload(payload) as any;
        }

@@ -38,11 +40,11 @@ export const LobeOpenAI = LobeOpenAICompatibleFactory({
    models: async ({ client }) => {
      const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

-     const functionCallKeywords = ['gpt-4', 'gpt-3.5', 'o3-mini'];
+     const functionCallKeywords = ['4o', '4.1', 'o3', 'o4'];

-     const visionKeywords = ['gpt-4o', 'vision'];
+     const visionKeywords = ['4o', '4.1', 'o4'];

-     const reasoningKeywords = ['o1', 'o3'];
+     const reasoningKeywords = ['o1', 'o3', 'o4'];

      const modelsPage = (await client.models.list()) as any;
      const modelList: OpenAIModelCard[] = modelsPage.data;
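
The keyword rewrite above also explains the snapshot churn earlier in this diff: with 'gpt-4' and 'gpt-3.5' dropped from `functionCallKeywords`, legacy IDs such as `gpt-3.5-turbo-16k` and `gpt-4-vision-preview` no longer match, so their `functionCall` (and in one case `vision`) flags flip to `false`. A hedged sketch of how such substring lists classify model IDs (the helper is illustrative; the real logic lives in the factory's model-list processing):

```ts
const functionCallKeywords = ['4o', '4.1', 'o3', 'o4'];
const visionKeywords = ['4o', '4.1', 'o4'];
const reasoningKeywords = ['o1', 'o3', 'o4'];

// Illustrative only: derive capability flags from substrings of the model id.
const detectCapabilities = (id: string) => ({
  functionCall: functionCallKeywords.some((k) => id.includes(k)),
  reasoning: reasoningKeywords.some((k) => id.includes(k)),
  vision: visionKeywords.some((k) => id.includes(k)),
});

console.log(detectCapabilities('gpt-4o-mini'));
// -> { functionCall: true, reasoning: false, vision: true }
console.log(detectCapabilities('gpt-3.5-turbo-16k'));
// -> { functionCall: false, reasoning: false, vision: false }, matching the updated snapshot
```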