@lobehub/chat 1.87.6 → 1.87.8

This diff shows the content changes between publicly released versions of the package, as they appear in the supported public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@
 
 # Changelog
 
+ ### [Version 1.87.8](https://github.com/lobehub/lobe-chat/compare/v1.87.7...v1.87.8)
+
+ <sup>Released on **2025-05-22**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: 'top_p' is not supported with o4-mini, pin zustand version to avoid type error.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: 'top_p' is not supported with o4-mini, closes [#7747](https://github.com/lobehub/lobe-chat/issues/7747) ([4e04399](https://github.com/lobehub/lobe-chat/commit/4e04399))
+ - **misc**: Pin zustand version to avoid type error, closes [#7929](https://github.com/lobehub/lobe-chat/issues/7929) ([4f6e286](https://github.com/lobehub/lobe-chat/commit/4f6e286))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.87.7](https://github.com/lobehub/lobe-chat/compare/v1.87.6...v1.87.7)
+
+ <sup>Released on **2025-05-21**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Bump @lobehub/ui to 2.1.7.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Bump @lobehub/ui to 2.1.7, closes [#7912](https://github.com/lobehub/lobe-chat/issues/7912) ([457b645](https://github.com/lobehub/lobe-chat/commit/457b645))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
 ### [Version 1.87.6](https://github.com/lobehub/lobe-chat/compare/v1.87.5...v1.87.6)
 
 <sup>Released on **2025-05-21**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "'top_p' is not supported with o4-mini, pin zustand version to avoid type error."
+      ]
+    },
+    "date": "2025-05-22",
+    "version": "1.87.8"
+  },
+  {
+    "children": {
+      "fixes": [
+        "Bump @lobehub/ui to 2.1.7."
+      ]
+    },
+    "date": "2025-05-21",
+    "version": "1.87.7"
+  },
   {
     "children": {
       "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.87.6",
+  "version": "1.87.8",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -150,7 +150,7 @@
     "@lobehub/chat-plugins-gateway": "^1.9.0",
     "@lobehub/icons": "^2.0.0",
     "@lobehub/tts": "^2.0.1",
-    "@lobehub/ui": "^2.1.6",
+    "@lobehub/ui": "^2.1.7",
     "@modelcontextprotocol/sdk": "^1.11.4",
     "@neondatabase/serverless": "^1.0.0",
     "@next/third-parties": "^15.3.2",
@@ -268,7 +268,7 @@
     "yaml": "^2.8.0",
     "yjs": "^13.6.27",
     "zod": "^3.25.7",
-    "zustand": "^5.0.4",
+    "zustand": "5.0.4",
     "zustand-utils": "^2.1.0"
   },
   "devDependencies": {
@@ -140,7 +140,7 @@ const CreateNewProvider = memo<CreateNewProviderProps>(({ onClose, open }) => {
 
   return (
     <FormModal
-      destroyOnClose
+      destroyOnHidden
       height={'90%'}
       items={[
         {
@@ -256,6 +256,16 @@ const novitaChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    contextWindowTokens: 160_000,
+    displayName: 'Deepseek Prover V2 671B',
+    id: 'deepseek/deepseek-prover-v2-671b',
+    pricing: {
+      input: 0.7,
+      output: 2.5,
+    },
+    type: 'chat',
+  },
   {
     contextWindowTokens: 64_000,
     displayName: 'Deepseek V3 Turbo',
@@ -299,7 +309,7 @@ const novitaChatModels: AIChatModelCard[] = [
     enabled: true,
     id: 'deepseek/deepseek-v3-0324',
     pricing: {
-      input: 0.37,
+      input: 0.33,
       output: 1.3,
     },
     type: 'chat',
@@ -479,7 +479,7 @@ export const openaiChatModels: AIChatModelCard[] = [
   {
     contextWindowTokens: 4096,
     description:
-      'GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125',
+      'GPT 3.5 Turbo,适用于各种文本生成和理解任务,对指令遵循的优化',
     displayName: 'GPT-3.5 Turbo Instruct',
     id: 'gpt-3.5-turbo-instruct',
     pricing: {
@@ -16,8 +16,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     organization: 'Qwen',
     pricing: {
       currency: 'CNY',
-      input: 1.25,
-      output: 5,
+      input: 2.5,
+      output: 10,
     },
     releasedAt: '2025-04-28',
     settings: {
@@ -38,8 +38,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     organization: 'Qwen',
     pricing: {
       currency: 'CNY',
-      input: 0.5,
-      output: 2,
+      input: 1,
+      output: 4,
     },
     releasedAt: '2025-04-28',
     settings: {
@@ -60,8 +60,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     organization: 'Qwen',
     pricing: {
       currency: 'CNY',
-      input: 0.35,
-      output: 1.4,
+      input: 0.7,
+      output: 2.8,
     },
     releasedAt: '2025-04-28',
     settings: {
@@ -82,8 +82,8 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     organization: 'Qwen',
     pricing: {
       currency: 'CNY',
-      input: 0.25,
-      output: 1,
+      input: 0.5,
+      output: 2,
     },
     releasedAt: '2025-04-28',
     settings: {
@@ -56,9 +56,9 @@ export class LobeAzureAI implements LobeRuntimeAI {
         model,
         ...params,
         stream: enableStreaming,
-        temperature: model.includes('o3') ? undefined : temperature,
+        temperature: (model.includes('o3') || model.includes('o4')) ? undefined : temperature,
         tool_choice: params.tools ? 'auto' : undefined,
-        top_p: model.includes('o3') ? undefined : top_p,
+        top_p: (model.includes('o3') || model.includes('o4')) ? undefined : top_p,
       },
     });
 
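The Azure runtime change above carries the o4-mini fix: the o3 and o4 reasoning families reject sampling parameters, so both `temperature` and `top_p` are now set to `undefined` (i.e. omitted) for them. A minimal standalone sketch of the same guard (the `buildSamplingParams` helper is illustrative, not part of the source):

```ts
// Sampling parameters that o3/o4 reasoning models reject.
interface SamplingParams {
  temperature?: number;
  top_p?: number;
}

// Same substring check as the diff above.
const isReasoningModel = (model: string) =>
  model.includes('o3') || model.includes('o4');

// Hypothetical helper: returns the sampling params to merge into the request.
const buildSamplingParams = (
  model: string,
  { temperature, top_p }: SamplingParams,
): SamplingParams =>
  isReasoningModel(model) ? {} : { temperature, top_p };

buildSamplingParams('o4-mini', { temperature: 0.7, top_p: 1 }); // -> {}
buildSamplingParams('gpt-4o', { temperature: 0.7, top_p: 1 });  // -> both kept
```

Note that `'gpt-4o'.includes('o4')` is false (the id contains `4o`, not `o4`), so 4o-series models keep their sampling parameters under this substring check.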
@@ -42,7 +42,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": undefined,
     "displayName": undefined,
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-3.5-turbo-16k",
     "reasoning": false,
     "vision": false,
@@ -69,7 +69,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": undefined,
     "displayName": undefined,
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-3.5-turbo-16k-0613",
     "reasoning": false,
     "vision": false,
@@ -87,16 +87,16 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": undefined,
     "displayName": undefined,
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-4-1106-vision-preview",
     "reasoning": false,
-    "vision": true,
+    "vision": false,
   },
   {
     "contextWindowTokens": undefined,
     "displayName": undefined,
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-3.5-turbo-instruct-0914",
     "reasoning": false,
     "vision": false,
@@ -123,7 +123,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": 4096,
     "displayName": "GPT-3.5 Turbo Instruct",
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-3.5-turbo-instruct",
     "reasoning": false,
     "vision": false,
@@ -132,7 +132,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": undefined,
     "displayName": undefined,
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-3.5-turbo-0301",
     "reasoning": false,
     "vision": false,
@@ -141,7 +141,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": undefined,
     "displayName": undefined,
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-3.5-turbo-0613",
     "reasoning": false,
     "vision": false,
@@ -204,7 +204,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "contextWindowTokens": 128000,
     "displayName": "GPT 4 Turbo with Vision Preview",
     "enabled": false,
-    "functionCall": true,
+    "functionCall": false,
     "id": "gpt-4-vision-preview",
     "reasoning": false,
     "vision": true,
@@ -8,13 +8,15 @@ export interface OpenAIModelCard {
   id: string;
 }
 
+const prunePrefixes = ['o1', 'o3', 'o4'];
+
 export const LobeOpenAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.openai.com/v1',
   chatCompletion: {
     handlePayload: (payload) => {
       const { model } = payload;
 
-      if (model.startsWith('o1') || model.startsWith('o3')) {
+      if (prunePrefixes.some(prefix => model.startsWith(prefix))) {
         return pruneReasoningPayload(payload) as any;
       }
 
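Replacing the hard-coded `startsWith('o1') || startsWith('o3')` pair with a `prunePrefixes` array makes the next reasoning family a one-element change. A quick illustration of the predicate with example model ids:

```ts
const prunePrefixes = ['o1', 'o3', 'o4'];

// True when the payload should be routed through pruneReasoningPayload.
const shouldPrune = (model: string) =>
  prunePrefixes.some((prefix) => model.startsWith(prefix));

shouldPrune('o4-mini');    // true  -> reasoning payload gets pruned
shouldPrune('o1-preview'); // true
shouldPrune('gpt-4o');     // false -> payload passes through unchanged
```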
@@ -38,11 +40,11 @@ export const LobeOpenAI = LobeOpenAICompatibleFactory({
   models: async ({ client }) => {
     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-    const functionCallKeywords = ['gpt-4', 'gpt-3.5', 'o3-mini'];
+    const functionCallKeywords = ['4o', '4.1', 'o3', 'o4'];
 
-    const visionKeywords = ['gpt-4o', 'vision'];
+    const visionKeywords = ['4o', '4.1', 'o4'];
 
-    const reasoningKeywords = ['o1', 'o3'];
+    const reasoningKeywords = ['o1', 'o3', 'o4'];
 
     const modelsPage = (await client.models.list()) as any;
     const modelList: OpenAIModelCard[] = modelsPage.data;
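The refreshed keyword lists derive capability flags from each model id returned by `client.models.list()`, which is what flips the `functionCall`/`vision` values in the snapshot hunks above: legacy `gpt-3.5`/`gpt-4` ids no longer advertise function calling, while `4o`, `4.1`, `o3`, and `o4` ids do. A simplified sketch of that mapping, assuming the flags come purely from substring matching (the imported `LOBE_DEFAULT_MODEL_LIST` presumably overlays metadata for known models, which this sketch ignores):

```ts
const functionCallKeywords = ['4o', '4.1', 'o3', 'o4'];
const visionKeywords = ['4o', '4.1', 'o4'];
const reasoningKeywords = ['o1', 'o3', 'o4'];

// True if the id contains any of the keywords (case-insensitive).
const matches = (id: string, keywords: string[]) =>
  keywords.some((keyword) => id.toLowerCase().includes(keyword));

// Derive capability flags for a model id (simplified card shape).
const toCapabilities = (id: string) => ({
  functionCall: matches(id, functionCallKeywords),
  reasoning: matches(id, reasoningKeywords),
  vision: matches(id, visionKeywords),
});

toCapabilities('gpt-3.5-turbo-instruct'); // all false -> matches the updated snapshot
toCapabilities('gpt-4o');                 // { functionCall: true, reasoning: false, vision: true }
toCapabilities('o4-mini');                // { functionCall: true, reasoning: true, vision: true }
```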