@lobehub/chat 1.96.11 → 1.96.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,39 @@
 
  # Changelog
 
+ ### [Version 1.96.12](https://github.com/lobehub/lobe-chat/compare/v1.96.11...v1.96.12)
+
+ <sup>Released on **2025-06-30**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Pin `antd@5.26.2` to fix build error.
+
+ #### 💄 Styles
+
+ - **misc**: Add DeepResearch models from OpenAI.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Pin `antd@5.26.2` to fix build error, closes [#8303](https://github.com/lobehub/lobe-chat/issues/8303) ([44b6b01](https://github.com/lobehub/lobe-chat/commit/44b6b01))
+
+ #### Styles
+
+ - **misc**: Add DeepResearch models from OpenAI, closes [#8291](https://github.com/lobehub/lobe-chat/issues/8291) ([87a5cbc](https://github.com/lobehub/lobe-chat/commit/87a5cbc))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.96.11](https://github.com/lobehub/lobe-chat/compare/v1.96.10...v1.96.11)
 
  <sup>Released on **2025-06-28**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,16 @@
  [
+ {
+ "children": {
+ "fixes": [
+ "Pin antd@5.26.2 to fix build error."
+ ],
+ "improvements": [
+ "Add DeepResearch models from OpenAI."
+ ]
+ },
+ "date": "2025-06-30",
+ "version": "1.96.12"
+ },
  {
  "children": {},
  "date": "2025-06-28",
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.96.11",
+ "version": "1.96.12",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -170,7 +170,7 @@
  "@xterm/xterm": "^5.5.0",
  "ahooks": "^3.8.5",
  "ai": "^3.4.33",
- "antd": "^5.25.4",
+ "antd": "5.26.2",
  "antd-style": "^3.7.1",
  "brotli-wasm": "^3.0.1",
  "chroma-js": "^3.1.2",
@@ -12,21 +12,25 @@ export const openaiChatModels: AIChatModelCard[] = [
  abilities: {
  functionCall: true,
  reasoning: true,
+ search: true,
  vision: true,
  },
  contextWindowTokens: 200_000,
  description:
- 'o3-pro 模型使用更多的计算来更深入地思考并始终提供更好的答案,仅支持 Responses API 下使用。',
- displayName: 'o3-pro',
- id: 'o3-pro',
+ 'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
+ displayName: 'o4-mini',
+ enabled: true,
+ id: 'o4-mini',
  maxOutput: 100_000,
  pricing: {
- input: 20,
- output: 80,
+ cachedInput: 0.275,
+ input: 1.1,
+ output: 4.4,
  },
- releasedAt: '2025-06-10',
+ releasedAt: '2025-04-17',
  settings: {
  extendParams: ['reasoningEffort'],
+ searchImpl: 'params',
  },
  type: 'chat',
  },
@@ -34,23 +38,24 @@ export const openaiChatModels: AIChatModelCard[] = [
  abilities: {
  functionCall: true,
  reasoning: true,
+ search: true,
  vision: true,
  },
  contextWindowTokens: 200_000,
  description:
- 'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
- displayName: 'o3',
- enabled: true,
- id: 'o3',
+ 'o4-mini-deep-research 是我们更快速、更实惠的深度研究模型——非常适合处理复杂的多步骤研究任务。它可以从互联网上搜索和综合信息,也可以通过 MCP 连接器访问并利用你的自有数据。',
+ displayName: 'o4-mini Deep Research',
+ id: 'o4-mini-deep-research',
  maxOutput: 100_000,
  pricing: {
  cachedInput: 0.5,
  input: 2,
  output: 8,
  },
- releasedAt: '2025-04-16',
+ releasedAt: '2025-06-26',
  settings: {
  extendParams: ['reasoningEffort'],
+ searchImpl: 'params',
  },
  type: 'chat',
  },
@@ -58,45 +63,48 @@ export const openaiChatModels: AIChatModelCard[] = [
  abilities: {
  functionCall: true,
  reasoning: true,
+ search: true,
  vision: true,
  },
  contextWindowTokens: 200_000,
  description:
- 'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
- displayName: 'o4-mini',
- enabled: true,
- id: 'o4-mini',
+ 'o3-pro 模型使用更多的计算来更深入地思考并始终提供更好的答案,仅支持 Responses API 下使用。',
+ displayName: 'o3-pro',
+ id: 'o3-pro',
  maxOutput: 100_000,
  pricing: {
- cachedInput: 0.275,
- input: 1.1,
- output: 4.4,
+ input: 20,
+ output: 80,
  },
- releasedAt: '2025-04-17',
+ releasedAt: '2025-06-10',
  settings: {
  extendParams: ['reasoningEffort'],
+ searchImpl: 'params',
  },
  type: 'chat',
  },
  {
  abilities: {
  functionCall: true,
+ reasoning: true,
  search: true,
  vision: true,
  },
- contextWindowTokens: 1_047_576,
- description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
- displayName: 'GPT-4.1',
+ contextWindowTokens: 200_000,
+ description:
+ 'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
+ displayName: 'o3',
  enabled: true,
- id: 'gpt-4.1',
- maxOutput: 32_768,
+ id: 'o3',
+ maxOutput: 100_000,
  pricing: {
  cachedInput: 0.5,
  input: 2,
  output: 8,
  },
- releasedAt: '2025-04-14',
+ releasedAt: '2025-04-16',
  settings: {
+ extendParams: ['reasoningEffort'],
  searchImpl: 'params',
  },
  type: 'chat',
@@ -104,45 +112,28 @@ export const openaiChatModels: AIChatModelCard[] = [
  {
  abilities: {
  functionCall: true,
+ reasoning: true,
  search: true,
  vision: true,
  },
- contextWindowTokens: 1_047_576,
+ contextWindowTokens: 200_000,
  description:
- 'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
- displayName: 'GPT-4.1 mini',
- enabled: true,
- id: 'gpt-4.1-mini',
- maxOutput: 32_768,
+ 'o3-deep-research 是我们最先进的深度研究模型,专为处理复杂的多步骤研究任务而设计。它可以从互联网上搜索和综合信息,也可以通过 MCP 连接器访问并利用你的自有数据。',
+ displayName: 'o3 Deep Research',
+ id: 'o3-deep-research',
+ maxOutput: 100_000,
  pricing: {
- cachedInput: 0.1,
- input: 0.4,
- output: 1.6,
+ cachedInput: 2.5,
+ input: 10,
+ output: 40,
  },
- releasedAt: '2025-04-14',
+ releasedAt: '2025-06-26',
  settings: {
+ extendParams: ['reasoningEffort'],
  searchImpl: 'params',
  },
  type: 'chat',
  },
- {
- abilities: {
- functionCall: true,
- vision: true,
- },
- contextWindowTokens: 1_047_576,
- description: 'GPT-4.1 nano 是最快,最具成本效益的GPT-4.1模型。',
- displayName: 'GPT-4.1 nano',
- id: 'gpt-4.1-nano',
- maxOutput: 32_768,
- pricing: {
- cachedInput: 0.025,
- input: 0.1,
- output: 0.4,
- },
- releasedAt: '2025-04-14',
- type: 'chat',
- },
  {
  abilities: {
  functionCall: true,
@@ -251,6 +242,71 @@ export const openaiChatModels: AIChatModelCard[] = [
  },
  type: 'chat',
  },
+ {
+ abilities: {
+ functionCall: true,
+ search: true,
+ vision: true,
+ },
+ contextWindowTokens: 1_047_576,
+ description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
+ displayName: 'GPT-4.1',
+ enabled: true,
+ id: 'gpt-4.1',
+ maxOutput: 32_768,
+ pricing: {
+ cachedInput: 0.5,
+ input: 2,
+ output: 8,
+ },
+ releasedAt: '2025-04-14',
+ settings: {
+ searchImpl: 'params',
+ },
+ type: 'chat',
+ },
+ {
+ abilities: {
+ functionCall: true,
+ search: true,
+ vision: true,
+ },
+ contextWindowTokens: 1_047_576,
+ description:
+ 'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
+ displayName: 'GPT-4.1 mini',
+ enabled: true,
+ id: 'gpt-4.1-mini',
+ maxOutput: 32_768,
+ pricing: {
+ cachedInput: 0.1,
+ input: 0.4,
+ output: 1.6,
+ },
+ releasedAt: '2025-04-14',
+ settings: {
+ searchImpl: 'params',
+ },
+ type: 'chat',
+ },
+ {
+ abilities: {
+ functionCall: true,
+ vision: true,
+ },
+ contextWindowTokens: 1_047_576,
+ description: 'GPT-4.1 nano 是最快,最具成本效益的GPT-4.1模型。',
+ displayName: 'GPT-4.1 nano',
+ id: 'gpt-4.1-nano',
+ maxOutput: 32_768,
+ pricing: {
+ cachedInput: 0.025,
+ input: 0.1,
+ output: 0.4,
+ },
+ releasedAt: '2025-04-14',
+ type: 'chat',
+ },
  {
  abilities: {
  functionCall: true,
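Taken together, the hunks above reorder the OpenAI card list so the o-series entries come before the GPT-4.1 family, add `search: true` plus `settings.searchImpl: 'params'` to the o3, o3-pro, and o4-mini cards, and introduce the `o3-deep-research` and `o4-mini-deep-research` cards. The following self-contained sketch shows how a consumer might pick out the search-capable cards; the `Card` interface is a simplified, hypothetical stand-in for the package's `AIChatModelCard` type, and the sample data mirrors two entries touched by this diff:

```ts
// Simplified stand-in for AIChatModelCard; only the fields used below are modelled.
interface Card {
  abilities?: { functionCall?: boolean; reasoning?: boolean; search?: boolean; vision?: boolean };
  displayName: string;
  id: string;
  settings?: { extendParams?: string[]; searchImpl?: string };
}

// Sample data mirroring two entries from the hunks above.
const cards: Card[] = [
  {
    abilities: { functionCall: true, reasoning: true, search: true, vision: true },
    displayName: 'o3',
    id: 'o3',
    settings: { extendParams: ['reasoningEffort'], searchImpl: 'params' },
  },
  {
    abilities: { functionCall: true, vision: true },
    displayName: 'GPT-4.1 nano',
    id: 'gpt-4.1-nano',
  },
];

// Cards that advertise built-in web search implemented via request parameters.
const searchViaParams = cards.filter(
  (card) => card.abilities?.search && card.settings?.searchImpl === 'params',
);
console.log(searchViaParams.map((card) => card.id)); // ['o3']
```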
@@ -26,8 +26,12 @@ export const disableStreamModels = new Set([
  export const responsesAPIModels = new Set([
  'o1-pro',
  'o1-pro-2025-03-19',
+ 'o3-deep-research',
+ 'o3-deep-research-2025-06-26',
  'o3-pro',
  'o3-pro-2025-06-10',
+ 'o4-mini-deep-research',
+ 'o4-mini-deep-research-2025-06-26',
  'codex-mini-latest',
  'computer-use-preview',
  'computer-use-preview-2025-03-11',
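The four added IDs (each new deep-research model with and without its dated alias) extend the `responsesAPIModels` allow-list, which by its name gates which models are routed through OpenAI's Responses API rather than Chat Completions. A minimal sketch of such a Set-based check; the `endpointFor` helper is hypothetical, and only the Set contents mirror the diff:

```ts
// Trimmed copy of the allow-list above, limited to the entries relevant here.
const responsesAPIModels = new Set([
  'o1-pro',
  'o3-deep-research',
  'o3-deep-research-2025-06-26',
  'o3-pro',
  'o4-mini-deep-research',
  'o4-mini-deep-research-2025-06-26',
]);

// Hypothetical routing helper (not part of the package): choose the endpoint by model id.
const endpointFor = (model: string): 'responses' | 'chat-completions' =>
  responsesAPIModels.has(model) ? 'responses' : 'chat-completions';

console.log(endpointFor('o4-mini-deep-research')); // 'responses'
console.log(endpointFor('gpt-4.1'));               // 'chat-completions'
```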
@@ -76,18 +76,17 @@ export const LobeOpenAI = createOpenAICompatibleRuntime({
  : tools;
 
  if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
- if (!payload.reasoning) {
- payload.reasoning = { summary: 'auto' };
- } else {
- payload.reasoning.summary = 'auto';
- }
-
- // computer-use series must set truncation as auto
- if (model.startsWith('computer-use')) {
- payload.truncation = 'auto';
- }
-
- return pruneReasoningPayload(payload) as any;
+ return pruneReasoningPayload({
+ ...rest,
+ model,
+ reasoning: payload.reasoning ?
+ { ...payload.reasoning, summary: 'auto' } :
+ { summary: 'auto' },
+ stream: payload.stream ?? true,
+ tools: openaiTools as any,
+ // computer-use series must set truncation as auto
+ ...(model.startsWith('computer-use') && { truncation: 'auto' }),
+ }) as any;
  }
 
  return { ...rest, model, stream: payload.stream ?? true, tools: openaiTools } as any;
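The rewritten branch above stops mutating the incoming `payload` (the old code assigned `reasoning.summary` and `truncation` in place) and instead builds a fresh object from spreads, adding `truncation: 'auto'` through a conditional spread only for `computer-use` models; it also forwards the transformed `openaiTools` and a default `stream`, matching the non-pruned return on the last line. A small, self-contained sketch of the conditional-spread pattern, using a plain object rather than the runtime's real payload type:

```ts
// Stand-alone illustration of the conditional-spread pattern used in the new code.
type Reasoning = { effort?: string; summary?: string };

const buildPayload = (model: string, reasoning?: Reasoning) => ({
  model,
  // Keep whatever reasoning options were passed in, but always force summary: 'auto'.
  reasoning: reasoning ? { ...reasoning, summary: 'auto' } : { summary: 'auto' },
  // Only computer-use models get `truncation`; for everything else the spread of
  // `false` contributes nothing, so the key is simply absent.
  ...(model.startsWith('computer-use') && { truncation: 'auto' as const }),
});

console.log(buildPayload('o3-pro', { effort: 'high' }));
// { model: 'o3-pro', reasoning: { effort: 'high', summary: 'auto' } }
console.log(buildPayload('computer-use-preview'));
// { model: 'computer-use-preview', reasoning: { summary: 'auto' }, truncation: 'auto' }
```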