@lobehub/chat 1.110.7 → 1.111.0

This diff reflects the changes between publicly released versions of the package as they appear in its public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 1.111.0](https://github.com/lobehub/lobe-chat/compare/v1.110.7...v1.111.0)
+
+<sup>Released on **2025-08-08**</sup>
+
+#### ✨ Features
+
+- **misc**: Add GPT-5 series models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add GPT-5 series models, closes [#8711](https://github.com/lobehub/lobe-chat/issues/8711) ([600c29b](https://github.com/lobehub/lobe-chat/commit/600c29b))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.110.7](https://github.com/lobehub/lobe-chat/compare/v1.110.6...v1.110.7)
 
 <sup>Released on **2025-08-07**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "features": [
+        "Add GPT-5 series models."
+      ]
+    },
+    "date": "2025-08-08",
+    "version": "1.111.0"
+  },
   {
     "children": {
       "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.110.7",
+  "version": "1.111.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -1,6 +1,98 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
 const aihubmixModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      imageOutput: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '跨领域编码和代理任务的最佳模型。GPT-5 在准确性、速度、推理、上下文识别、结构化思维和问题解决方面实现了飞跃。',
+    displayName: 'GPT-5',
+    enabled: true,
+    id: 'gpt-5',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '更快、更经济高效的 GPT-5 版本,适用于明确定义的任务。在保持高质量输出的同时,提供更快的响应速度。',
+    displayName: 'GPT-5 mini',
+    enabled: true,
+    id: 'gpt-5-mini',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.03,
+      input: 0.25,
+      output: 2,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description: '最快、最经济高效的 GPT-5 版本。非常适合需要快速响应且成本敏感的应用场景。',
+    displayName: 'GPT-5 nano',
+    enabled: true,
+    id: 'gpt-5-nano',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.01,
+      input: 0.05,
+      output: 0.4,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      'ChatGPT 中使用的 GPT-5 模型。结合了强大的语言理解与生成能力,适合对话式交互应用。',
+    displayName: 'GPT-5 Chat',
+    enabled: true,
+    id: 'gpt-5-chat-latest',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -136,7 +228,6 @@ const aihubmixModels: AIChatModelCard[] = [
     contextWindowTokens: 1_047_576,
     description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
     displayName: 'GPT-4.1',
-    enabled: true,
     id: 'gpt-4.1',
     maxOutput: 32_768,
     pricing: {
@@ -199,7 +290,6 @@ const aihubmixModels: AIChatModelCard[] = [
     description:
       'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
     displayName: 'ChatGPT-4o',
-    enabled: true,
    id: 'chatgpt-4o-latest',
     pricing: {
       input: 5,
@@ -18,6 +18,97 @@ export const gptImage1ParamsSchema: ModelParamsSchema = {
 };
 
 export const openaiChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      imageOutput: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '跨领域编码和代理任务的最佳模型。GPT-5 在准确性、速度、推理、上下文识别、结构化思维和问题解决方面实现了飞跃。',
+    displayName: 'GPT-5',
+    enabled: true,
+    id: 'gpt-5',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '更快、更经济高效的 GPT-5 版本,适用于明确定义的任务。在保持高质量输出的同时,提供更快的响应速度。',
+    displayName: 'GPT-5 mini',
+    enabled: true,
+    id: 'gpt-5-mini',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.03,
+      input: 0.25,
+      output: 2,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description: '最快、最经济高效的 GPT-5 版本。非常适合需要快速响应且成本敏感的应用场景。',
+    displayName: 'GPT-5 nano',
+    id: 'gpt-5-nano',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.01,
+      input: 0.05,
+      output: 0.4,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      'ChatGPT 中使用的 GPT-5 模型。结合了强大的语言理解与生成能力,适合对话式交互应用。',
+    displayName: 'GPT-5 Chat',
+    enabled: true,
+    id: 'gpt-5-chat-latest',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
@@ -261,7 +352,6 @@ export const openaiChatModels: AIChatModelCard[] = [
     contextWindowTokens: 1_047_576,
     description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
     displayName: 'GPT-4.1',
-    enabled: true,
     id: 'gpt-4.1',
     maxOutput: 32_768,
     pricing: {
@@ -285,7 +375,6 @@ export const openaiChatModels: AIChatModelCard[] = [
     description:
       'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
     displayName: 'GPT-4.1 mini',
-    enabled: true,
     id: 'gpt-4.1-mini',
     maxOutput: 32_768,
     pricing: {
@@ -516,7 +605,6 @@ export const openaiChatModels: AIChatModelCard[] = [
     description:
       'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
     displayName: 'ChatGPT-4o',
-    enabled: true,
     id: 'chatgpt-4o-latest',
     pricing: {
       input: 5,
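
The GPT-5 cards added above (in both the aihubmix and openai model lists) reuse the existing `AIChatModelCard` shape. The `pricing` values appear to be USD per million tokens — the GPT-5 figures match OpenAI's published rates — with `cachedInput` covering cache-hit input tokens. Below is a minimal sketch of turning such a card into a per-request cost estimate under that per-million assumption; `estimateCost` is illustrative and not part of this package:

```ts
// Sketch only: assumes pricing values are USD per 1M tokens, as the GPT-5
// numbers above suggest (1.25 input / 10 output matches OpenAI's list price).
interface ModelPricing {
  cachedInput?: number; // $ per 1M cache-hit input tokens
  input: number; // $ per 1M fresh input tokens
  output: number; // $ per 1M output tokens
}

interface UsageTokens {
  cachedInputTokens: number;
  inputTokens: number; // non-cached input tokens
  outputTokens: number;
}

const PER_MILLION = 1_000_000;

// Hypothetical helper, not present in the package.
const estimateCost = (pricing: ModelPricing, usage: UsageTokens): number =>
  (usage.cachedInputTokens * (pricing.cachedInput ?? pricing.input) +
    usage.inputTokens * pricing.input +
    usage.outputTokens * pricing.output) /
  PER_MILLION;

// Example with the gpt-5 card above: 10k fresh input, 50k cached input, 2k output
// => (10_000 * 1.25 + 50_000 * 0.13 + 2_000 * 10) / 1M ≈ $0.039
const gpt5Cost = estimateCost(
  { cachedInput: 0.13, input: 1.25, output: 10 },
  { cachedInputTokens: 50_000, inputTokens: 10_000, outputTokens: 2_000 },
);
console.log(gpt5Cost.toFixed(4));
```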
@@ -4,12 +4,28 @@ import { ModelProviderCard } from '@/types/llm';
 const OpenAI: ModelProviderCard = {
   apiKeyUrl: 'https://platform.openai.com/api-keys?utm_source=lobehub',
   chatModels: [
+    {
+      contextWindowTokens: 400_000,
+      description:
+        '更快、更经济高效的 GPT-5 版本,适用于明确定义的任务。在保持高质量输出的同时,提供更快的响应速度。',
+      displayName: 'GPT-5 mini',
+      enabled: true,
+      functionCall: true,
+      id: 'gpt-5-mini',
+      maxOutput: 128_000,
+      pricing: {
+        cachedInput: 0.03,
+        input: 0.25,
+        output: 2,
+      },
+      releasedAt: '2025-08-07',
+      vision: true,
+    },
     {
       contextWindowTokens: 1_047_576,
       description:
         'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
       displayName: 'GPT-4.1 mini',
-      enabled: true,
       functionCall: true,
       id: 'gpt-4.1-mini',
       maxOutput: 32_768,
@@ -35,6 +35,8 @@ export const responsesAPIModels = new Set([
   'codex-mini-latest',
   'computer-use-preview',
   'computer-use-preview-2025-03-11',
+  'gpt-5',
+  'gpt-5-mini',
 ]);
 
 /**
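
`gpt-5` and `gpt-5-mini` are added to the `responsesAPIModels` set. A set of model IDs like this is typically consumed as a membership check when choosing between OpenAI's Responses API and Chat Completions; the sketch below shows only that pattern, with a hypothetical `resolveEndpoint` helper rather than the package's actual routing code:

```ts
// Illustrative only: a membership check is the usual way such a Set is consumed;
// resolveEndpoint is hypothetical and not taken from this diff.
const responsesAPIModels = new Set(['codex-mini-latest', 'gpt-5', 'gpt-5-mini']);

type OpenAIEndpoint = '/v1/responses' | '/v1/chat/completions';

const resolveEndpoint = (model: string): OpenAIEndpoint =>
  responsesAPIModels.has(model) ? '/v1/responses' : '/v1/chat/completions';

console.log(resolveEndpoint('gpt-5')); // "/v1/responses"
console.log(resolveEndpoint('gpt-4.1')); // "/v1/chat/completions"
```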
@@ -14,7 +14,7 @@ export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
   },
 });
 
-export const DEFAULT_MODEL = 'gpt-4.1-mini';
+export const DEFAULT_MODEL = 'gpt-5-mini';
 
 export const DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small';
 export const DEFAULT_EMBEDDING_PROVIDER = ModelProvider.OpenAI;
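
`DEFAULT_MODEL` moves from `gpt-4.1-mini` to `gpt-5-mini`, which is what drives the snapshot updates further down. A constant like this is normally consumed as a fallback when no model is configured explicitly; the resolver below is a sketch of that pattern, not code from the package:

```ts
const DEFAULT_MODEL = 'gpt-5-mini';

interface AgentConfig {
  model?: string;
  provider?: string;
}

// Hypothetical resolver showing the usual fallback pattern for a default model.
const resolveModel = (config: AgentConfig): string => config.model ?? DEFAULT_MODEL;

console.log(resolveModel({})); // "gpt-5-mini"
console.log(resolveModel({ model: 'gpt-4.1' })); // "gpt-4.1"
```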
@@ -9,7 +9,7 @@ export interface OpenAIModelCard {
   id: string;
 }
 
-const prunePrefixes = ['o1', 'o3', 'o4', 'codex', 'computer-use'];
+const prunePrefixes = ['o1', 'o3', 'o4', 'codex', 'computer-use', 'gpt-5'];
 const oaiSearchContextSize = process.env.OPENAI_SEARCH_CONTEXT_SIZE; // low, medium, high
 
 export const LobeOpenAI = createOpenAICompatibleRuntime({
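
With `'gpt-5'` appended to `prunePrefixes`, model IDs starting with that prefix now receive the same prefix-based handling as the o1/o3/o4/codex/computer-use families. As a hedged illustration of the prefix test — the assumption that matched models get sampling parameters stripped, and the `prunePayloadForModel` helper, are not taken from this diff:

```ts
// Sketch under an assumption: prefix lists like this are commonly used to strip
// sampling params (e.g. temperature) that reasoning-style models reject.
// prunePayloadForModel is hypothetical; only the prefix test mirrors the diff.
const prunePrefixes = ['o1', 'o3', 'o4', 'codex', 'computer-use', 'gpt-5'];

interface ChatPayload {
  model: string;
  temperature?: number;
  top_p?: number;
  [key: string]: unknown;
}

const prunePayloadForModel = (payload: ChatPayload): ChatPayload => {
  const shouldPrune = prunePrefixes.some((prefix) => payload.model.startsWith(prefix));
  if (!shouldPrune) return payload;
  // Drop parameters the matched families typically do not accept.
  const pruned = { ...payload };
  delete pruned.temperature;
  delete pruned.top_p;
  return pruned;
};

console.log(prunePayloadForModel({ model: 'gpt-5-mini', temperature: 0.7 }));
// => { model: 'gpt-5-mini' }
```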
@@ -1,4 +1,4 @@
-import { ChatErrorType , TracePayload, TraceTagMap } from '@lobechat/types';
+import { ChatErrorType, TracePayload, TraceTagMap } from '@lobechat/types';
 import { PluginRequestPayload, createHeadersWithPluginSettings } from '@lobehub/chat-plugin-sdk';
 import { produce } from 'immer';
 import { merge } from 'lodash-es';
@@ -12,7 +12,7 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
   "historyCount": 20,
   "reasoningBudgetToken": 1024,
   "searchFCModel": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "searchMode": "off",
@@ -45,22 +45,6 @@ describe('modelProviderSelectors', () => {
   });
 
   describe('defaultEnabledProviderModels', () => {
-    it('should return enabled models for a given provider', () => {
-      const s = merge(initialState, {}) as unknown as UserStore;
-
-      const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
-      expect(result).toEqual([
-        'gpt-4.1-mini',
-        'o1-mini',
-        'o1-2024-12-17',
-        'o1-preview',
-        'gpt-4o-mini',
-        'gpt-4o-2024-11-20',
-        'gpt-4o',
-        'chatgpt-4o-latest',
-      ]);
-    });
-
     it('should return undefined for a non-existing provider', () => {
       const s = merge(initialState, {}) as unknown as UserStore;
 
@@ -51,34 +51,34 @@ exports[`settingsSelectors > currentSettings > should merge DEFAULT_SETTINGS and
 exports[`settingsSelectors > currentSystemAgent > should merge DEFAULT_SYSTEM_AGENT_CONFIG and s.settings.systemAgent correctly 1`] = `
 {
   "agentMeta": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "enableAutoReply": true,
   "generationTopic": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "historyCompress": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "queryRewrite": {
     "enabled": true,
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "replyMessage": "Custom auto reply",
   "thread": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "topic": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "translation": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
 }
@@ -115,7 +115,7 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
   "historyCount": 20,
   "reasoningBudgetToken": 1024,
   "searchFCModel": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "searchMode": "off",
@@ -159,7 +159,7 @@ exports[`settingsSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CON
   "historyCount": 20,
   "reasoningBudgetToken": 1024,
   "searchFCModel": {
-    "model": "gpt-4.1-mini",
+    "model": "gpt-5-mini",
     "provider": "openai",
   },
   "searchMode": "off",