@lobehub/chat 1.110.7 → 1.111.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/package.json +1 -1
- package/src/config/aiModels/aihubmix.ts +92 -2
- package/src/config/aiModels/fal.ts +43 -6
- package/src/config/aiModels/openai.ts +91 -3
- package/src/config/modelProviders/openai.ts +17 -1
- package/src/const/models.ts +2 -0
- package/src/const/settings/llm.ts +1 -1
- package/src/libs/model-runtime/fal/index.ts +8 -2
- package/src/libs/model-runtime/openai/index.ts +1 -1
- package/src/services/chat.ts +1 -1
- package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap +1 -1
- package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +0 -16
- package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +9 -9
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@

# Changelog

+### [Version 1.111.1](https://github.com/lobehub/lobe-chat/compare/v1.111.0...v1.111.1)
+
+<sup>Released on **2025-08-08**</sup>
+
+#### 💄 Styles
+
+- **misc**: Add descriptions for the FLUX.1 Krea and Qwen Image.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Add descriptions for the FLUX.1 Krea and Qwen Image, closes [#8678](https://github.com/lobehub/lobe-chat/issues/8678) ([769fda0](https://github.com/lobehub/lobe-chat/commit/769fda0))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 1.111.0](https://github.com/lobehub/lobe-chat/compare/v1.110.7...v1.111.0)
+
+<sup>Released on **2025-08-08**</sup>
+
+#### ✨ Features
+
+- **misc**: Add GPT-5 series models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add GPT-5 series models, closes [#8711](https://github.com/lobehub/lobe-chat/issues/8711) ([600c29b](https://github.com/lobehub/lobe-chat/commit/600c29b))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
### [Version 1.110.7](https://github.com/lobehub/lobe-chat/compare/v1.110.6...v1.110.7)

<sup>Released on **2025-08-07**</sup>
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
[
+  {
+    "children": {
+      "improvements": [
+        "Add descriptions for the FLUX.1 Krea and Qwen Image."
+      ]
+    },
+    "date": "2025-08-08",
+    "version": "1.111.1"
+  },
+  {
+    "children": {
+      "features": [
+        "Add GPT-5 series models."
+      ]
+    },
+    "date": "2025-08-08",
+    "version": "1.111.0"
+  },
  {
    "children": {
      "fixes": [
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
  "name": "@lobehub/chat",
-  "version": "1.110.7",
+  "version": "1.111.1",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
    "framework",
package/src/config/aiModels/aihubmix.ts
CHANGED
@@ -1,6 +1,98 @@
import { AIChatModelCard } from '@/types/aiModel';

const aihubmixModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      imageOutput: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '跨领域编码和代理任务的最佳模型。GPT-5 在准确性、速度、推理、上下文识别、结构化思维和问题解决方面实现了飞跃。',
+    displayName: 'GPT-5',
+    enabled: true,
+    id: 'gpt-5',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '更快、更经济高效的 GPT-5 版本,适用于明确定义的任务。在保持高质量输出的同时,提供更快的响应速度。',
+    displayName: 'GPT-5 mini',
+    enabled: true,
+    id: 'gpt-5-mini',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.03,
+      input: 0.25,
+      output: 2,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description: '最快、最经济高效的 GPT-5 版本。非常适合需要快速响应且成本敏感的应用场景。',
+    displayName: 'GPT-5 nano',
+    enabled: true,
+    id: 'gpt-5-nano',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.01,
+      input: 0.05,
+      output: 0.4,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      'ChatGPT 中使用的 GPT-5 模型。结合了强大的语言理解与生成能力,适合对话式交互应用。',
+    displayName: 'GPT-5 Chat',
+    enabled: true,
+    id: 'gpt-5-chat-latest',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
  {
    abilities: {
      functionCall: true,
@@ -136,7 +228,6 @@ const aihubmixModels: AIChatModelCard[] = [
    contextWindowTokens: 1_047_576,
    description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
    displayName: 'GPT-4.1',
-    enabled: true,
    id: 'gpt-4.1',
    maxOutput: 32_768,
    pricing: {
@@ -199,7 +290,6 @@ const aihubmixModels: AIChatModelCard[] = [
    description:
      'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
    displayName: 'ChatGPT-4o',
-    enabled: true,
    id: 'chatgpt-4o-latest',
    pricing: {
      input: 5,
package/src/config/aiModels/fal.ts
CHANGED
@@ -5,14 +5,34 @@ export const fluxSchnellParamsSchema: ModelParamsSchema = {
  height: { default: 1024, max: 1536, min: 512, step: 1 },
  prompt: { default: '' },
  seed: { default: null },
-  steps: { default: 4, max: 12, min: 1 },
+  steps: { default: 4, max: 12, min: 1, step: 1 },
  width: { default: 1024, max: 1536, min: 512, step: 1 },
};

+export const fluxKreaParamsSchema: ModelParamsSchema = {
+  cfg: { default: 7.5, max: 20, min: 0, step: 0.1 },
+  height: { default: 1248, max: 2048, min: 512, step: 1 },
+  prompt: { default: '' },
+  seed: { default: null },
+  steps: { default: 28, max: 50, min: 1, step: 1 },
+  width: { default: 832, max: 2048, min: 512, step: 1 },
+};
+
+export const qwenImageParamsSchema: ModelParamsSchema = {
+  cfg: { default: 2.5, max: 20, min: 0, step: 0.1 },
+  height: { default: 1328, max: 1536, min: 512, step: 1 },
+  prompt: { default: '' },
+  seed: { default: null },
+  steps: { default: 30, max: 50, min: 2, step: 1 },
+  width: { default: 1328, max: 1536, min: 512, step: 1 },
+};
+
+
+
const falImageModels: AIImageModelCard[] = [
  {
-    description: '
-    displayName: 'FLUX.1 Kontext
+    description: '专注于图像编辑任务的FLUX.1模型,支持文本和图像输入。',
+    displayName: 'FLUX.1 Kontext [dev]',
    enabled: true,
    id: 'flux-kontext/dev',
    parameters: {
@@ -43,8 +63,7 @@ const falImageModels: AIImageModelCard[] = [
    type: 'image',
  },
  {
-    description:
-      'FLUX.1 [schnell] 是一个拥有120亿参数的流式转换器模型,能够在1到4步内从文本生成高质量图像,适合个人和商业用途。',
+    description: 'FLUX.1 [schnell] 是一个具有120亿参数的图像生成模型,专注于快速生成高质量图像。',
    displayName: 'FLUX.1 Schnell',
    enabled: true,
    id: 'flux/schnell',
@@ -53,7 +72,16 @@ const falImageModels: AIImageModelCard[] = [
    type: 'image',
  },
  {
-    description: '
+    description: 'Flux Krea [dev] 是一个有美学偏好的图像生成模型,目标是生成更加真实、自然的图像。',
+    displayName: 'FLUX.1 Krea [dev]',
+    enabled: true,
+    id: 'flux/krea',
+    parameters: fluxKreaParamsSchema,
+    releasedAt: '2025-07-31',
+    type: 'image',
+  },
+  {
+    description: 'Google 提供的高质量的图像生成模型',
    displayName: 'Imagen 4',
    enabled: true,
    id: 'imagen4/preview',
@@ -69,6 +97,15 @@ const falImageModels: AIImageModelCard[] = [
    releasedAt: '2025-05-21',
    type: 'image',
  },
+  {
+    description: 'Qwen团队带来的强大生图模型,具有令人印象深刻的中文文字生成能力和多样图片视觉风格。',
+    displayName: 'Qwen Image',
+    enabled: true,
+    id: 'qwen-image',
+    parameters: qwenImageParamsSchema,
+    releasedAt: '2025-08-04',
+    type: 'image',
+  },
];

export const allModels = [...falImageModels];
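The fluxKreaParamsSchema and qwenImageParamsSchema added above only declare per-parameter defaults and bounds; this diff does not show how they are consumed. As a rough illustrative sketch (assuming a consumer falls back to each entry's `default` and clamps numeric overrides to `min`/`max`; the helper below is hypothetical and not part of the package):

```ts
// Hypothetical helper, not from this package: resolve user-supplied image params
// against a schema whose entries look like { default, min?, max?, step? }.
type ParamSpec = { default: unknown; max?: number; min?: number; step?: number };

const resolveParams = (
  schema: Record<string, ParamSpec>,
  overrides: Record<string, unknown>,
): Record<string, unknown> => {
  const result: Record<string, unknown> = {};
  for (const [key, spec] of Object.entries(schema)) {
    const value = overrides[key] ?? spec.default;
    // Clamp numeric values into the declared range, e.g. width 5000 -> 1536 for qwen-image.
    result[key] =
      typeof value === 'number' && spec.min !== undefined && spec.max !== undefined
        ? Math.min(spec.max, Math.max(spec.min, value))
        : value;
  }
  return result;
};

// Hypothetical usage: resolveParams(qwenImageParamsSchema, { prompt: '一只猫', width: 5000 });
```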
package/src/config/aiModels/openai.ts
CHANGED
@@ -18,6 +18,97 @@ export const gptImage1ParamsSchema: ModelParamsSchema = {
};

export const openaiChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      imageOutput: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '跨领域编码和代理任务的最佳模型。GPT-5 在准确性、速度、推理、上下文识别、结构化思维和问题解决方面实现了飞跃。',
+    displayName: 'GPT-5',
+    enabled: true,
+    id: 'gpt-5',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      '更快、更经济高效的 GPT-5 版本,适用于明确定义的任务。在保持高质量输出的同时,提供更快的响应速度。',
+    displayName: 'GPT-5 mini',
+    enabled: true,
+    id: 'gpt-5-mini',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.03,
+      input: 0.25,
+      output: 2,
+    },
+    releasedAt: '2025-08-07',
+    settings: {
+      extendParams: ['reasoningEffort'],
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description: '最快、最经济高效的 GPT-5 版本。非常适合需要快速响应且成本敏感的应用场景。',
+    displayName: 'GPT-5 nano',
+    id: 'gpt-5-nano',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.01,
+      input: 0.05,
+      output: 0.4,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 400_000,
+    description:
+      'ChatGPT 中使用的 GPT-5 模型。结合了强大的语言理解与生成能力,适合对话式交互应用。',
+    displayName: 'GPT-5 Chat',
+    enabled: true,
+    id: 'gpt-5-chat-latest',
+    maxOutput: 128_000,
+    pricing: {
+      cachedInput: 0.13,
+      input: 1.25,
+      output: 10,
+    },
+    releasedAt: '2025-08-07',
+    type: 'chat',
+  },
  {
    abilities: {
      functionCall: true,
@@ -261,7 +352,6 @@ export const openaiChatModels: AIChatModelCard[] = [
    contextWindowTokens: 1_047_576,
    description: 'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
    displayName: 'GPT-4.1',
-    enabled: true,
    id: 'gpt-4.1',
    maxOutput: 32_768,
    pricing: {
@@ -285,7 +375,6 @@ export const openaiChatModels: AIChatModelCard[] = [
    description:
      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
    displayName: 'GPT-4.1 mini',
-    enabled: true,
    id: 'gpt-4.1-mini',
    maxOutput: 32_768,
    pricing: {
@@ -516,7 +605,6 @@ export const openaiChatModels: AIChatModelCard[] = [
    description:
      'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
    displayName: 'ChatGPT-4o',
-    enabled: true,
    id: 'chatgpt-4o-latest',
    pricing: {
      input: 5,
package/src/config/modelProviders/openai.ts
CHANGED
@@ -4,12 +4,28 @@ import { ModelProviderCard } from '@/types/llm';
const OpenAI: ModelProviderCard = {
  apiKeyUrl: 'https://platform.openai.com/api-keys?utm_source=lobehub',
  chatModels: [
+    {
+      contextWindowTokens: 400_000,
+      description:
+        '更快、更经济高效的 GPT-5 版本,适用于明确定义的任务。在保持高质量输出的同时,提供更快的响应速度。',
+      displayName: 'GPT-5 mini',
+      enabled: true,
+      functionCall: true,
+      id: 'gpt-5-mini',
+      maxOutput: 128_000,
+      pricing: {
+        cachedInput: 0.03,
+        input: 0.25,
+        output: 2,
+      },
+      releasedAt: '2025-08-07',
+      vision: true,
+    },
    {
      contextWindowTokens: 1_047_576,
      description:
        'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
      displayName: 'GPT-4.1 mini',
-      enabled: true,
      functionCall: true,
      id: 'gpt-4.1-mini',
      maxOutput: 32_768,
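The GPT-5 cards above are provider metadata only; the diff does not show how `extendParams: ['reasoningEffort']` is mapped onto requests. A minimal, hedged sketch of calling the newly defaulted `gpt-5-mini` directly (assuming the official `openai` npm client, whose chat completions API accepts a `reasoning_effort` field for reasoning models; LobeChat's internal parameter mapping may differ):

```ts
// Illustrative only; not code from this package.
import OpenAI from 'openai';

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const completion = await client.chat.completions.create({
  messages: [{ content: 'Summarize the 1.111.0 release in one sentence.', role: 'user' }],
  model: 'gpt-5-mini', // the new DEFAULT_MODEL in this release
  reasoning_effort: 'low', // assumed to be what the `reasoningEffort` extend param controls
});

console.log(completion.choices[0].message.content);
```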
package/src/const/models.ts
CHANGED
@@ -14,7 +14,7 @@ export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
  },
});

-export const DEFAULT_MODEL = 'gpt-
+export const DEFAULT_MODEL = 'gpt-5-mini';

export const DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small';
export const DEFAULT_EMBEDDING_PROVIDER = ModelProvider.OpenAI;
package/src/libs/model-runtime/fal/index.ts
CHANGED
@@ -35,11 +35,11 @@ export class LobeFalAI implements LobeRuntimeAI {
      ['imageUrl', 'image_url'],
    ]);

-    const defaultInput = {
+    const defaultInput: Record<string, unknown> = {
      enable_safety_checker: false,
      num_images: 1,
    };
-    const userInput = Object.fromEntries(
+    const userInput: Record<string, unknown> = Object.fromEntries(
      (Object.entries(params) as [keyof typeof params, any][]).map(([key, value]) => [
        paramsMap.get(key) ?? key,
        value,
@@ -55,6 +55,12 @@ export class LobeFalAI implements LobeRuntimeAI {
      delete userInput.height;
    }

+    const modelsAcceleratedByDefault = new Set<string>(['flux/krea']);
+    if (modelsAcceleratedByDefault.has(model)) {
+      defaultInput['acceleration'] = 'high';
+      log('Applied default acceleration=high for model: %s', model);
+    }
+
    const endpoint = `fal-ai/${model}`;
    log('Calling fal.subscribe with endpoint: %s and input: %O', endpoint, {
      ...defaultInput,
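A side note on the acceleration default added above (hedged, since the tail of the `fal.subscribe` call is truncated in this diff): the input appears to be assembled by spreading `defaultInput` before the caller's mapped params, so an explicitly supplied `acceleration` value would still override the `'high'` default applied to `flux/krea`. A minimal sketch of that merge order:

```ts
// Illustrative merge-order sketch; not code from this package.
const defaultInput: Record<string, unknown> = {
  acceleration: 'high', // applied only when the model is in modelsAcceleratedByDefault
  enable_safety_checker: false,
  num_images: 1,
};

// Params the caller passed in, already mapped to fal's naming.
const userInput: Record<string, unknown> = { acceleration: 'none', prompt: 'a quiet harbor at dawn' };

// Later spreads win, so the caller's value replaces the default.
const input = { ...defaultInput, ...userInput };
console.log(input.acceleration); // 'none'
```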
package/src/libs/model-runtime/openai/index.ts
CHANGED
@@ -9,7 +9,7 @@ export interface OpenAIModelCard {
  id: string;
}

-const prunePrefixes = ['o1', 'o3', 'o4', 'codex', 'computer-use'];
+const prunePrefixes = ['o1', 'o3', 'o4', 'codex', 'computer-use', 'gpt-5'];
const oaiSearchContextSize = process.env.OPENAI_SEARCH_CONTEXT_SIZE; // low, medium, high

export const LobeOpenAI = createOpenAICompatibleRuntime({
package/src/services/chat.ts
CHANGED
@@ -1,4 +1,4 @@
-import { ChatErrorType
+import { ChatErrorType, TracePayload, TraceTagMap } from '@lobechat/types';
import { PluginRequestPayload, createHeadersWithPluginSettings } from '@lobehub/chat-plugin-sdk';
import { produce } from 'immer';
import { merge } from 'lodash-es';
package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap
CHANGED
@@ -12,7 +12,7 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
  "historyCount": 20,
  "reasoningBudgetToken": 1024,
  "searchFCModel": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "searchMode": "off",
package/src/store/user/slices/modelList/selectors/modelProvider.test.ts
CHANGED
@@ -45,22 +45,6 @@ describe('modelProviderSelectors', () => {
  });

  describe('defaultEnabledProviderModels', () => {
-    it('should return enabled models for a given provider', () => {
-      const s = merge(initialState, {}) as unknown as UserStore;
-
-      const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
-      expect(result).toEqual([
-        'gpt-4.1-mini',
-        'o1-mini',
-        'o1-2024-12-17',
-        'o1-preview',
-        'gpt-4o-mini',
-        'gpt-4o-2024-11-20',
-        'gpt-4o',
-        'chatgpt-4o-latest',
-      ]);
-    });
-
    it('should return undefined for a non-existing provider', () => {
      const s = merge(initialState, {}) as unknown as UserStore;
package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap
CHANGED
@@ -51,34 +51,34 @@ exports[`settingsSelectors > currentSettings > should merge DEFAULT_SETTINGS and
exports[`settingsSelectors > currentSystemAgent > should merge DEFAULT_SYSTEM_AGENT_CONFIG and s.settings.systemAgent correctly 1`] = `
{
  "agentMeta": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "enableAutoReply": true,
  "generationTopic": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "historyCompress": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "queryRewrite": {
    "enabled": true,
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "replyMessage": "Custom auto reply",
  "thread": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "topic": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "translation": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
}
@@ -115,7 +115,7 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
  "historyCount": 20,
  "reasoningBudgetToken": 1024,
  "searchFCModel": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "searchMode": "off",
@@ -159,7 +159,7 @@ exports[`settingsSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CON
  "historyCount": 20,
  "reasoningBudgetToken": 1024,
  "searchFCModel": {
-    "model": "gpt-
+    "model": "gpt-5-mini",
    "provider": "openai",
  },
  "searchMode": "off",