@lobehub/chat 1.80.2 → 1.80.4
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/next.config.ts +5 -1
- package/package.json +1 -1
- package/src/config/aiModels/azure.ts +52 -8
- package/src/config/aiModels/azureai.ts +40 -1
- package/src/config/aiModels/cohere.ts +17 -20
- package/src/config/aiModels/github.ts +131 -16
- package/src/config/aiModels/groq.ts +17 -101
- package/src/config/aiModels/openai.ts +47 -8
- package/src/config/aiModels/openrouter.ts +163 -25
- package/src/config/aiModels/volcengine.ts +90 -2
- package/src/database/models/user.ts +13 -1
- package/src/layout/AuthProvider/NextAuth/UserUpdater.tsx +18 -11
- package/src/libs/oidc-provider/adapter.ts +5 -6
- package/src/libs/oidc-provider/provider.ts +1 -0
- package/src/server/routers/lambda/user.ts +9 -2
- package/src/services/user/client.ts +5 -2
- package/src/store/user/slices/common/action.ts +9 -2
- package/src/types/user/index.ts +6 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@

 # Changelog

+### [Version 1.80.4](https://github.com/lobehub/lobe-chat/compare/v1.80.3...v1.80.4)
+
+<sup>Released on **2025-04-17**</sup>
+
+#### 💄 Styles
+
+- **misc**: Add o3/o4-mini models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Add o3/o4-mini models, closes [#7448](https://github.com/lobehub/lobe-chat/issues/7448) ([3fdba72](https://github.com/lobehub/lobe-chat/commit/3fdba72))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.80.3](https://github.com/lobehub/lobe-chat/compare/v1.80.2...v1.80.3)
+
+<sup>Released on **2025-04-16**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Refactor to get user info from api.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Refactor to get user info from api, closes [#7444](https://github.com/lobehub/lobe-chat/issues/7444) ([4c1fb4a](https://github.com/lobehub/lobe-chat/commit/4c1fb4a))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.80.2](https://github.com/lobehub/lobe-chat/compare/v1.80.1...v1.80.2)

 <sup>Released on **2025-04-16**</sup>
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Add o3/o4-mini models."
+      ]
+    },
+    "date": "2025-04-17",
+    "version": "1.80.4"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Refactor to get user info from api."
+      ]
+    },
+    "date": "2025-04-16",
+    "version": "1.80.3"
+  },
   {
     "children": {
       "fixes": [
package/next.config.ts
CHANGED
@@ -33,8 +33,12 @@ const nextConfig: NextConfig = {
      '@lobehub/ui',
      'gpt-tokenizer',
    ],
+    // oidc provider depend on constructor.name
+    // but swc minification will remove the name
+    // so we need to disable it
+    // refs: https://github.com/lobehub/lobe-chat/pull/7430
+    serverMinification: false,
    webVitalsAttribution: ['CLS', 'LCP'],
-    webpackMemoryOptimizations: true,
  },
  async headers() {
    return [
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.80.2",
+  "version": "1.80.4",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/config/aiModels/azure.ts
CHANGED
@@ -1,6 +1,54 @@
import { AIChatModelCard } from '@/types/aiModel';

const azureChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'o3',
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
+    displayName: 'o3',
+    enabled: true,
+    id: 'o3',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 2.5,
+      input: 10,
+      output: 40,
+    },
+    releasedAt: '2025-04-17',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    config: {
+      deploymentName: 'o4-mini',
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
+    displayName: 'o4-mini',
+    enabled: true,
+    id: 'o4-mini',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.275,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2025-04-17',
+    type: 'chat',
+  },
  {
    abilities: {
      functionCall: true,
@@ -59,7 +107,6 @@ const azureChatModels: AIChatModelCard[] = [
    description:
      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
    displayName: 'GPT-4.1 nano',
-    enabled: true,
    id: 'gpt-4.1-nano',
    maxOutput: 32_768,
    pricing: {
@@ -81,7 +128,7 @@ const azureChatModels: AIChatModelCard[] = [
    contextWindowTokens: 200_000,
    description:
      'o3-mini 是我们最新的小型推理模型,在与 o1-mini 相同的成本和延迟目标下提供高智能。',
-    displayName: '
+    displayName: 'o3-mini',
    id: 'o3-mini',
    maxOutput: 100_000,
    pricing: {
@@ -102,8 +149,7 @@ const azureChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
-    displayName: '
-    enabled: true,
+    displayName: 'o1-mini',
    id: 'o1-mini',
    maxOutput: 65_536,
    pricing: {
@@ -124,8 +170,7 @@ const azureChatModels: AIChatModelCard[] = [
    contextWindowTokens: 200_000,
    description:
      'o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。',
-    displayName: '
-    enabled: true,
+    displayName: 'o1',
    id: 'o1',
    maxOutput: 100_000,
    pricing: {
@@ -146,7 +191,7 @@ const azureChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'o1是OpenAI新的推理模型,适用于需要广泛通用知识的复杂任务。该模型具有128K上下文和2023年10月的知识截止日期。',
-    displayName: '
+    displayName: 'o1-preview',
    id: 'o1-preview',
    maxOutput: 32_768,
    pricing: {
@@ -205,7 +250,6 @@ const azureChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description: 'GPT-4o Mini,小型高效模型,具备与GPT-4o相似的卓越性能。',
    displayName: 'GPT 4o Mini',
-    enabled: true,
    id: 'gpt-4o-mini',
    maxOutput: 4096,
    pricing: {
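A recurring pattern in this and the following provider files (azureai, cohere, github): new o3/o4-mini cards are added, and `enabled: true` is removed from several older entries. Assuming `enabled` marks the models surfaced by default (an inference from the field name, not something this diff states), here is a minimal hypothetical sketch of how such a card list might be filtered; the `ModelCardSketch` type and `defaultModels` helper are illustrative, not lobe-chat's actual `AIChatModelCard` handling.

```ts
// Illustrative types mirroring the fields visible in the diff; lobe-chat's real
// AIChatModelCard type lives in '@/types/aiModel' and has more fields.
interface ModelCardSketch {
  displayName: string;
  enabled?: boolean;
  id: string;
  pricing?: { cachedInput?: number; input?: number; output?: number };
}

const cards: ModelCardSketch[] = [
  { displayName: 'o3', enabled: true, id: 'o3', pricing: { cachedInput: 2.5, input: 10, output: 40 } },
  { displayName: 'o4-mini', enabled: true, id: 'o4-mini', pricing: { input: 1.1, output: 4.4 } },
  { displayName: 'o1-mini', id: 'o1-mini' }, // `enabled` dropped in this release
];

// Hypothetical helper: only cards explicitly marked `enabled` show up by default.
const defaultModels = cards.filter((card) => card.enabled).map((card) => card.id);

console.log(defaultModels); // ['o3', 'o4-mini']
```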
package/src/config/aiModels/azureai.ts
CHANGED
@@ -27,6 +27,46 @@ const azureChatModels: AIChatModelCard[] = [
    },
    type: 'chat',
  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
+    displayName: 'o3',
+    id: 'o3',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 2.5,
+      input: 10,
+      output: 40,
+    },
+    releasedAt: '2025-04-17',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
+    displayName: 'o4-mini',
+    id: 'o4-mini',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.275,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2025-04-17',
+    type: 'chat',
+  },
  {
    abilities: {
      functionCall: true,
@@ -76,7 +116,6 @@ const azureChatModels: AIChatModelCard[] = [
    description:
      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
    displayName: 'GPT-4.1 nano',
-    enabled: true,
    id: 'gpt-4.1-nano',
    maxOutput: 32_768,
    pricing: {
package/src/config/aiModels/cohere.ts
CHANGED
@@ -8,7 +8,7 @@ const cohereChatModels: AIChatModelCard[] = [
    contextWindowTokens: 256_000,
    description:
      'Command A 是我们迄今为止性能最强的模型,在工具使用、代理、检索增强生成(RAG)和多语言应用场景方面表现出色。Command A 具有 256K 的上下文长度,仅需两块 GPU 即可运行,并且相比于 Command R+ 08-2024,吞吐量提高了 150%。',
-    displayName: 'Command A
+    displayName: 'Command A 2503',
    enabled: true,
    id: 'command-a-03-2025',
    maxOutput: 8000,
@@ -25,13 +25,12 @@ const cohereChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'command-r-plus 是 command-r-plus-04-2024 的别名,因此如果您在 API 中使用 command-r-plus,实际上指向的就是该模型。',
-    displayName: 'Command R+',
-
-    id: 'command-r-plus',
+    displayName: 'Command R+ 2404',
+    id: 'command-r-plus-04-2024',
    maxOutput: 4000,
    pricing: {
-      input:
-      output:
+      input: 3,
+      output: 15
    },
    type: 'chat'
  },
@@ -42,12 +41,13 @@ const cohereChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'Command R+ 是一个遵循指令的对话模型,在语言任务方面表现出更高的质量、更可靠,并且相比以往模型具有更长的上下文长度。它最适用于复杂的 RAG 工作流和多步工具使用。',
-    displayName: 'Command R+
-
+    displayName: 'Command R+ 2408',
+    enabled: true,
+    id: 'command-r-plus-08-2024',
    maxOutput: 4000,
    pricing: {
-      input:
-      output:
+      input: 2.5,
+      output: 10
    },
    type: 'chat'
  },
@@ -57,10 +57,9 @@ const cohereChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 128_000,
    description:
-      'command-r
-    displayName: 'Command R',
-
-    id: 'command-r',
+      'command-r 是一种遵循指令的会话模型,与以前的模型相比,它以更高的质量、更可靠的方式和更长的上下文执行语言任务。它可用于复杂的工作流程,如代码生成、检索增强生成(RAG)、工具使用和代理。',
+    displayName: 'Command R 2403',
+    id: 'command-r-03-2024',
    maxOutput: 4000,
    pricing: {
      input: 0.15,
@@ -75,7 +74,8 @@ const cohereChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'command-r-08-2024 是 Command R 模型的更新版本,于 2024 年 8 月发布。',
-    displayName: 'Command R
+    displayName: 'Command R 2408',
+    enabled: true,
    id: 'command-r-08-2024',
    maxOutput: 4000,
    pricing: {
@@ -91,7 +91,7 @@ const cohereChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'Command R 是一个遵循指令的对话模型,在语言任务方面表现出更高的质量、更可靠,并且相比以往模型具有更长的上下文长度。它可用于复杂的工作流程,如代码生成、检索增强生成(RAG)、工具使用和代理。',
-    displayName: 'Command R
+    displayName: 'Command R 2403',
    id: 'command-r-03-2024',
    maxOutput: 4000,
    pricing: {
@@ -107,8 +107,7 @@ const cohereChatModels: AIChatModelCard[] = [
    contextWindowTokens: 128_000,
    description:
      'command-r7b-12-2024 是一个小型且高效的更新版本,于 2024 年 12 月发布。它在 RAG、工具使用、代理等需要复杂推理和多步处理的任务中表现出色。',
-    displayName: 'Command R7B
-    enabled: true,
+    displayName: 'Command R7B 2412',
    id: 'command-r7b-12-2024',
    maxOutput: 4000,
    pricing: {
@@ -122,7 +121,6 @@ const cohereChatModels: AIChatModelCard[] = [
    description:
      '一个遵循指令的对话模型,在语言任务中表现出高质量、更可靠,并且相比我们的基础生成模型具有更长的上下文长度。',
    displayName: 'Command',
-    enabled: true,
    id: 'command',
    maxOutput: 4000,
    pricing: {
@@ -152,7 +150,6 @@ const cohereChatModels: AIChatModelCard[] = [
    description:
      '一个更小、更快的 Command 版本,几乎同样强大,但速度更快。',
    displayName: 'Command Light',
-    enabled: true,
    id: 'command-light',
    maxOutput: 4000,
    pricing: {
package/src/config/aiModels/github.ts
CHANGED
@@ -5,28 +5,137 @@ const githubChatModels: AIChatModelCard[] = [
    abilities: {
      functionCall: true,
      reasoning: true,
+      vision: true,
    },
    contextWindowTokens: 200_000,
    description:
-      'o3
-    displayName: '
+      'o3 是一款全能强大的模型,在多个领域表现出色。它为数学、科学、编程和视觉推理任务树立了新标杆。它也擅长技术写作和指令遵循。用户可利用它分析文本、代码和图像,解决多步骤的复杂问题。',
+    displayName: 'o3',
+    id: 'o3',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 2.5,
+      input: 10,
+      output: 40,
+    },
+    releasedAt: '2025-04-17',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      vision: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o4-mini 是我们最新的小型 o 系列模型。 它专为快速有效的推理而优化,在编码和视觉任务中表现出极高的效率和性能。',
+    displayName: 'o4-mini',
+    enabled: true,
+    id: 'o4-mini',
+    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.275,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2025-04-17',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 是我们用于复杂任务的旗舰模型。它非常适合跨领域解决问题。',
+    displayName: 'GPT-4.1',
+    enabled: true,
+    id: 'gpt-4.1',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.5,
+      input: 2,
+      output: 8,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 mini 提供了智能、速度和成本之间的平衡,使其成为许多用例中有吸引力的模型。',
+    displayName: 'GPT-4.1 mini',
    enabled: true,
+    id: 'gpt-4.1-mini',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.1,
+      input: 0.4,
+      output: 1.6,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_047_576,
+    description:
+      'GPT-4.1 nano 是最快,最具成本效益的GPT-4.1模型。',
+    displayName: 'GPT-4.1 nano',
+    id: 'gpt-4.1-nano',
+    maxOutput: 32_768,
+    pricing: {
+      cachedInput: 0.025,
+      input: 0.1,
+      output: 0.4,
+    },
+    releasedAt: '2025-04-14',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 200_000,
+    description:
+      'o3-mini 是我们最新的小型推理模型,在与 o1-mini 相同的成本和延迟目标下提供高智能。',
+    displayName: 'o3-mini',
    id: 'o3-mini',
    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 0.55,
+      input: 1.1,
+      output: 4.4,
+    },
    releasedAt: '2025-01-31',
    type: 'chat',
  },
  {
    abilities: {
      reasoning: true,
-      vision: true,
    },
    contextWindowTokens: 128_000,
-    description:
-
-
+    description:
+      'o1-mini是一款针对编程、数学和科学应用场景而设计的快速、经济高效的推理模型。该模型具有128K上下文和2023年10月的知识截止日期。',
+    displayName: 'o1-mini',
    id: 'o1-mini',
    maxOutput: 65_536,
+    pricing: {
+      cachedInput: 0.55,
+      input: 1.1,
+      output: 4.4,
+    },
+    releasedAt: '2024-09-12',
    type: 'chat',
  },
  {
@@ -37,24 +146,32 @@ const githubChatModels: AIChatModelCard[] = [
    contextWindowTokens: 200_000,
    description:
      'o1是OpenAI新的推理模型,支持图文输入并输出文本,适用于需要广泛通用知识的复杂任务。该模型具有200K上下文和2023年10月的知识截止日期。',
-    displayName: '
-    enabled: true,
+    displayName: 'o1',
    id: 'o1',
    maxOutput: 100_000,
+    pricing: {
+      cachedInput: 7.5,
+      input: 15,
+      output: 60,
+    },
+    releasedAt: '2024-12-17',
    type: 'chat',
  },
  {
    abilities: {
      reasoning: true,
-      vision: true,
    },
    contextWindowTokens: 128_000,
    description:
-      '
-    displayName: '
-    enabled: true,
+      'o1是OpenAI新的推理模型,适用于需要广泛通用知识的复杂任务。该模型具有128K上下文和2023年10月的知识截止日期。',
+    displayName: 'o1-preview',
    id: 'o1-preview',
    maxOutput: 32_768,
+    pricing: {
+      input: 15,
+      output: 60,
+    },
+    releasedAt: '2024-09-12',
    type: 'chat',
  },
  {
@@ -64,8 +181,7 @@ const githubChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 134_144,
    description: '一种经济高效的AI解决方案,适用于多种文本和图像任务。',
-    displayName: '
-    enabled: true,
+    displayName: 'GPT-4o mini',
    id: 'gpt-4o-mini',
    maxOutput: 4096,
    type: 'chat',
@@ -77,8 +193,7 @@ const githubChatModels: AIChatModelCard[] = [
    },
    contextWindowTokens: 134_144,
    description: 'OpenAI GPT-4系列中最先进的多模态模型,可以处理文本和图像输入。',
-    displayName: '
-    enabled: true,
+    displayName: 'GPT-4o',
    id: 'gpt-4o',
    maxOutput: 16_384,
    type: 'chat',