@lobehub/chat 1.16.6 → 1.16.8
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release.
This version of @lobehub/chat might be problematic.
- package/CHANGELOG.md +50 -0
- package/README.md +8 -8
- package/README.zh-CN.md +8 -8
- package/package.json +1 -1
- package/src/components/ModelSelect/index.tsx +1 -9
- package/src/config/modelProviders/ai360.ts +23 -68
- package/src/config/modelProviders/anthropic.ts +39 -2
- package/src/config/modelProviders/baichuan.ts +30 -11
- package/src/config/modelProviders/bedrock.ts +80 -19
- package/src/config/modelProviders/deepseek.ts +11 -2
- package/src/config/modelProviders/fireworksai.ts +6 -2
- package/src/config/modelProviders/google.ts +53 -2
- package/src/config/modelProviders/groq.ts +46 -7
- package/src/config/modelProviders/minimax.ts +2 -0
- package/src/config/modelProviders/openai.ts +102 -1
- package/src/const/discover.ts +1 -0
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +56 -0
- package/src/server/routers/edge/config/__snapshots__/index.test.ts.snap +32 -0
- package/src/server/routers/edge/config/index.test.ts +4 -0
- package/src/types/llm.ts +29 -1
- package/src/utils/format.test.ts +42 -1
- package/src/utils/format.ts +11 -0
- package/src/utils/parseModels.test.ts +8 -0
package/src/config/modelProviders/openai.ts
CHANGED
@@ -1,7 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';

 // ref:
-// https://platform.openai.com/docs/models
 // https://platform.openai.com/docs/deprecations
 const OpenAI: ModelProviderCard = {
   chatModels: [
@@ -12,6 +11,10 @@ const OpenAI: ModelProviderCard = {
       functionCall: true,
       id: 'gpt-4o-mini',
       maxOutput: 16_385,
+      pricing: {
+        input: 0.15,
+        output: 0.6,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -21,6 +24,10 @@ const OpenAI: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'gpt-4o',
+      pricing: {
+        input: 5,
+        output: 15,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -30,6 +37,22 @@ const OpenAI: ModelProviderCard = {
       enabled: true,
       functionCall: true,
       id: 'gpt-4o-2024-08-06',
+      pricing: {
+        input: 2.5,
+        output: 10,
+      },
+      tokens: 128_000,
+      vision: true,
+    },
+    {
+      description: 'Latest GPT-4o snapshot that supports Structured Outputs',
+      displayName: 'GPT-4o (240513)',
+      functionCall: true,
+      id: 'gpt-4o-2024-05-13',
+      pricing: {
+        input: 5,
+        output: 15,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -38,6 +61,10 @@ const OpenAI: ModelProviderCard = {
       displayName: 'ChatGPT-4o',
       enabled: true,
       id: 'chatgpt-4o-latest',
+      pricing: {
+        input: 5,
+        output: 15,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -46,6 +73,10 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-4 Turbo',
       functionCall: true,
       id: 'gpt-4-turbo',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -54,6 +85,10 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-4 Turbo Vision (240409)',
       functionCall: true,
       id: 'gpt-4-turbo-2024-04-09',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -62,24 +97,40 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-4 Turbo Preview',
       functionCall: true,
       id: 'gpt-4-turbo-preview',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
     },
     {
       displayName: 'GPT-4 Turbo Preview (0125)',
       functionCall: true,
       id: 'gpt-4-0125-preview',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
     },
     {
       description: 'Currently points to gpt-4-1106-vision-preview', // Will be discontinued on December 6, 2024
       displayName: 'GPT-4 Turbo Vision Preview',
       id: 'gpt-4-vision-preview',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
       vision: true,
     },
     {
       displayName: 'GPT-4 Turbo Vision Preview (1106)', // Will be discontinued on December 6, 2024
       id: 'gpt-4-1106-vision-preview',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
       vision: true,
     },
@@ -87,6 +138,10 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-4 Turbo Preview (1106)',
       functionCall: true,
       id: 'gpt-4-1106-preview',
+      pricing: {
+        input: 10,
+        output: 30,
+      },
       tokens: 128_000,
     },
     {
@@ -94,12 +149,20 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-4',
       functionCall: true,
       id: 'gpt-4',
+      pricing: {
+        input: 30,
+        output: 60,
+      },
       tokens: 8192,
     },
     {
       displayName: 'GPT-4 (0613)',
       functionCall: true,
       id: 'gpt-4-0613',
+      pricing: {
+        input: 30,
+        output: 60,
+      },
       tokens: 8192,
     },
     {
@@ -107,12 +170,20 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-4 32K',
       functionCall: true,
       id: 'gpt-4-32k',
+      pricing: {
+        input: 60,
+        output: 120,
+      },
       tokens: 32_768,
     },
     {
       displayName: 'GPT-4 32K (0613)', // Will be discontinued on June 6, 2025
       functionCall: true,
       id: 'gpt-4-32k-0613',
+      pricing: {
+        input: 60,
+        output: 120,
+      },
       tokens: 32_768,
     },
     {
@@ -121,23 +192,39 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-3.5 Turbo',
       functionCall: true,
       id: 'gpt-3.5-turbo',
+      pricing: {
+        input: 0.5,
+        output: 1.5,
+      },
       tokens: 16_385,
     },
     {
       displayName: 'GPT-3.5 Turbo (0125)',
       functionCall: true,
       id: 'gpt-3.5-turbo-0125',
+      pricing: {
+        input: 0.5,
+        output: 1.5,
+      },
       tokens: 16_385,
     },
     {
       displayName: 'GPT-3.5 Turbo (1106)',
       functionCall: true,
       id: 'gpt-3.5-turbo-1106',
+      pricing: {
+        input: 1,
+        output: 2,
+      },
       tokens: 16_385,
     },
     {
       displayName: 'GPT-3.5 Turbo Instruct',
       id: 'gpt-3.5-turbo-instruct',
+      pricing: {
+        input: 1.5,
+        output: 2,
+      },
       tokens: 4096,
     },
     {
@@ -145,18 +232,30 @@ const OpenAI: ModelProviderCard = {
       displayName: 'GPT-3.5 Turbo 16K',
       id: 'gpt-3.5-turbo-16k',
       legacy: true,
+      pricing: {
+        input: 3,
+        output: 4,
+      },
       tokens: 16_385,
     },
     {
       displayName: 'GPT-3.5 Turbo (0613)', // Will be discontinued on September 13, 2024
       id: 'gpt-3.5-turbo-0613',
       legacy: true,
+      pricing: {
+        input: 1.5,
+        output: 2,
+      },
       tokens: 4096,
     },
     {
       description: 'Currently points to gpt-3.5-turbo-16k-0613', // Will be discontinued on September 13, 2024
       id: 'gpt-3.5-turbo-16k-0613',
       legacy: true,
+      pricing: {
+        input: 3,
+        output: 4,
+      },
       tokens: 16_385,
     },
   ],
@@ -164,7 +263,9 @@ const OpenAI: ModelProviderCard = {
   enabled: true,
   id: 'openai',
   modelList: { showModelFetcher: true },
+  modelsUrl: 'https://platform.openai.com/docs/models',
   name: 'OpenAI',
+  url: 'https://openai.com',
 };

 export default OpenAI;
package/src/const/discover.ts
CHANGED
@@ -0,0 +1 @@
+export const CNYtoUSD = 7.14;
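The new constant is a bare number, so the conversion direction is worth spelling out: 7.14 reads as CNY per USD, so CNY-denominated prices are divided by it to obtain USD. A minimal sketch of that use; the cnyPriceToUSD helper is hypothetical and not part of the package:

// Hypothetical helper: normalize a CNY-denominated price to USD,
// assuming CNYtoUSD is the CNY-per-USD exchange rate (7.14 CNY ≈ 1 USD).
import { CNYtoUSD } from '@/const/discover';

export const cnyPriceToUSD = (priceInCNY: number): number => priceInCNY / CNYtoUSD;

// cnyPriceToUSD(7.14) === 1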
package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap
CHANGED
@@ -7,6 +7,10 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "displayName": "GPT-3.5 Turbo",
     "functionCall": true,
     "id": "gpt-3.5-turbo",
+    "pricing": {
+      "input": 0.5,
+      "output": 1.5,
+    },
     "tokens": 16385,
   },
   {
@@ -14,17 +18,29 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "displayName": "GPT-3.5 Turbo 16K",
     "id": "gpt-3.5-turbo-16k",
     "legacy": true,
+    "pricing": {
+      "input": 3,
+      "output": 4,
+    },
     "tokens": 16385,
   },
   {
     "description": "Currently points to gpt-3.5-turbo-16k-0613",
     "id": "gpt-3.5-turbo-16k-0613",
     "legacy": true,
+    "pricing": {
+      "input": 3,
+      "output": 4,
+    },
     "tokens": 16385,
   },
   {
     "displayName": "GPT-4 Turbo Vision Preview (1106)",
     "id": "gpt-4-1106-vision-preview",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
     "tokens": 128000,
     "vision": true,
   },
@@ -35,6 +51,10 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "displayName": "GPT-4 Turbo Preview (0125)",
     "functionCall": true,
     "id": "gpt-4-0125-preview",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
     "tokens": 128000,
   },
   {
@@ -42,11 +62,19 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "displayName": "GPT-4 Turbo Preview",
     "functionCall": true,
     "id": "gpt-4-turbo-preview",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
     "tokens": 128000,
   },
   {
     "displayName": "GPT-3.5 Turbo Instruct",
     "id": "gpt-3.5-turbo-instruct",
+    "pricing": {
+      "input": 1.5,
+      "output": 2,
+    },
     "tokens": 4096,
   },
   {
@@ -56,24 +84,40 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "displayName": "GPT-3.5 Turbo (0613)",
     "id": "gpt-3.5-turbo-0613",
     "legacy": true,
+    "pricing": {
+      "input": 1.5,
+      "output": 2,
+    },
     "tokens": 4096,
   },
   {
     "displayName": "GPT-3.5 Turbo (1106)",
     "functionCall": true,
     "id": "gpt-3.5-turbo-1106",
+    "pricing": {
+      "input": 1,
+      "output": 2,
+    },
     "tokens": 16385,
   },
   {
     "displayName": "GPT-4 Turbo Preview (1106)",
     "functionCall": true,
     "id": "gpt-4-1106-preview",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
     "tokens": 128000,
   },
   {
     "description": "Currently points to gpt-4-1106-vision-preview",
     "displayName": "GPT-4 Turbo Vision Preview",
     "id": "gpt-4-vision-preview",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
     "tokens": 128000,
     "vision": true,
   },
@@ -82,18 +126,30 @@ exports[`LobeOpenAI > models > should get models 1`] = `
     "displayName": "GPT-4",
     "functionCall": true,
     "id": "gpt-4",
+    "pricing": {
+      "input": 30,
+      "output": 60,
+    },
     "tokens": 8192,
   },
   {
     "displayName": "GPT-3.5 Turbo (0125)",
     "functionCall": true,
     "id": "gpt-3.5-turbo-0125",
+    "pricing": {
+      "input": 0.5,
+      "output": 1.5,
+    },
     "tokens": 16385,
   },
   {
     "displayName": "GPT-4 (0613)",
     "functionCall": true,
     "id": "gpt-4-0613",
+    "pricing": {
+      "input": 30,
+      "output": 60,
+    },
     "tokens": 8192,
   },
 ]
package/src/server/routers/edge/config/__snapshots__/index.test.ts.snap
CHANGED
@@ -24,6 +24,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "enabled": true,
       "functionCall": true,
       "id": "gpt-4-0125-preview",
+      "pricing": {
+        "input": 10,
+        "output": 30,
+      },
       "tokens": 128000,
     },
   ],
@@ -37,6 +41,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "enabled": true,
       "functionCall": true,
       "id": "gpt-3.5-turbo-1106",
+      "pricing": {
+        "input": 1,
+        "output": 2,
+      },
       "tokens": 16385,
     },
     {
@@ -45,6 +53,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
      "enabled": true,
       "functionCall": true,
       "id": "gpt-3.5-turbo",
+      "pricing": {
+        "input": 0.5,
+        "output": 1.5,
+      },
       "tokens": 16385,
     },
     {
@@ -53,6 +65,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "enabled": true,
       "id": "gpt-3.5-turbo-16k",
       "legacy": true,
+      "pricing": {
+        "input": 3,
+        "output": 4,
+      },
       "tokens": 16385,
     },
     {
@@ -61,6 +77,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "enabled": true,
       "functionCall": true,
       "id": "gpt-4",
+      "pricing": {
+        "input": 30,
+        "output": 60,
+      },
       "tokens": 8192,
     },
     {
@@ -69,6 +89,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "enabled": true,
       "functionCall": true,
       "id": "gpt-4-32k",
+      "pricing": {
+        "input": 60,
+        "output": 120,
+      },
       "tokens": 32768,
     },
     {
@@ -76,6 +100,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "enabled": true,
       "functionCall": true,
       "id": "gpt-4-1106-preview",
+      "pricing": {
+        "input": 10,
+        "output": 30,
+      },
       "tokens": 128000,
     },
     {
@@ -83,6 +111,10 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
       "displayName": "GPT-4 Turbo Vision Preview",
       "enabled": true,
       "id": "gpt-4-vision-preview",
+      "pricing": {
+        "input": 10,
+        "output": 30,
+      },
       "tokens": 128000,
       "vision": true,
     },
package/src/types/llm.ts
CHANGED
@@ -33,10 +33,29 @@ export interface ChatModelCard {
    */
   legacy?: boolean;
   maxOutput?: number;
+  pricing?: {
+    cachedInput?: number;
+    /**
+     * the currency of the pricing
+     * @default USD
+     */
+    currency?: 'CNY' | 'USD';
+    /**
+     * the input pricing, e.g. $1 / 1M tokens
+     */
+    input?: number;
+    /**
+     * the output pricing, e.g. $2 / 1M tokens
+     */
+    output?: number;
+    writeCacheInput?: number;
+  };
+  releasedAt?: string;
   /**
    * the context window (or input + output tokens limit)
    */
   tokens?: number;
+
   /**
    * whether model supports vision
    */
@@ -79,6 +98,10 @@ export interface ModelProviderCard {
     placeholder?: string;
     showModelFetcher?: boolean;
   };
+  /**
+   * the url show the all models in the provider
+   */
+  modelsUrl?: string;
   /**
    * the name show for end user
    */
@@ -90,16 +113,21 @@ export interface ModelProviderCard {
         title?: string;
       }
     | false;
+
   /**
    * whether show api key in the provider config
    * so provider like ollama don't need api key field
    */
   showApiKey?: boolean;
-
   /**
    * whether to smoothing the output
    */
   smoothing?: SmoothingParams;
+
+  /**
+   * provider's website url
+   */
+  url?: string;
 }

 // 语言模型的设置参数
package/src/utils/format.test.ts
CHANGED
@@ -1,6 +1,6 @@
 import { describe, expect, it } from 'vitest';

-import { formatSize, formatSpeed, formatTime } from './format';
+import { formatSize, formatSpeed, formatTime, formatTokenNumber } from './format';

 describe('formatSize', () => {
   it('should format bytes to KB correctly', () => {
@@ -73,3 +73,44 @@ describe('formatTime', () => {
     expect(formatTime(3599.99)).toBe('60.0 min');
   });
 });
+
+describe('formatTokenNumber', () => {
+  it('should return "1K" for numbers between 1 and 1023', () => {
+    expect(formatTokenNumber(500)).toBe('1K');
+    expect(formatTokenNumber(1000)).toBe('1K');
+  });
+
+  it('should format numbers between 1024 and 41,983 correctly', () => {
+    expect(formatTokenNumber(1024)).toBe('1K');
+    expect(formatTokenNumber(2000)).toBe('2K');
+    expect(formatTokenNumber(2048)).toBe('2K');
+    expect(formatTokenNumber(4000)).toBe('4K');
+    expect(formatTokenNumber(4096)).toBe('4K');
+    expect(formatTokenNumber(32000)).toBe('32K');
+    expect(formatTokenNumber(65536)).toBe('64K');
+  });
+
+  it('should format numbers between 41,984 and 127,999 correctly', () => {
+    expect(formatTokenNumber(41984)).toBe('41K');
+    expect(formatTokenNumber(100000)).toBe('97K');
+    expect(formatTokenNumber(127999)).toBe('124K');
+  });
+
+  it('should return "128K" for 131,072', () => {
+    expect(formatTokenNumber(131072)).toBe('128K'); // Qwen
+  });
+
+  it('should format numbers between 128,000 and 999,999 correctly', () => {
+    expect(formatTokenNumber(128000)).toBe('128K');
+    expect(formatTokenNumber(200000)).toBe('200K'); // Claude
+    expect(formatTokenNumber(999999)).toBe('999K');
+  });
+
+  it('should format numbers 1,000,000 and above correctly', () => {
+    expect(formatTokenNumber(1000000)).toBe('1M');
+    expect(formatTokenNumber(1024000)).toBe('1M');
+    expect(formatTokenNumber(1048576)).toBe('1M'); // Gemini Flash
+    expect(formatTokenNumber(2000000)).toBe('2M');
+    expect(formatTokenNumber(2097152)).toBe('2M'); // Gemini Pro
+  });
+});
package/src/utils/format.ts
CHANGED
@@ -54,3 +54,14 @@ export const formatTime = (timeInSeconds: number): string => {
 export const formatNumber = (num: any) => {
   return new Intl.NumberFormat('en-US').format(num);
 };
+
+export const formatTokenNumber = (num: number): string => {
+  if (num > 0 && num < 1024) return '1K';
+
+  let kiloToken = Math.floor(num / 1024);
+  if (num >= 1024 && num < 1024 * 41 || num >= 128_000) {
+    kiloToken = Math.floor(num / 1000);
+  }
+  if (num === 131_072) return '128K';
+  return kiloToken < 1000 ? `${kiloToken}K` : `${Math.floor(kiloToken / 1000)}M`;
+};
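A short usage sketch of the new helper: the outputs below follow from the implementation and the tests above, while the display context (e.g. the ModelSelect token badge) and import path are assumptions.

// Usage sketch for formatTokenNumber; expected results follow from the code/tests above.
import { formatTokenNumber } from '@/utils/format';

formatTokenNumber(16_385); // '16K'  (gpt-3.5-turbo context window)
formatTokenNumber(128_000); // '128K' (gpt-4o)
formatTokenNumber(131_072); // '128K' (special-cased, e.g. Qwen)
formatTokenNumber(1_048_576); // '1M'  (Gemini Flash)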
package/src/utils/parseModels.test.ts
CHANGED
@@ -274,6 +274,10 @@ describe('transformToChatModelCards', () => {
         enabled: true,
         id: 'gpt-4-0125-preview',
         tokens: 128000,
+        pricing: {
+          input: 10,
+          output: 30,
+        },
       },
       {
         description: 'GPT-4 Turbo 视觉版 (240409)',
@@ -284,6 +288,10 @@ describe('transformToChatModelCards', () => {
         id: 'gpt-4-turbo-2024-04-09',
         tokens: 128000,
         vision: true,
+        pricing: {
+          input: 10,
+          output: 30,
+        },
       },
     ]);
   });