@lobehub/chat 1.100.0 → 1.100.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/rules/testing-guide/testing-guide.mdc +173 -0
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +25 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +25 -0
- package/docs/self-hosting/faq/vercel-ai-image-timeout.mdx +65 -0
- package/docs/self-hosting/faq/vercel-ai-image-timeout.zh-CN.mdx +63 -0
- package/docs/usage/providers/fal.mdx +6 -6
- package/docs/usage/providers/fal.zh-CN.mdx +6 -6
- package/package.json +1 -1
- package/src/app/(backend)/middleware/auth/index.ts +28 -7
- package/src/const/auth.ts +1 -0
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +1 -1
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +2 -1
- package/src/libs/oidc-provider/jwt.ts +22 -13
- package/src/libs/trpc/lambda/context.ts +8 -2
- package/src/server/globalConfig/genServerAiProviderConfig.test.ts +235 -0
- package/src/server/globalConfig/genServerAiProviderConfig.ts +9 -10
- package/src/store/aiInfra/slices/aiProvider/action.ts +2 -1
- package/src/utils/getFallbackModelProperty.test.ts +193 -0
- package/src/utils/getFallbackModelProperty.ts +36 -0
- package/src/utils/parseModels.test.ts +150 -48
- package/src/utils/parseModels.ts +26 -11
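The main change visible in the hunks below is that the model-string helpers in `src/utils/parseModels.ts` now take the provider id as their first argument (so the parser can resolve each model's `type` via `getModelPropertyWithFallback`), and `transformToAiModelList` accepts a `defaultModels` field. A minimal usage sketch based on the updated tests; the `@/utils/parseModels` import alias is an assumption, and the model strings are illustrative:

```ts
import { extractEnabledModels, parseModelString, transformToAiModelList } from '@/utils/parseModels';

// providerId now comes first, so the parser can resolve each model's type
// (chat vs image) from that provider's default model list.
const parsed = parseModelString('fal', '-all,+flux/schnell=Schnell');
// per the new tests: { id: 'flux/schnell', displayName: 'Schnell', abilities: {}, type: 'image' }
console.log(parsed.add[0]);

// extractEnabledModels gains the same leading providerId argument.
const enabled = extractEnabledModels('openai', '+gpt-4=Custom GPT-4,+claude-2=Custom Claude');
console.log(enabled); // ['gpt-4', 'claude-2']

// transformToAiModelList takes defaultModels instead of the old defaultChatModels field.
const list = transformToAiModelList({
  defaultModels: [],
  modelString: '+deepseek-r1->my-custom-deploy',
  providerId: 'volcengine',
  withDeploymentName: true,
});
console.log(list);
```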
package/src/utils/parseModels.test.ts
CHANGED
@@ -4,11 +4,12 @@ import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
 import { openaiChatModels } from '@/config/aiModels/openai';
 import { AiFullModelCard } from '@/types/aiModel';

-import { parseModelString,
+import { extractEnabledModels, parseModelString, transformToAiModelList } from './parseModels';

 describe('parseModelString', () => {
   it('custom deletion, addition, and renaming of models', () => {
     const result = parseModelString(
+      'test-provider',
       '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo,gpt-4-1106-preview=gpt-4-32k',
     );

@@ -16,24 +17,30 @@ describe('parseModelString', () => {
   });

   it('duplicate naming model', () => {
-    const result = parseModelString(
+    const result = parseModelString(
+      'test-provider',
+      'gpt-4-1106-preview=gpt-4-turbo,gpt-4-1106-preview=gpt-4-32k',
+    );
     expect(result).toMatchSnapshot();
   });

   it('only add the model', () => {
-    const result = parseModelString('model1,model2,model3,model4');
+    const result = parseModelString('test-provider', 'model1,model2,model3,model4');

     expect(result).toMatchSnapshot();
   });

   it('empty string model', () => {
-    const result = parseModelString(
+    const result = parseModelString(
+      'test-provider',
+      'gpt-4-1106-preview=gpt-4-turbo,, ,\n ,+claude-2',
+    );
     expect(result).toMatchSnapshot();
   });

   describe('extension capabilities', () => {
     it('with token', () => {
-      const result = parseModelString('chatglm-6b=ChatGLM 6B<4096>');
+      const result = parseModelString('test-provider', 'chatglm-6b=ChatGLM 6B<4096>');

       expect(result.add[0]).toEqual({
         displayName: 'ChatGLM 6B',
@@ -45,7 +52,7 @@ describe('parseModelString', () => {
     });

     it('token and function calling', () => {
-      const result = parseModelString('spark-v3.5=讯飞星火 v3.5<8192:fc>');
+      const result = parseModelString('test-provider', 'spark-v3.5=讯飞星火 v3.5<8192:fc>');

       expect(result.add[0]).toEqual({
         displayName: '讯飞星火 v3.5',
@@ -59,7 +66,7 @@ describe('parseModelString', () => {
     });

     it('token and reasoning', () => {
-      const result = parseModelString('deepseek-r1=Deepseek R1<65536:reasoning>');
+      const result = parseModelString('test-provider', 'deepseek-r1=Deepseek R1<65536:reasoning>');

       expect(result.add[0]).toEqual({
         displayName: 'Deepseek R1',
@@ -73,7 +80,7 @@ describe('parseModelString', () => {
     });

     it('token and search', () => {
-      const result = parseModelString('qwen-max-latest=Qwen Max<32768:search>');
+      const result = parseModelString('test-provider', 'qwen-max-latest=Qwen Max<32768:search>');

       expect(result.add[0]).toEqual({
         displayName: 'Qwen Max',
@@ -88,6 +95,7 @@ describe('parseModelString', () => {

     it('token and image output', () => {
       const result = parseModelString(
+        'test-provider',
         'gemini-2.0-flash-exp-image-generation=Gemini 2.0 Flash (Image Generation) Experimental<32768:imageOutput>',
       );

@@ -104,6 +112,7 @@ describe('parseModelString', () => {

     it('multi models', () => {
       const result = parseModelString(
+        'test-provider',
         'gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>,gpt-4-all=ChatGPT Plus<128000:fc:vision:file>',
       );

@@ -133,6 +142,7 @@ describe('parseModelString', () => {

     it('should have file with builtin models like gpt-4-0125-preview', () => {
       const result = parseModelString(
+        'openai',
         '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+gpt-4-turbo-2024-04-09=ChatGPT-4 Vision<128000:fc:vision:file>',
       );
       expect(result.add).toEqual([
@@ -161,7 +171,7 @@ describe('parseModelString', () => {
     });

     it('should handle empty extension capability value', () => {
-      const result = parseModelString('model1<1024:>');
+      const result = parseModelString('test-provider', 'model1<1024:>');
       expect(result.add[0]).toEqual({
         abilities: {},
         type: 'chat',
@@ -171,7 +181,7 @@ describe('parseModelString', () => {
     });

     it('should handle empty extension capability name', () => {
-      const result = parseModelString('model1<1024::file>');
+      const result = parseModelString('test-provider', 'model1<1024::file>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -183,7 +193,7 @@ describe('parseModelString', () => {
     });

     it('should handle duplicate extension capabilities', () => {
-      const result = parseModelString('model1<1024:vision:vision>');
+      const result = parseModelString('test-provider', 'model1<1024:vision:vision>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -195,7 +205,7 @@ describe('parseModelString', () => {
     });

     it('should handle case-sensitive extension capability names', () => {
-      const result = parseModelString('model1<1024:VISION:FC:file>');
+      const result = parseModelString('test-provider', 'model1<1024:VISION:FC:file>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -207,7 +217,7 @@ describe('parseModelString', () => {
     });

     it('should handle case-sensitive extension capability values', () => {
-      const result = parseModelString('model1<1024:vision:Fc:File>');
+      const result = parseModelString('test-provider', 'model1<1024:vision:Fc:File>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -219,12 +229,12 @@ describe('parseModelString', () => {
     });

     it('should handle empty angle brackets', () => {
-      const result = parseModelString('model1<>');
+      const result = parseModelString('test-provider', 'model1<>');
       expect(result.add[0]).toEqual({ id: 'model1', abilities: {}, type: 'chat' });
     });

     it('should handle not close angle brackets', () => {
-      const result = parseModelString('model1<,model2');
+      const result = parseModelString('test-provider', 'model1<,model2');
       expect(result.add).toEqual([
         { id: 'model1', abilities: {}, type: 'chat' },
         { id: 'model2', abilities: {}, type: 'chat' },
@@ -232,7 +242,7 @@ describe('parseModelString', () => {
     });

     it('should handle multi close angle brackets', () => {
-      const result = parseModelString('model1<>>,model2');
+      const result = parseModelString('test-provider', 'model1<>>,model2');
       expect(result.add).toEqual([
         { id: 'model1', abilities: {}, type: 'chat' },
         { id: 'model2', abilities: {}, type: 'chat' },
@@ -240,22 +250,22 @@ describe('parseModelString', () => {
     });

     it('should handle only colon inside angle brackets', () => {
-      const result = parseModelString('model1<:>');
+      const result = parseModelString('test-provider', 'model1<:>');
       expect(result.add[0]).toEqual({ id: 'model1', abilities: {}, type: 'chat' });
     });

     it('should handle only non-digit characters inside angle brackets', () => {
-      const result = parseModelString('model1<abc>');
+      const result = parseModelString('test-provider', 'model1<abc>');
       expect(result.add[0]).toEqual({ id: 'model1', abilities: {}, type: 'chat' });
     });

     it('should handle non-digit characters followed by digits inside angle brackets', () => {
-      const result = parseModelString('model1<abc123>');
+      const result = parseModelString('test-provider', 'model1<abc123>');
       expect(result.add[0]).toEqual({ id: 'model1', abilities: {}, type: 'chat' });
     });

     it('should handle digits followed by non-colon characters inside angle brackets', () => {
-      const result = parseModelString('model1<1024abc>');
+      const result = parseModelString('test-provider', 'model1<1024abc>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -265,7 +275,7 @@ describe('parseModelString', () => {
     });

     it('should handle digits followed by multiple colons inside angle brackets', () => {
-      const result = parseModelString('model1<1024::>');
+      const result = parseModelString('test-provider', 'model1<1024::>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -275,7 +285,7 @@ describe('parseModelString', () => {
     });

     it('should handle digits followed by a colon and non-letter characters inside angle brackets', () => {
-      const result = parseModelString('model1<1024:123>');
+      const result = parseModelString('test-provider', 'model1<1024:123>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -285,7 +295,7 @@ describe('parseModelString', () => {
     });

     it('should handle digits followed by a colon and spaces inside angle brackets', () => {
-      const result = parseModelString('model1<1024: vision>');
+      const result = parseModelString('test-provider', 'model1<1024: vision>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -295,7 +305,7 @@ describe('parseModelString', () => {
     });

     it('should handle digits followed by multiple colons and spaces inside angle brackets', () => {
-      const result = parseModelString('model1<1024: : vision>');
+      const result = parseModelString('test-provider', 'model1<1024: : vision>');
       expect(result.add[0]).toEqual({
         id: 'model1',
         contextWindowTokens: 1024,
@@ -305,9 +315,64 @@ describe('parseModelString', () => {
     });
   });

+  describe('FAL image models', () => {
+    it('should correctly parse FAL image model ids with slash and custom display names', () => {
+      const result = parseModelString(
+        'fal',
+        '-all,+flux-kontext/dev=KontextDev,+flux-pro/kontext=KontextPro,+flux/schnell=Schnell,+imagen4/preview=Imagen4',
+      );
+      expect(result.add).toEqual([
+        {
+          id: 'flux-kontext/dev',
+          displayName: 'KontextDev',
+          abilities: {},
+          type: 'image',
+        },
+        {
+          id: 'flux-pro/kontext',
+          displayName: 'KontextPro',
+          abilities: {},
+          type: 'image',
+        },
+        {
+          id: 'flux/schnell',
+          displayName: 'Schnell',
+          abilities: {},
+          type: 'image',
+        },
+        {
+          id: 'imagen4/preview',
+          displayName: 'Imagen4',
+          abilities: {},
+          type: 'image',
+        },
+      ]);
+      expect(result.removeAll).toBe(true);
+      expect(result.removed).toEqual(['all']);
+    });
+
+    it('should correctly parse FAL image model ids with slash (no displayName)', () => {
+      const result = parseModelString('fal', '-all,+flux-kontext/dev,+flux-pro/kontext');
+      expect(result.add).toEqual([
+        {
+          id: 'flux-kontext/dev',
+          abilities: {},
+          type: 'image',
+        },
+        {
+          id: 'flux-pro/kontext',
+          abilities: {},
+          type: 'image',
+        },
+      ]);
+      expect(result.removeAll).toBe(true);
+      expect(result.removed).toEqual(['all']);
+    });
+  });
+
   describe('deployment name', () => {
     it('should have no deployment name', () => {
-      const result = parseModelString('model1=Model 1', true);
+      const result = parseModelString('test-provider', 'model1=Model 1', true);
       expect(result.add[0]).toEqual({
         id: 'model1',
         displayName: 'Model 1',
@@ -317,7 +382,7 @@ describe('parseModelString', () => {
     });

     it('should have diff deployment name as id', () => {
-      const result = parseModelString('gpt-35-turbo->my-deploy=GPT 3.5 Turbo', true);
+      const result = parseModelString('azure', 'gpt-35-turbo->my-deploy=GPT 3.5 Turbo', true);
       expect(result.add[0]).toEqual({
         id: 'gpt-35-turbo',
         displayName: 'GPT 3.5 Turbo',
@@ -331,6 +396,7 @@ describe('parseModelString', () => {

     it('should handle with multi deployName', () => {
       const result = parseModelString(
+        'azure',
         'gpt-4o->id1=GPT-4o,gpt-4o-mini->id2=gpt-4o-mini,o1-mini->id3=O1 mini',
         true,
       );
@@ -361,6 +427,42 @@ describe('parseModelString', () => {
   });
 });

+describe('extractEnabledModels', () => {
+  it('should return undefined when no models are added', () => {
+    const result = extractEnabledModels('test-provider', '-all');
+    expect(result).toBeUndefined();
+  });
+
+  it('should return undefined when modelString is empty', () => {
+    const result = extractEnabledModels('test-provider', '');
+    expect(result).toBeUndefined();
+  });
+
+  it('should return array of model IDs when models are added', () => {
+    const result = extractEnabledModels('test-provider', '+model1,+model2,+model3');
+    expect(result).toEqual(['model1', 'model2', 'model3']);
+  });
+
+  it('should handle mixed add/remove operations and return only added models', () => {
+    const result = extractEnabledModels('test-provider', '+model1,-model2,+model3');
+    expect(result).toEqual(['model1', 'model3']);
+  });
+
+  it('should handle deployment names when withDeploymentName is true', () => {
+    const result = extractEnabledModels(
+      'azure',
+      '+gpt-4->deployment1,+gpt-35-turbo->deployment2',
+      true,
+    );
+    expect(result).toEqual(['gpt-4', 'gpt-35-turbo']);
+  });
+
+  it('should handle complex model strings with custom names', () => {
+    const result = extractEnabledModels('openai', '+gpt-4=Custom GPT-4,+claude-2=Custom Claude');
+    expect(result).toEqual(['gpt-4', 'claude-2']);
+  });
+});
+
 describe('transformToChatModelCards', () => {
   const defaultChatModels: AiFullModelCard[] = [
     { id: 'model1', displayName: 'Model 1', enabled: true, type: 'chat' },
@@ -368,27 +470,27 @@ describe('transformToChatModelCards', () => {
   ];

   it('should return undefined when modelString is empty', () => {
-    const result =
+    const result = transformToAiModelList({
       modelString: '',
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'openai',
     });
     expect(result).toBeUndefined();
   });

   it('should remove all models when removeAll is true', () => {
-    const result =
+    const result = transformToAiModelList({
       modelString: '-all',
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'openai',
     });
     expect(result).toEqual([]);
   });

   it('should remove specified models', () => {
-    const result =
+    const result = transformToAiModelList({
       modelString: '-model1',
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'openai',
     });
     expect(result).toEqual([
@@ -398,9 +500,9 @@ describe('transformToChatModelCards', () => {

   it('should add a new known model', () => {
     const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => m.providerId === 'ai21')!;
-    const result =
+    const result = transformToAiModelList({
       modelString: `${knownModel.id}`,
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'ai21',
     });

@@ -413,9 +515,9 @@ describe('transformToChatModelCards', () => {

   it('should update an existing known model', () => {
     const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => m.providerId === 'openai')!;
-    const result =
+    const result = transformToAiModelList({
       modelString: `+${knownModel.id}=Updated Model`,
-
+      defaultModels: [knownModel],
       providerId: 'openai',
     });

@@ -427,9 +529,9 @@ describe('transformToChatModelCards', () => {
   });

   it('should add a new custom model', () => {
-    const result =
+    const result = transformToAiModelList({
       modelString: '+custom_model=Custom Model',
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'openai',
     });
     expect(result).toContainEqual({
@@ -442,10 +544,10 @@ describe('transformToChatModelCards', () => {
   });

   it('should have file with builtin models like gpt-4-0125-preview', () => {
-    const result =
+    const result = transformToAiModelList({
       modelString:
         '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+gpt-4-turbo-2024-04-09=ChatGPT-4 Vision<128000:fc:vision:file>',
-
+      defaultModels: openaiChatModels,
       providerId: 'openai',
     });

@@ -457,9 +559,9 @@ describe('transformToChatModelCards', () => {
       (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
     );
     const defaultChatModels: AiFullModelCard[] = [];
-    const result =
+    const result = transformToAiModelList({
       modelString: '+deepseek-r1',
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'volcengine',
       withDeploymentName: true,
     });
@@ -474,9 +576,9 @@ describe('transformToChatModelCards', () => {
     const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
       (m) => m.id === 'deepseek-r1' && m.providerId === 'volcengine',
     );
-    const result =
+    const result = transformToAiModelList({
       modelString: `+deepseek-r1->my-custom-deploy`,
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'volcengine',
       withDeploymentName: true,
     });
@@ -489,9 +591,9 @@ describe('transformToChatModelCards', () => {

   it('should set both id and deploymentName to the full string when no -> is used and withDeploymentName is true', () => {
     const defaultChatModels: AiFullModelCard[] = [];
-    const result =
+    const result = transformToAiModelList({
       modelString: `+my_model`,
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'volcengine',
       withDeploymentName: true,
     });
@@ -602,9 +704,9 @@ describe('transformToChatModelCards', () => {
     const modelString =
       '-all,gpt-4o->id1=GPT-4o,gpt-4o-mini->id2=GPT 4o Mini,o1-mini->id3=OpenAI o1-mini';

-    const data =
+    const data = transformToAiModelList({
       modelString,
-      defaultChatModels,
+      defaultModels: defaultChatModels,
       providerId: 'azure',
       withDeploymentName: true,
     });
package/src/utils/parseModels.ts
CHANGED
@@ -1,13 +1,18 @@
 import { produce } from 'immer';

 import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
-import { AiFullModelCard } from '@/types/aiModel';
+import { AiFullModelCard, AiModelType } from '@/types/aiModel';
+import { getModelPropertyWithFallback } from '@/utils/getFallbackModelProperty';
 import { merge } from '@/utils/merge';

 /**
  * Parse model string to add or remove models.
  */
-export const parseModelString = (
+export const parseModelString = (
+  providerId: string,
+  modelString: string = '',
+  withDeploymentName = false,
+) => {
   let models: AiFullModelCard[] = [];
   let removeAll = false;
   const removedModels: string[] = [];
@@ -46,12 +51,18 @@ export const parseModelString = (modelString: string = '', withDeploymentName =
       models.splice(existingIndex, 1);
     }

+    // Use new type lookup function, prioritizing same provider first, then fallback to other providers
+    const modelType: AiModelType = getModelPropertyWithFallback<AiModelType>(
+      id,
+      'type',
+      providerId,
+    );
+
     const model: AiFullModelCard = {
       abilities: {},
       displayName: displayName || undefined,
       id,
-
-      type: 'chat',
+      type: modelType,
     };

     if (deploymentName) {
@@ -108,21 +119,21 @@ export const parseModelString = (modelString: string = '', withDeploymentName =
 /**
  * Extract a special method to process chatModels
  */
-export const
+export const transformToAiModelList = ({
   modelString = '',
-
+  defaultModels,
   providerId,
   withDeploymentName = false,
 }: {
-
+  defaultModels: AiFullModelCard[];
   modelString?: string;
   providerId: string;
   withDeploymentName?: boolean;
 }): AiFullModelCard[] | undefined => {
   if (!modelString) return undefined;

-  const modelConfig = parseModelString(modelString, withDeploymentName);
-  let chatModels = modelConfig.removeAll ? [] :
+  const modelConfig = parseModelString(providerId, modelString, withDeploymentName);
+  let chatModels = modelConfig.removeAll ? [] : defaultModels;

   // 处理移除逻辑
   if (!modelConfig.removeAll) {
@@ -182,8 +193,12 @@ export const transformToAiChatModelList = ({
   });
 };

-export const extractEnabledModels = (
-
+export const extractEnabledModels = (
+  providerId: string,
+  modelString: string = '',
+  withDeploymentName = false,
+) => {
+  const modelConfig = parseModelString(providerId, modelString, withDeploymentName);
   const list = modelConfig.add.map((m) => m.id);

   if (list.length === 0) return;