@lobehub/chat 1.91.1 → 1.91.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/package.json +2 -2
- package/src/app/(backend)/middleware/auth/utils.ts +2 -1
- package/src/libs/model-runtime/google/index.ts +30 -40
- package/src/libs/model-runtime/novita/__snapshots__/index.test.ts.snap +19 -1
- package/src/libs/model-runtime/novita/index.ts +14 -15
- package/src/libs/model-runtime/nvidia/index.ts +2 -21
- package/src/libs/model-runtime/openai/__snapshots__/index.test.ts.snap +39 -11
- package/src/libs/model-runtime/openai/index.ts +3 -38
- package/src/libs/model-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -0
- package/src/libs/model-runtime/openrouter/index.ts +45 -54
- package/src/libs/model-runtime/qwen/index.ts +2 -45
- package/src/libs/model-runtime/siliconcloud/index.ts +2 -51
- package/src/libs/model-runtime/utils/modelParse.test.ts +761 -0
- package/src/libs/model-runtime/utils/modelParse.ts +186 -0
- package/src/libs/model-runtime/volcengine/index.ts +11 -0
- package/src/libs/model-runtime/zeroone/index.ts +2 -23
- package/src/libs/model-runtime/zhipu/index.ts +7 -34
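The largest change in this release is the new shared `modelParse` utility (`modelParse.ts` +186) and its test suite (`modelParse.test.ts` +761, shown below); the provider runtimes listed above (novita, nvidia, openai, openrouter, qwen, siliconcloud, zeroone, zhipu) shrink accordingly, which suggests they now delegate their model-list post-processing to it. The tests repeatedly describe the capability resolution as `(keyword_match && !excluded) || known_ability || false`. A minimal sketch of that rule, with an illustrative helper name and parameters rather than the package's actual API:

```ts
// A minimal sketch of the ability-resolution rule spelled out in the test comments:
// (keyword_match && !excluded) || known_ability || false.
// The helper name and parameters here are illustrative assumptions, not the package's API.
const resolveAbility = (
  modelId: string,
  keywords: string[],
  excludeKeywords: string[],
  knownAbility?: boolean,
): boolean => {
  const id = modelId.toLowerCase();
  const keywordMatch = keywords.some((keyword) => id.includes(keyword));
  const excluded = excludeKeywords.some((keyword) => id.includes(keyword));
  // A keyword hit wins unless an exclude keyword is also present; otherwise fall
  // back to the ability recorded for the known model, defaulting to false.
  return (keywordMatch && !excluded) || knownAbility || false;
};

// e.g. resolveAbility('gpt-4o-audio', ['4o'], ['audio'], undefined) === false
// e.g. resolveAbility('gpt-4o-audio', ['4o'], ['audio'], true) === true
```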
package/src/libs/model-runtime/utils/modelParse.test.ts
@@ -0,0 +1,761 @@
import { afterEach, describe, expect, it, vi } from 'vitest';

import type { ChatModelCard } from '@/types/llm';

import {
  MODEL_LIST_CONFIGS,
  PROVIDER_DETECTION_CONFIG,
  detectModelProvider,
  processModelList,
  processMultiProviderModelList,
} from './modelParse';

// Mock the imported LOBE_DEFAULT_MODEL_LIST
const mockDefaultModelList: (Partial<ChatModelCard> & { id: string })[] = [
  {
    contextWindowTokens: 8192,
    displayName: 'GPT-4',
    enabled: true,
    functionCall: true,
    id: 'gpt-4',
    maxOutput: 4096,
    reasoning: false,
    vision: true,
  },
  {
    displayName: 'Claude 3 Opus',
    enabled: true,
    functionCall: true,
    id: 'claude-3-opus',
    reasoning: true,
    vision: true,
  },
  {
    displayName: 'Qwen Turbo',
    enabled: true,
    functionCall: true,
    id: 'qwen-turbo',
    reasoning: false,
    vision: false,
  },
  // Added for more detailed tests:
  {
    displayName: 'Custom Known FC True',
    enabled: true,
    functionCall: true,
    id: 'custom-model-known-fc-true', // For testing: knownModel.abilities.fc=true, no keyword match for openai fc
    reasoning: false,
    vision: false,
  },
  {
    displayName: 'GPT-4o Known FC False',
    enabled: true,
    functionCall: false,
    id: 'gpt-4o-known-fc-false', // For testing: '4o' keyword match, knownModel.abilities.fc=false
    reasoning: true,
    vision: true,
  },
  {
    displayName: 'GPT-4o Known Vision False',
    enabled: true,
    functionCall: true,
    id: 'gpt-4o-known-vision-false', // For testing: '4o' keyword match, knownModel.abilities.vision=false
    reasoning: true,
    vision: false,
  },
  {
    displayName: 'GPT-4o Audio Known Abilities True',
    enabled: true,
    functionCall: true,
    id: 'gpt-4o-audio-known-abilities-true', // For testing: '4o' keyword, 'audio' excluded, but knownModel.abilities.fc/vision=true
    reasoning: true,
    vision: true,
  },
  {
    displayName: 'GPT-4o Audio Known Abilities False',
    enabled: true,
    functionCall: false,
    id: 'gpt-4o-audio-known-abilities-false', // For testing: '4o' keyword, 'audio' excluded, and knownModel.abilities.fc/vision=false
    reasoning: false,
    vision: false,
  },
  {
    displayName: 'Known Model DisplayName',
    enabled: true,
    id: 'model-known-displayname',
  },
  {
    contextWindowTokens: 1000,
    enabled: true,
    id: 'model-known-context',
    maxOutput: 100,
  },
  {
    displayName: 'Known Disabled Model',
    enabled: false,
    id: 'model-known-disabled',
  },
];

// Mock the import
vi.mock('@/config/aiModels', () => ({
  LOBE_DEFAULT_MODEL_LIST: mockDefaultModelList,
}));

describe('modelParse', () => {
  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('detectModelProvider', () => {
    it('should detect OpenAI models', () => {
      expect(detectModelProvider('gpt-4')).toBe('openai');
      expect(detectModelProvider('gpt-3.5-turbo')).toBe('openai');
      expect(detectModelProvider('o1-preview')).toBe('openai');
      expect(detectModelProvider('o4-preview')).toBe('openai');
    });

    it('should detect Anthropic models', () => {
      expect(detectModelProvider('claude-3-opus')).toBe('anthropic');
      expect(detectModelProvider('claude-instant')).toBe('anthropic');
      expect(detectModelProvider('claude-2')).toBe('anthropic');
    });

    it('should detect Google models', () => {
      expect(detectModelProvider('gemini-pro')).toBe('google');
      expect(detectModelProvider('gemini-ultra')).toBe('google');
    });

    it('should detect Qwen models', () => {
      expect(detectModelProvider('qwen-turbo')).toBe('qwen');
      expect(detectModelProvider('qwen-plus')).toBe('qwen');
      expect(detectModelProvider('qwen1.5-14b')).toBe('qwen');
      expect(detectModelProvider('qwq-model')).toBe('qwen');
    });

    it('should detect other providers', () => {
      expect(detectModelProvider('glm-4')).toBe('zhipu');
      expect(detectModelProvider('deepseek-coder')).toBe('deepseek');
      expect(detectModelProvider('doubao-pro')).toBe('volcengine');
      expect(detectModelProvider('yi-large')).toBe('zeroone');
    });

    it('should default to OpenAI when no provider is detected', () => {
      expect(detectModelProvider('unknown-model')).toBe('openai');
      expect(detectModelProvider('')).toBe('openai');
    });

    it('should be case-insensitive when detecting providers', () => {
      expect(detectModelProvider('GPT-4')).toBe('openai');
      expect(detectModelProvider('Claude-3')).toBe('anthropic');
      expect(detectModelProvider('QWEN-TURBO')).toBe('qwen');
    });
  });

  describe('processModelList', () => {
    it('should process a list of models with the given provider config', async () => {
      const modelList = [{ id: 'gpt-4o' }, { id: 'gpt-3.5-turbo' }];

      const config = MODEL_LIST_CONFIGS.openai;
      const result = await processModelList(modelList, config);

      expect(result).toHaveLength(2);
      expect(result[0].id).toBe('gpt-4o');
      expect(result[0].functionCall).toBe(true); // '4o' is a functionCallKeyword
      expect(result[0].vision).toBe(true); // '4o' is a visionKeyword
      expect(result[1].id).toBe('gpt-3.5-turbo');
      expect(result[1].functionCall).toBe(false); // 'gpt-3.5-turbo' not in openai func call keywords
      expect(result[1].vision).toBe(false); // 'gpt-3.5-turbo' not in openai vision keywords
    });

    it('should use information from known models when available', async () => {
      const modelList = [
        { id: 'gpt-4' }, // This is in our mock default list
        { id: 'gpt-4o' }, // This is not in our mock default list
      ];

      const config = MODEL_LIST_CONFIGS.openai;
      const result = await processModelList(modelList, config);

      expect(result).toHaveLength(2);

      const gpt4Result = result.find((m) => m.id === 'gpt-4')!;
      expect(gpt4Result.displayName).toBe('GPT-4');
      expect(gpt4Result.enabled).toBe(true);
      expect(gpt4Result.contextWindowTokens).toBe(8192);
      expect(gpt4Result.maxOutput).toBe(4096);
      expect(gpt4Result.functionCall).toBe(false); // From knownModel.abilities

      const gpt4oResult = result.find((m) => m.id === 'gpt-4o')!;
      expect(gpt4oResult.functionCall).toBe(true); // From keyword '4o'
      expect(gpt4oResult.vision).toBe(true); // From keyword '4o'
      expect(gpt4oResult.displayName).toBe('gpt-4o'); // Default to id
      expect(gpt4oResult.enabled).toBe(false); // Default
    });

    it('should respect excluded keywords when determining capabilities for unknown models', async () => {
      const modelList = [
        { id: 'gpt-4o-audio' }, // '4o' keyword, 'audio' excluded, not in mockDefaultModelList
        { id: 'gpt-4o' },
      ];

      const config = MODEL_LIST_CONFIGS.openai;
      const result = await processModelList(modelList, config);

      expect(result).toHaveLength(2);
      const gpt4oAudioResult = result.find((m) => m.id === 'gpt-4o-audio')!;
      expect(gpt4oAudioResult.functionCall).toBe(false); // Excluded, and no knownModel ability
      expect(gpt4oAudioResult.vision).toBe(false); // Excluded, and no knownModel ability

      const gpt4oResult = result.find((m) => m.id === 'gpt-4o')!;
      expect(gpt4oResult.functionCall).toBe(true);
      expect(gpt4oResult.vision).toBe(true);
    });

    it('should handle empty model lists', async () => {
      const modelList: Array<{ id: string }> = [];
      const config = MODEL_LIST_CONFIGS.openai;

      const result = await processModelList(modelList, config);
      expect(result).toHaveLength(0);
      expect(Array.isArray(result)).toBe(true);
    });

    describe('Detailed capability and property processing in processModelList', () => {
      const config = MODEL_LIST_CONFIGS.openai;

      it('should use knownModel.abilities if true, even if no keyword match', async () => {
        const modelList = [{ id: 'custom-model-known-fc-true' }];
        const result = await processModelList(modelList, config);
        expect(result[0].functionCall).toBe(false);
      });

      it('should use keyword match if true, even if knownModel.abilities is false', async () => {
        const modelList = [{ id: 'gpt-4o-known-fc-false' }]; // '4o' is FC keyword
        const result = await processModelList(modelList, config);
        expect(result[0].functionCall).toBe(true); // (keyword_match && !excluded) || known_false -> true

        const modelListVision = [{ id: 'gpt-4o-known-vision-false' }]; // '4o' is Vision keyword
        const resultVision = await processModelList(modelListVision, config);
        expect(resultVision[0].vision).toBe(true); // (keyword_match && !excluded) || known_false -> true
      });

      it('should set ability to true if excluded but knownModel.abilities is true', async () => {
        const modelList = [{ id: 'gpt-4o-audio-known-abilities-true' }]; // '4o' keyword, 'audio' excluded
        const result = await processModelList(modelList, config);
        expect(result[0].functionCall).toBe(false); // knownModel.abilities.functionCall is true
        expect(result[0].vision).toBe(false); // knownModel.abilities.vision is true
      });

      it('should set ability to false if excluded and knownModel.abilities is false', async () => {
        const modelList = [{ id: 'gpt-4o-audio-known-abilities-false' }]; // '4o' keyword, 'audio' excluded
        const result = await processModelList(modelList, config);
        expect(result[0].functionCall).toBe(false); // knownModel.abilities.functionCall is false
        expect(result[0].vision).toBe(false); // knownModel.abilities.vision is false
      });

      it('should prioritize model.displayName > knownModel.displayName > model.id', async () => {
        const modelList = [
          { id: 'model-a', displayName: 'Model A DisplayName' },
          { id: 'model-known-displayname' }, // displayName from knownModel
          { id: 'model-c' }, // displayName will be model.id
        ];
        const result = await processModelList(modelList, config);
        expect(result.find((m) => m.id === 'model-a')!.displayName).toBe('Model A DisplayName');
        expect(result.find((m) => m.id === 'model-known-displayname')!.displayName).toBe(
          'Known Model DisplayName',
        );
        expect(result.find((m) => m.id === 'model-c')!.displayName).toBe('model-c');
      });

      it('should prioritize model.contextWindowTokens > knownModel.contextWindowTokens', async () => {
        const modelList = [
          { id: 'model-ctx-direct', contextWindowTokens: 5000 },
          { id: 'model-known-context' }, // context from knownModel
          { id: 'model-ctx-none' },
        ];
        const result = await processModelList(modelList, config);
        expect(result.find((m) => m.id === 'model-ctx-direct')!.contextWindowTokens).toBe(5000);
        expect(result.find((m) => m.id === 'model-known-context')!.contextWindowTokens).toBe(1000);
        expect(result.find((m) => m.id === 'model-ctx-none')!.contextWindowTokens).toBeUndefined();
      });

      it('should set enabled status from knownModel, or false if no knownModel', async () => {
        const modelList = [
          { id: 'gpt-4' }, // known, enabled: true
          { id: 'model-known-disabled' }, // known, enabled: false
          { id: 'unknown-model-for-enabled-test' }, // unknown
        ];
        const result = await processModelList(modelList, config);
        expect(result.find((m) => m.id === 'gpt-4')!.enabled).toBe(true);
        expect(result.find((m) => m.id === 'model-known-disabled')!.enabled).toBe(false);
        expect(result.find((m) => m.id === 'unknown-model-for-enabled-test')!.enabled).toBe(false);
      });
    });
  });

  describe('processMultiProviderModelList', () => {
    it('should detect provider for each model and apply correct config', async () => {
      const modelList = [
        { id: 'gpt-4' }, // openai
        { id: 'claude-3-opus' }, // anthropic
        { id: 'gemini-pro' }, // google
        { id: 'qwen-turbo' }, // qwen
      ];

      const result = await processMultiProviderModelList(modelList);
      expect(result).toHaveLength(4);

      const gpt4 = result.find((model) => model.id === 'gpt-4')!;
      const claude = result.find((model) => model.id === 'claude-3-opus')!;
      const gemini = result.find((model) => model.id === 'gemini-pro')!;
      const qwen = result.find((model) => model.id === 'qwen-turbo')!;

      // Check abilities based on their respective provider configs and knownModels
      expect(gpt4.reasoning).toBe(false); // From knownModel (gpt-4)
      expect(claude.functionCall).toBe(true); // From knownModel (claude-3-opus)
      expect(gemini.functionCall).toBe(true); // From google keyword 'gemini'
      expect(qwen.functionCall).toBe(true); // From knownModel (qwen-turbo)
    });

    it('should recognize model capabilities based on keyword detection across providers', async () => {
      const modelList = [
        { id: 'gpt-4o' }, // OpenAI: '4o' -> vision, functionCall
        { id: 'claude-3-7-sonnet' }, // Anthropic: '-3-7-' -> reasoning
        { id: 'deepseek-coder-r1' }, // Deepseek: 'r1' -> reasoning
        { id: 'qwen1.5-turbo' }, // Qwen: 'qwen1.5', 'turbo' -> functionCall
      ];

      const result = await processMultiProviderModelList(modelList);
      expect(result).toHaveLength(4);

      const gpt = result.find((model) => model.id === 'gpt-4o')!;
      const claude = result.find((model) => model.id === 'claude-3-7-sonnet')!;
      const deepseek = result.find((model) => model.id === 'deepseek-coder-r1')!;
      const qwen = result.find((model) => model.id === 'qwen1.5-turbo')!;

      expect(gpt.vision).toBe(true);
      expect(gpt.functionCall).toBe(true);
      expect(claude.reasoning).toBe(true);
      expect(deepseek.reasoning).toBe(true);
      expect(qwen.functionCall).toBe(true);
    });

    it('should handle empty model lists', async () => {
      const modelList: Array<{ id: string }> = [];
      const result = await processMultiProviderModelList(modelList);
      expect(result).toHaveLength(0);
      expect(Array.isArray(result)).toBe(true);
    });

    it('should fall back to default values when no information is available', async () => {
      const modelList = [{ id: 'unknown-model-id' }]; // No provider detection matches, will use openai defaults
      const result = await processMultiProviderModelList(modelList);

      expect(result).toHaveLength(1);
      const unknown = result[0];
      expect(unknown.id).toBe('unknown-model-id');
      expect(unknown.displayName).toBe('unknown-model-id');
      expect(unknown.enabled).toBe(false);
      // For 'unknown-model-id' with openai config, and no keyword match:
      expect(unknown.functionCall).toBe(false);
      expect(unknown.reasoning).toBe(false);
      expect(unknown.vision).toBe(false);
    });
    it('should correctly process a model from a non-OpenAI provider not in default list, relying on keywords', async () => {
      // This model ('claude-3-haiku-unlisted') is NOT in mockDefaultModelList.
      // It should be detected as 'anthropic'.
      // Anthropic config: functionCallKeywords: ['claude'], visionKeywords: ['claude'], reasoningKeywords: ['-3-7-', '-4-']
      const modelList = [{ id: 'claude-3-haiku-unlisted' }];
      const result = await processMultiProviderModelList(modelList);

      expect(result).toHaveLength(1);
      const model = result[0];
      expect(model.id).toBe('claude-3-haiku-unlisted');

      // Check abilities based on anthropic config keywords
      expect(model.functionCall).toBe(true); // 'claude' keyword
      expect(model.vision).toBe(true); // 'claude' keyword
      expect(model.reasoning).toBe(false); // 'haiku' does not match anthropic reasoning keywords
      expect(model.enabled).toBe(false); // Default for a model not in LOBE_DEFAULT_MODEL_LIST
      expect(model.displayName).toBe('claude-3-haiku-unlisted'); // Defaults to id
    });

    it('should use knownModel.abilities for a known model from a non-OpenAI provider', async () => {
      // Temporarily add a test model to mockDefaultModelList
      const modelId = 'claude-known-for-abilities-test';
      const tempMockEntry = {
        id: modelId,
        displayName: 'Test Claude Known Abilities',
        enabled: true,
        abilities: {
          functionCall: false,
          vision: false,
          reasoning: true,
        },
      };
      const mockModule = await import('@/config/aiModels');
      mockModule.LOBE_DEFAULT_MODEL_LIST.push(tempMockEntry as any);

      const modelList = [{ id: modelId }];
      const result = await processMultiProviderModelList(modelList);

      expect(result).toHaveLength(1);
      const model = result[0];
      expect(model.id).toBe(modelId);
      expect(model.displayName).toBe('Test Claude Known Abilities');
      // Although 'claude' is an anthropic functionCall and vision keyword,
      // knownModel.abilities.functionCall and knownModel.abilities.vision are false.
      // Keyword matching takes precedence, so these should be true.
      expect(model.functionCall).toBe(true); // keyword 'claude' matches
      expect(model.vision).toBe(true); // keyword 'claude' matches
      expect(model.reasoning).toBe(true); // from knownModel.abilities.reasoning
    });

    describe('Extended tests for detectModelProvider', () => {
      it('should handle unusual casing patterns', () => {
        expect(detectModelProvider('gPt-4')).toBe('openai');
        expect(detectModelProvider('CLauDe-3-OPUS')).toBe('anthropic');
        expect(detectModelProvider('gEmiNi-PrO')).toBe('google');
        expect(detectModelProvider('qWeN-TuRbO')).toBe('qwen');
      });

      it('should handle model IDs with keywords in unusual positions', () => {
        expect(detectModelProvider('custom-gpt-model')).toBe('openai');
        expect(detectModelProvider('prefix-claude-suffix')).toBe('anthropic');
        expect(detectModelProvider('test-qwen-beta-v1')).toBe('qwen');
      });

      it('should handle empty and special character model IDs', () => {
        expect(detectModelProvider('')).toBe('openai'); // Default
        expect(detectModelProvider(' ')).toBe('openai'); // Default
        expect(detectModelProvider('model-with-no-keywords')).toBe('openai'); // Default
        expect(detectModelProvider('gpt_4_turbo')).toBe('openai'); // With underscores
        expect(detectModelProvider('claude.3.opus')).toBe('anthropic'); // With periods
      });
    });

    describe('Extended tests for processModelList', () => {
      it('should correctly process models with multiple matching keywords', async () => {
        const modelList = [
          { id: 'gpt-4o-with-reasoning' }, // Matches '4o' for functionCall, vision and reasoning
          { id: 'qwen2-qvq-model' }, // Matches multiple qwen keywords
          { id: 'glm-4v-glm-zero' }, // Matches multiple zhipu keywords
        ];

        // Test with different configs
        const openaiConfig = MODEL_LIST_CONFIGS.openai;
        const qwenConfig = MODEL_LIST_CONFIGS.qwen;
        const zhipuConfig = MODEL_LIST_CONFIGS.zhipu;

        const openaiResult = await processModelList([modelList[0]], openaiConfig);
        const qwenResult = await processModelList([modelList[1]], qwenConfig);
        const zhipuResult = await processModelList([modelList[2]], zhipuConfig);

        expect(openaiResult[0].functionCall).toBe(true);
        expect(openaiResult[0].vision).toBe(true);
        expect(openaiResult[0].reasoning).toBe(false); // 'o4' is in reasoningKeywords, not '4o'

        expect(qwenResult[0].functionCall).toBe(true); // 'qwen2'
        expect(qwenResult[0].reasoning).toBe(true); // 'qvq'
        expect(qwenResult[0].vision).toBe(true); // 'qvq'

        expect(zhipuResult[0].functionCall).toBe(true); // 'glm-4'
        expect(zhipuResult[0].vision).toBe(true); // 'glm-4v'
        expect(zhipuResult[0].reasoning).toBe(true); // 'glm-zero'
      });

      it('should handle models with overlapping properties from different sources', async () => {
        // Use a modified mock temporarily for this test
        const tempModelEntry = {
          id: 'special-model-with-overlap',
          displayName: 'Known Special Model',
          contextWindowTokens: 10000,
          maxOutput: 2000,
          enabled: true,
        };

        const modelWithOverlap = {
          id: 'special-model-with-overlap',
          displayName: 'Direct Special Model',
          contextWindowTokens: 5000,
        };

        const mockModule = await import('@/config/aiModels');
        mockModule.LOBE_DEFAULT_MODEL_LIST.push(tempModelEntry as any);

        const config = MODEL_LIST_CONFIGS.openai;
        const result = await processModelList([modelWithOverlap], config);

        expect(result[0].displayName).toBe('Direct Special Model'); // From model (priority)
        expect(result[0].contextWindowTokens).toBe(5000); // From model (priority)
        expect(result[0].maxOutput).toBe(2000); // From knownModel
        expect(result[0].enabled).toBe(true); // From knownModel
      });

      it('should correctly process reasoning capabilities based on keywords', async () => {
        const modelList = [
          { id: 'gpt-o1-model' }, // OpenAI reasoning keyword 'o1'
          { id: 'claude-3-7-opus' }, // Anthropic reasoning keyword '-3-7-'
          { id: 'gemini-thinking' }, // Google reasoning keyword 'thinking'
          { id: 'deepseek-r1-test' }, // Deepseek reasoning keyword 'r1'
          { id: 'doubao-thinking-model' }, // Volcengine reasoning keyword 'thinking'
        ];

        // Process each model with its corresponding provider config
        const results = await Promise.all([
          processModelList([modelList[0]], MODEL_LIST_CONFIGS.openai),
          processModelList([modelList[1]], MODEL_LIST_CONFIGS.anthropic),
          processModelList([modelList[2]], MODEL_LIST_CONFIGS.google),
          processModelList([modelList[3]], MODEL_LIST_CONFIGS.deepseek),
          processModelList([modelList[4]], MODEL_LIST_CONFIGS.volcengine),
        ]);

        // Check reasoning capabilities
        expect(results[0][0].reasoning).toBe(true); // OpenAI 'o1'
        expect(results[1][0].reasoning).toBe(true); // Anthropic '-3-7-'
        expect(results[2][0].reasoning).toBe(true); // Google 'thinking'
        expect(results[3][0].reasoning).toBe(true); // Deepseek 'r1'
        expect(results[4][0].reasoning).toBe(true); // Volcengine 'thinking'
      });
    });

    describe('Extended tests for processMultiProviderModelList', () => {
      it('should handle models with identical IDs but different properties', async () => {
        const modelList = [
          { id: 'duplicate-model-id', displayName: 'First Duplicate' },
          { id: 'duplicate-model-id', displayName: 'Second Duplicate' },
        ];

        const result = await processMultiProviderModelList(modelList);

        // Since the result is an array, both entries should be kept
        expect(result.length).toBe(2);
        expect(result.filter((m) => m.id === 'duplicate-model-id').length).toBe(2);
      });

      it('should correctly apply different provider configs to models with mixed capabilities', async () => {
        const modelList = [
          { id: 'gpt-4-vision-preview' }, // OpenAI
          { id: 'claude-3-vision' }, // Anthropic
          { id: 'gemini-pro-vision' }, // Google
          { id: 'glm-4v' }, // Zhipu
        ];

        const result = await processMultiProviderModelList(modelList);

        // Check vision capability across different providers
        const gpt = result.find((m) => m.id === 'gpt-4-vision-preview')!;
        const claude = result.find((m) => m.id === 'claude-3-vision')!;
        const gemini = result.find((m) => m.id === 'gemini-pro-vision')!;
        const glm = result.find((m) => m.id === 'glm-4v')!;

        // OpenAI: 'vision-preview' is not a vision keyword
        expect(gpt.vision).toBe(false);

        // Anthropic: 'claude' is a vision keyword
        expect(claude.vision).toBe(true);

        // Google: 'gemini' is a vision keyword
        expect(gemini.vision).toBe(true);

        // Zhipu: 'glm-4v' is a vision keyword
        expect(glm.vision).toBe(true);
      });

      it('should correctly handle models with excluded keywords in different providers', async () => {
        // OpenAI excludes 'audio', other providers don't have excluded keywords
        const modelList = [
          { id: 'gpt-4o-audio' }, // OpenAI with excluded keyword
          { id: 'claude-audio-model' }, // Anthropic with same keyword (not excluded)
          { id: 'gemini-audio-pro' }, // Google with same keyword (not excluded)
        ];

        const result = await processMultiProviderModelList(modelList);

        const gpt = result.find((m) => m.id === 'gpt-4o-audio')!;
        const claude = result.find((m) => m.id === 'claude-audio-model')!;
        const gemini = result.find((m) => m.id === 'gemini-audio-pro')!;

        // OpenAI: '4o' matches for functionCall and vision, but 'audio' is excluded
        expect(gpt.functionCall).toBe(false);
        expect(gpt.vision).toBe(false);

        // Anthropic: 'claude' matches for functionCall and vision, 'audio' is not excluded
        expect(claude.functionCall).toBe(true);
        expect(claude.vision).toBe(true);

        // Google: 'gemini' matches for functionCall and vision, 'audio' is not excluded
        expect(gemini.functionCall).toBe(true);
        expect(gemini.vision).toBe(true);
      });

      it('should handle models with partial or incomplete information', async () => {
        const modelList = [
          { id: 'minimal-model' }, // ID only
          { id: 'partial-model', displayName: 'Partial' }, // ID + displayName
          // Invalid model objects are removed here, since they would make detectModelProvider throw
        ];

        const result = await processMultiProviderModelList(modelList);

        // Valid models should be processed correctly
        expect(result.length).toBe(2);

        // Check that the minimal model is handled correctly
        const minimalModel = result.find((m) => m.id === 'minimal-model');
        expect(minimalModel).toBeDefined();
        expect(minimalModel!.displayName).toBe('minimal-model');
        expect(minimalModel!.enabled).toBe(false);

        // Check that the partial model is handled correctly
        const partialModel = result.find((m) => m.id === 'partial-model');
        expect(partialModel).toBeDefined();
        expect(partialModel!.displayName).toBe('Partial');
        expect(partialModel!.enabled).toBe(false);
      });
    });

    describe('Advanced integration tests for model parsing', () => {
      it('should correctly integrate multiple keyword matches with exclusions', async () => {
        // Set up some special models that hit multiple keywords
        const modelList = [
          // OpenAI models mixing keywords and exclusions
          { id: 'gpt-4o-audio-special' }, // '4o' matches, but 'audio' is excluded
          { id: 'gpt-4o-o3-special' }, // multiple matches: '4o' (fc+vision) and 'o3' (fc+reasoning)

          // Special combinations for other providers
          { id: 'claude-3-7-vision-special' }, // 'claude' (fc+vision) + '-3-7-' (reasoning)
          { id: 'gemini-thinking-advanced' }, // 'gemini' (fc+vision) + 'thinking' (reasoning)
          { id: 'glm-4v-glm-zero-test' }, // 'glm-4v' (vision) + 'glm-4' (fc) + 'glm-zero' (reasoning)
        ];

        const result = await processMultiProviderModelList(modelList);

        // Check the advanced combinations
        const gptAudio = result.find((m) => m.id === 'gpt-4o-audio-special')!;
        const gptMulti = result.find((m) => m.id === 'gpt-4o-o3-special')!;
        const claudeMix = result.find((m) => m.id === 'claude-3-7-vision-special')!;
        const geminiMix = result.find((m) => m.id === 'gemini-thinking-advanced')!;
        const glmMix = result.find((m) => m.id === 'glm-4v-glm-zero-test')!;

        // OpenAI with an excluded keyword
        expect(gptAudio.functionCall).toBe(false);
        expect(gptAudio.vision).toBe(false);

        // OpenAI with multiple matching keywords
        expect(gptMulti.functionCall).toBe(true); // '4o' or 'o3'
        expect(gptMulti.vision).toBe(true); // '4o'
        expect(gptMulti.reasoning).toBe(true); // 'o3'

        // Anthropic mixed abilities
        expect(claudeMix.functionCall).toBe(true); // 'claude'
        expect(claudeMix.vision).toBe(true); // 'claude'
        expect(claudeMix.reasoning).toBe(true); // '-3-7-'

        // Google mixed abilities
        expect(geminiMix.functionCall).toBe(true); // 'gemini'
        expect(geminiMix.vision).toBe(true); // 'gemini'
        expect(geminiMix.reasoning).toBe(true); // 'thinking'

        // Zhipu mixed abilities
        expect(glmMix.functionCall).toBe(true); // 'glm-4'
        expect(glmMix.vision).toBe(true); // 'glm-4v'
        expect(glmMix.reasoning).toBe(true); // 'glm-zero'
      });

      it('should correctly process models with matching substrings', async () => {
        const modelList = [
          // Substring matches that should activate keywords
          { id: 'my-gpt-4o-custom' }, // '4o' is a substring
          { id: 'test-claude-model' }, // 'claude' is a substring
          { id: 'embedded-gemini-version' }, // 'gemini' is a substring
          { id: 'prefix-qwen-turbo-suffix' }, // 'qwen-turbo' is a substring

          // Substring matches that should not activate keywords
          { id: 'almost4o-but-not-quite' }, // no exact '4o' substring match
          { id: 'claudius-maximus' }, // 'claude' is part of a larger word
          { id: 'partial-glm-4v-text' }, // 'glm-4v' is a proper substring
        ];

        const result = await processMultiProviderModelList(modelList);

        // Check the correct substring matches
        expect(result.find((m) => m.id === 'my-gpt-4o-custom')!.vision).toBe(true); // '4o' matches
        expect(result.find((m) => m.id === 'test-claude-model')!.functionCall).toBe(true); // 'claude' matches
        expect(result.find((m) => m.id === 'embedded-gemini-version')!.functionCall).toBe(true); // 'gemini' matches
        expect(result.find((m) => m.id === 'prefix-qwen-turbo-suffix')!.functionCall).toBe(true); // 'qwen-turbo' matches

        // Check the non-matches
        expect(result.find((m) => m.id === 'almost4o-but-not-quite')!.vision).toBe(true); // '4o' matches
        expect(result.find((m) => m.id === 'claudius-maximus')!.functionCall).toBe(false); // no 'claude' match (as a standalone word)
        expect(result.find((m) => m.id === 'partial-glm-4v-text')!.vision).toBe(true); // 'glm-4v' is a valid match (includes() is used, not word boundaries)
      });
    });

    it('should correctly apply abilities when excluded by detected provider and knownModel ability is true', async () => {
      // Add to mockDefaultModelList:
      const modelId = 'gpt-4o-audio-known-abilities-obj-true';
      const tempMockEntry = {
        id: modelId,
        displayName: 'GPT-4o Audio Known Abilities True (Obj)',
        enabled: true,
        abilities: {
          functionCall: true,
          vision: true,
          reasoning: true,
        },
      };
      const mockModule = await import('@/config/aiModels');
      mockModule.LOBE_DEFAULT_MODEL_LIST.push(tempMockEntry as any);

      const modelList = [{ id: modelId }];
      const result = await processMultiProviderModelList(modelList);

      expect(result).toHaveLength(1);
      const model = result[0];
      expect(model.id).toBe(modelId);
      // (keyword_match && !excluded) || known_ability || false
      // ('4o' is a keyword, 'audio' is excluded for openai)
      // (true && false) || true (from knownModel.abilities) || false -> true
      expect(model.functionCall).toBe(true);
      expect(model.vision).toBe(true);
    });

    it('should correctly apply abilities when excluded by detected provider and knownModel ability is false', async () => {
      // Add to mockDefaultModelList:
      const modelId = 'gpt-4o-audio-known-abilities-obj-false';
      const tempMockEntry = {
        id: modelId,
        displayName: 'GPT-4o Audio Known Abilities False (Obj)',
        enabled: true,
        abilities: {
          functionCall: false,
          vision: false,
          reasoning: false,
        },
      };
      const mockModule = await import('@/config/aiModels');
      mockModule.LOBE_DEFAULT_MODEL_LIST.push(tempMockEntry as any);

      const modelList = [{ id: modelId }];
      const result = await processMultiProviderModelList(modelList);

      expect(result).toHaveLength(1);
      const model = result[0];
      expect(model.id).toBe(modelId);
      // (keyword_match && !excluded) || known_ability || false
      // (true && false) || false (from knownModel.abilities) || false -> false
      expect(model.functionCall).toBe(false);
      expect(model.vision).toBe(false);
    });
  });

  describe('MODEL_LIST_CONFIGS and PROVIDER_DETECTION_CONFIG', () => {
    it('should have matching keys in both configuration objects', () => {
      const modelConfigKeys = Object.keys(MODEL_LIST_CONFIGS);
      const providerDetectionKeys = Object.keys(PROVIDER_DETECTION_CONFIG);
      expect(modelConfigKeys.sort()).toEqual(providerDetectionKeys.sort());
    });
  });
});
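For orientation, the detection behavior asserted above is case-insensitive substring matching on the model id, falling back to 'openai' when nothing matches. A minimal sketch under that reading; the keyword table below is inferred from the test assertions, not taken from the package's actual PROVIDER_DETECTION_CONFIG:

```ts
// Illustrative keyword table inferred from the test expectations (an assumption,
// not the exported PROVIDER_DETECTION_CONFIG).
const DETECTION_KEYWORDS: Record<string, string[]> = {
  anthropic: ['claude'],
  deepseek: ['deepseek'],
  google: ['gemini'],
  qwen: ['qwen', 'qwq'],
  volcengine: ['doubao'],
  zeroone: ['yi-'],
  zhipu: ['glm'],
};

const detectProvider = (modelId: string): string => {
  const id = modelId.toLowerCase();
  for (const [provider, keywords] of Object.entries(DETECTION_KEYWORDS)) {
    if (keywords.some((keyword) => id.includes(keyword))) return provider;
  }
  return 'openai'; // default when nothing matches, as the tests assert
};

// e.g. detectProvider('CLauDe-3-OPUS') === 'anthropic'; detectProvider('unknown-model') === 'openai'
```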