@lobehub/chat 1.45.5 → 1.45.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/locales/ar/modelProvider.json +2 -2
- package/locales/bg-BG/modelProvider.json +2 -2
- package/locales/de-DE/modelProvider.json +2 -2
- package/locales/en-US/modelProvider.json +2 -2
- package/locales/es-ES/modelProvider.json +2 -2
- package/locales/fa-IR/modelProvider.json +2 -2
- package/locales/fr-FR/modelProvider.json +2 -2
- package/locales/it-IT/modelProvider.json +2 -2
- package/locales/ja-JP/modelProvider.json +2 -2
- package/locales/ko-KR/modelProvider.json +2 -2
- package/locales/nl-NL/modelProvider.json +2 -2
- package/locales/pl-PL/modelProvider.json +2 -2
- package/locales/pt-BR/modelProvider.json +2 -2
- package/locales/ru-RU/modelProvider.json +2 -2
- package/locales/tr-TR/modelProvider.json +2 -2
- package/locales/vi-VN/modelProvider.json +2 -2
- package/locales/zh-CN/modelProvider.json +2 -2
- package/locales/zh-TW/modelProvider.json +2 -2
- package/package.json +3 -3
- package/src/app/(main)/chat/(workspace)/features/TelemetryNotification.tsx +1 -1
- package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/EmptyStatus.tsx +1 -1
- package/src/app/(main)/files/[id]/Header.tsx +1 -1
- package/src/app/(main)/settings/provider/features/CreateNewProvider/index.tsx +1 -1
- package/src/app/(main)/settings/sync/features/WebRTC/SyncSwitch/index.tsx +7 -7
- package/src/components/BubblesLoading/index.tsx +3 -3
- package/src/config/aiModels/index.ts +38 -0
- package/src/config/modelProviders/index.ts +3 -0
- package/src/database/repositories/aiInfra/index.ts +3 -1
- package/src/features/Conversation/Messages/Assistant/FileChunks/index.tsx +1 -1
- package/src/features/Conversation/components/History/index.tsx +1 -1
- package/src/features/InitClientDB/PGliteIcon.tsx +1 -1
- package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +6 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +94 -23
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +21 -6
- package/src/locales/default/modelProvider.ts +2 -2
- package/src/migrations/FromV3ToV4/index.ts +1 -1
- package/src/server/globalConfig/{genServerLLMConfig.test.ts → _deprecated.test.ts} +2 -4
- package/src/server/globalConfig/{genServerLLMConfig.ts → _deprecated.ts} +1 -1
- package/src/server/globalConfig/genServerAiProviderConfig.ts +42 -0
- package/src/server/globalConfig/index.ts +23 -1
- package/src/server/routers/lambda/aiModel.ts +2 -2
- package/src/server/routers/lambda/aiProvider.ts +2 -2
- package/src/types/aiModel.ts +1 -0
- package/src/types/serverConfig.ts +1 -0
- package/src/types/user/settings/modelProvider.ts +2 -0
- package/src/utils/__snapshots__/parseModels.test.ts.snap +37 -5
- package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap +112 -0
- package/src/utils/_deprecated/parseModels.test.ts +276 -0
- package/src/utils/_deprecated/parseModels.ts +161 -0
- package/src/utils/fetch/__tests__/fetchSSE.test.ts +1 -1
- package/src/utils/parseModels.test.ts +153 -46
- package/src/utils/parseModels.ts +34 -21
- package/tests/setup-db.ts +0 -3
package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap
@@ -0,0 +1,112 @@
+// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+exports[`parseModelString > custom deletion, addition, and renaming of models 1`] = `
+{
+  "add": [
+    {
+      "displayName": undefined,
+      "id": "llama",
+    },
+    {
+      "displayName": undefined,
+      "id": "claude-2",
+    },
+    {
+      "displayName": "gpt-4-32k",
+      "id": "gpt-4-1106-preview",
+    },
+  ],
+  "removeAll": true,
+  "removed": [
+    "all",
+    "gpt-3.5-turbo",
+  ],
+}
+`;
+
+exports[`parseModelString > duplicate naming model 1`] = `
+{
+  "add": [
+    {
+      "displayName": "gpt-4-32k",
+      "id": "gpt-4-1106-preview",
+    },
+  ],
+  "removeAll": false,
+  "removed": [],
+}
+`;
+
+exports[`parseModelString > empty string model 1`] = `
+{
+  "add": [
+    {
+      "displayName": "gpt-4-turbo",
+      "id": "gpt-4-1106-preview",
+    },
+    {
+      "displayName": undefined,
+      "id": "claude-2",
+    },
+  ],
+  "removeAll": false,
+  "removed": [],
+}
+`;
+
+exports[`parseModelString > only add the model 1`] = `
+{
+  "add": [
+    {
+      "displayName": undefined,
+      "id": "model1",
+    },
+    {
+      "displayName": undefined,
+      "id": "model2",
+    },
+    {
+      "displayName": undefined,
+      "id": "model3",
+    },
+    {
+      "displayName": undefined,
+      "id": "model4",
+    },
+  ],
+  "removeAll": false,
+  "removed": [],
+}
+`;
+
+exports[`transformToChatModelCards > should have file with builtin models like gpt-4-0125-preview 1`] = `
+[
+  {
+    "contextWindowTokens": 128000,
+    "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
+    "displayName": "ChatGPT-4",
+    "enabled": true,
+    "files": true,
+    "functionCall": true,
+    "id": "gpt-4-0125-preview",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
+  },
+  {
+    "contextWindowTokens": 128000,
+    "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
+    "displayName": "ChatGPT-4 Vision",
+    "enabled": true,
+    "files": true,
+    "functionCall": true,
+    "id": "gpt-4-turbo-2024-04-09",
+    "pricing": {
+      "input": 10,
+      "output": 30,
+    },
+    "vision": true,
+  },
+]
+`;
package/src/utils/_deprecated/parseModels.test.ts
@@ -0,0 +1,276 @@
+import { describe, expect, it } from 'vitest';
+
+import { LOBE_DEFAULT_MODEL_LIST, OpenAIProviderCard } from '@/config/modelProviders';
+import { ChatModelCard } from '@/types/llm';
+
+import { parseModelString, transformToChatModelCards } from './parseModels';
+
+describe('parseModelString', () => {
+  it('custom deletion, addition, and renaming of models', () => {
+    const result = parseModelString(
+      '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo,gpt-4-1106-preview=gpt-4-32k',
+    );
+
+    expect(result).toMatchSnapshot();
+  });
+
+  it('duplicate naming model', () => {
+    const result = parseModelString('gpt-4-1106-preview=gpt-4-turbo,gpt-4-1106-preview=gpt-4-32k');
+    expect(result).toMatchSnapshot();
+  });
+
+  it('only add the model', () => {
+    const result = parseModelString('model1,model2,model3,model4');
+
+    expect(result).toMatchSnapshot();
+  });
+
+  it('empty string model', () => {
+    const result = parseModelString('gpt-4-1106-preview=gpt-4-turbo,, ,\n ,+claude-2');
+    expect(result).toMatchSnapshot();
+  });
+
+  describe('extension capabilities', () => {
+    it('with token', () => {
+      const result = parseModelString('chatglm-6b=ChatGLM 6B<4096>');
+
+      expect(result.add[0]).toEqual({
+        displayName: 'ChatGLM 6B',
+        id: 'chatglm-6b',
+        contextWindowTokens: 4096,
+      });
+    });
+
+    it('token and function calling', () => {
+      const result = parseModelString('spark-v3.5=讯飞星火 v3.5<8192:fc>');
+
+      expect(result.add[0]).toEqual({
+        displayName: '讯飞星火 v3.5',
+        functionCall: true,
+        id: 'spark-v3.5',
+        contextWindowTokens: 8192,
+      });
+    });
+
+    it('multi models', () => {
+      const result = parseModelString(
+        'gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>,gpt-4-all=ChatGPT Plus<128000:fc:vision:file>',
+      );
+
+      expect(result.add).toEqual([
+        {
+          displayName: 'Gemini 1.5 Flash',
+          vision: true,
+          id: 'gemini-1.5-flash-latest',
+          contextWindowTokens: 16000,
+        },
+        {
+          displayName: 'ChatGPT Plus',
+          vision: true,
+          functionCall: true,
+          files: true,
+          id: 'gpt-4-all',
+          contextWindowTokens: 128000,
+        },
+      ]);
+    });
+
+    it('should have file with builtin models like gpt-4-0125-preview', () => {
+      const result = parseModelString(
+        '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+gpt-4-turbo-2024-04-09=ChatGPT-4 Vision<128000:fc:vision:file>',
+      );
+      expect(result.add).toEqual([
+        {
+          displayName: 'ChatGPT-4',
+          files: true,
+          functionCall: true,
+          id: 'gpt-4-0125-preview',
+          contextWindowTokens: 128000,
+        },
+        {
+          displayName: 'ChatGPT-4 Vision',
+          files: true,
+          functionCall: true,
+          id: 'gpt-4-turbo-2024-04-09',
+          contextWindowTokens: 128000,
+          vision: true,
+        },
+      ]);
+    });
+
+    it('should handle empty extension capability value', () => {
+      const result = parseModelString('model1<1024:>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+    });
+
+    it('should handle empty extension capability name', () => {
+      const result = parseModelString('model1<1024::file>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, files: true });
+    });
+
+    it('should handle duplicate extension capabilities', () => {
+      const result = parseModelString('model1<1024:vision:vision>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, vision: true });
+    });
+
+    it('should handle case-sensitive extension capability names', () => {
+      const result = parseModelString('model1<1024:VISION:FC:file>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, files: true });
+    });
+
+    it('should handle case-sensitive extension capability values', () => {
+      const result = parseModelString('model1<1024:vision:Fc:File>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, vision: true });
+    });
+
+    it('should handle empty angle brackets', () => {
+      const result = parseModelString('model1<>');
+      expect(result.add[0]).toEqual({ id: 'model1' });
+    });
+
+    it('should handle not close angle brackets', () => {
+      const result = parseModelString('model1<,model2');
+      expect(result.add).toEqual([{ id: 'model1' }, { id: 'model2' }]);
+    });
+
+    it('should handle multi close angle brackets', () => {
+      const result = parseModelString('model1<>>,model2');
+      expect(result.add).toEqual([{ id: 'model1' }, { id: 'model2' }]);
+    });
+
+    it('should handle only colon inside angle brackets', () => {
+      const result = parseModelString('model1<:>');
+      expect(result.add[0]).toEqual({ id: 'model1' });
+    });
+
+    it('should handle only non-digit characters inside angle brackets', () => {
+      const result = parseModelString('model1<abc>');
+      expect(result.add[0]).toEqual({ id: 'model1' });
+    });
+
+    it('should handle non-digit characters followed by digits inside angle brackets', () => {
+      const result = parseModelString('model1<abc123>');
+      expect(result.add[0]).toEqual({ id: 'model1' });
+    });
+
+    it('should handle digits followed by non-colon characters inside angle brackets', () => {
+      const result = parseModelString('model1<1024abc>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+    });
+
+    it('should handle digits followed by multiple colons inside angle brackets', () => {
+      const result = parseModelString('model1<1024::>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+    });
+
+    it('should handle digits followed by a colon and non-letter characters inside angle brackets', () => {
+      const result = parseModelString('model1<1024:123>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+    });
+
+    it('should handle digits followed by a colon and spaces inside angle brackets', () => {
+      const result = parseModelString('model1<1024: vision>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+    });
+
+    it('should handle digits followed by multiple colons and spaces inside angle brackets', () => {
+      const result = parseModelString('model1<1024: : vision>');
+      expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+    });
+  });
+
+  describe('deployment name', () => {
+    it('should have same deployment name as id', () => {
+      const result = parseModelString('model1=Model 1', true);
+      expect(result.add[0]).toEqual({
+        id: 'model1',
+        displayName: 'Model 1',
+        deploymentName: 'model1',
+      });
+    });
+
+    it('should have diff deployment name as id', () => {
+      const result = parseModelString('gpt-35-turbo->my-deploy=GPT 3.5 Turbo', true);
+      expect(result.add[0]).toEqual({
+        id: 'gpt-35-turbo',
+        displayName: 'GPT 3.5 Turbo',
+        deploymentName: 'my-deploy',
+      });
+    });
+  });
+});
+
+describe('transformToChatModelCards', () => {
+  const defaultChatModels: ChatModelCard[] = [
+    { id: 'model1', displayName: 'Model 1', enabled: true },
+    { id: 'model2', displayName: 'Model 2', enabled: false },
+  ];
+
+  it('should return undefined when modelString is empty', () => {
+    const result = transformToChatModelCards({
+      modelString: '',
+      defaultChatModels,
+    });
+    expect(result).toBeUndefined();
+  });
+
+  it('should remove all models when removeAll is true', () => {
+    const result = transformToChatModelCards({
+      modelString: '-all',
+      defaultChatModels,
+    });
+    expect(result).toEqual([]);
+  });
+
+  it('should remove specified models', () => {
+    const result = transformToChatModelCards({
+      modelString: '-model1',
+      defaultChatModels,
+    });
+    expect(result).toEqual([{ id: 'model2', displayName: 'Model 2', enabled: false }]);
+  });
+
+  it('should add a new known model', () => {
+    const knownModel = LOBE_DEFAULT_MODEL_LIST[0];
+    const result = transformToChatModelCards({
+      modelString: `${knownModel.id}`,
+      defaultChatModels,
+    });
+    expect(result).toContainEqual({
+      ...knownModel,
+      displayName: knownModel.displayName || knownModel.id,
+      enabled: true,
+    });
+  });
+
+  it('should update an existing known model', () => {
+    const knownModel = LOBE_DEFAULT_MODEL_LIST[0];
+    const result = transformToChatModelCards({
+      modelString: `+${knownModel.id}=Updated Model`,
+      defaultChatModels: [knownModel],
+    });
+    expect(result![0]).toEqual({ ...knownModel, displayName: 'Updated Model', enabled: true });
+  });
+
+  it('should add a new custom model', () => {
+    const result = transformToChatModelCards({
+      modelString: '+custom_model=Custom Model',
+      defaultChatModels,
+    });
+    expect(result).toContainEqual({
+      id: 'custom_model',
+      displayName: 'Custom Model',
+      enabled: true,
+    });
+  });
+
+  it('should have file with builtin models like gpt-4-0125-preview', () => {
+    const result = transformToChatModelCards({
+      modelString:
+        '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+gpt-4-turbo-2024-04-09=ChatGPT-4 Vision<128000:fc:vision:file>',
+      defaultChatModels: OpenAIProviderCard.chatModels,
+    });
+
+    expect(result).toMatchSnapshot();
+  });
+});
package/src/utils/_deprecated/parseModels.ts
@@ -0,0 +1,161 @@
+import { produce } from 'immer';
+
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
+import { ChatModelCard } from '@/types/llm';
+
+/**
+ * Parse model string to add or remove models.
+ */
+export const parseModelString = (modelString: string = '', withDeploymentName = false) => {
+  let models: ChatModelCard[] = [];
+  let removeAll = false;
+  const removedModels: string[] = [];
+  const modelNames = modelString.split(/[,，]/).filter(Boolean);
+
+  for (const item of modelNames) {
+    const disable = item.startsWith('-');
+    const nameConfig = item.startsWith('+') || item.startsWith('-') ? item.slice(1) : item;
+    const [idAndDisplayName, ...capabilities] = nameConfig.split('<');
+    let [id, displayName] = idAndDisplayName.split('=');
+
+    let deploymentName: string | undefined;
+
+    if (withDeploymentName) {
+      [id, deploymentName] = id.split('->');
+      if (!deploymentName) deploymentName = id;
+    }
+
+    if (disable) {
+      // Disable all models.
+      if (id === 'all') {
+        removeAll = true;
+      }
+      removedModels.push(id);
+      continue;
+    }
+
+    // remove empty model name
+    if (!item.trim().length) {
+      continue;
+    }
+
+    // Remove duplicate model entries.
+    const existingIndex = models.findIndex(({ id: n }) => n === id);
+    if (existingIndex !== -1) {
+      models.splice(existingIndex, 1);
+    }
+
+    const model: ChatModelCard = {
+      displayName: displayName || undefined,
+      id,
+    };
+
+    if (deploymentName) {
+      model.deploymentName = deploymentName;
+    }
+
+    if (capabilities.length > 0) {
+      const [maxTokenStr, ...capabilityList] = capabilities[0].replace('>', '').split(':');
+      model.contextWindowTokens = parseInt(maxTokenStr, 10) || undefined;
+
+      for (const capability of capabilityList) {
+        switch (capability) {
+          case 'vision': {
+            model.vision = true;
+            break;
+          }
+          case 'fc': {
+            model.functionCall = true;
+            break;
+          }
+          case 'file': {
+            model.files = true;
+            break;
+          }
+          default: {
+            console.warn(`Unknown capability: ${capability}`);
+          }
+        }
+      }
+    }
+
+    models.push(model);
+  }
+
+  return {
+    add: models,
+    removeAll,
+    removed: removedModels,
+  };
+};
+
+/**
+ * Extract a special method to process chatModels
+ */
+export const transformToChatModelCards = ({
+  modelString = '',
+  defaultChatModels,
+  withDeploymentName = false,
+}: {
+  defaultChatModels: ChatModelCard[];
+  modelString?: string;
+  withDeploymentName?: boolean;
+}): ChatModelCard[] | undefined => {
+  if (!modelString) return undefined;
+
+  const modelConfig = parseModelString(modelString, withDeploymentName);
+  let chatModels = modelConfig.removeAll ? [] : defaultChatModels;
+
+  // Handle removal logic
+  if (!modelConfig.removeAll) {
+    chatModels = chatModels.filter((m) => !modelConfig.removed.includes(m.id));
+  }
+
+  return produce(chatModels, (draft) => {
+    // Handle add or replace logic
+    for (const toAddModel of modelConfig.add) {
+      // first try to find the model in LOBE_DEFAULT_MODEL_LIST to confirm if it is a known model
+      const knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === toAddModel.id);
+
+      // if the model is known, update it based on the known model
+      if (knownModel) {
+        const index = draft.findIndex((model) => model.id === toAddModel.id);
+        const modelInList = draft[index];
+
+        // if the model is already in chatModels, update it
+        if (modelInList) {
+          draft[index] = {
+            ...modelInList,
+            ...toAddModel,
+            displayName: toAddModel.displayName || modelInList.displayName || modelInList.id,
+            enabled: true,
+          };
+        } else {
+          // if the model is not in chatModels, add it
+          draft.push({
+            ...knownModel,
+            ...toAddModel,
+            displayName: toAddModel.displayName || knownModel.displayName || knownModel.id,
+            enabled: true,
+          });
+        }
+      } else {
+        // if the model is not in LOBE_DEFAULT_MODEL_LIST, add it as a new custom model
+        draft.push({
+          ...toAddModel,
+          displayName: toAddModel.displayName || toAddModel.id,
+          enabled: true,
+        });
+      }
+    }
+  });
+};
+
+export const extractEnabledModels = (modelString: string = '', withDeploymentName = false) => {
+  const modelConfig = parseModelString(modelString, withDeploymentName);
+  const list = modelConfig.add.map((m) => m.id);
+
+  if (list.length === 0) return;
+
+  return list;
+};
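For context on the syntax the deprecated parser above accepts: models are comma-separated; '-all' clears the provider defaults, a leading '+' or '-' adds or removes a single model, 'id=Display Name' renames it, '<tokens:fc:vision:file>' attaches a context window and capability flags, and with withDeploymentName enabled an Azure-style 'id->deployment' pair is split apart. The sketch below is illustrative only, mirroring the tests in this diff; it is not part of the published package, and 'my-custom-model' is a hypothetical model id.

// Illustrative usage sketch (TypeScript) — not part of the diff.
import { parseModelString } from './parseModels';

const config = parseModelString(
  '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+my-custom-model', // 'my-custom-model' is hypothetical
);
// config.removeAll => true, config.removed => ['all']
// config.add[0]    => { id: 'gpt-4-0125-preview', displayName: 'ChatGPT-4',
//                       contextWindowTokens: 128000, functionCall: true, files: true }
// config.add[1]    => { id: 'my-custom-model', displayName: undefined }

// With withDeploymentName = true, 'id->deployment' separates the model id from the deployment name:
const azure = parseModelString('gpt-35-turbo->my-deploy=GPT 3.5 Turbo', true);
// azure.add[0] => { id: 'gpt-35-turbo', deploymentName: 'my-deploy', displayName: 'GPT 3.5 Turbo' }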