@lobehub/chat 1.45.4 → 1.45.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/modelProvider.json +1 -1
  4. package/locales/bg-BG/modelProvider.json +1 -1
  5. package/locales/de-DE/modelProvider.json +1 -1
  6. package/locales/en-US/modelProvider.json +1 -1
  7. package/locales/es-ES/modelProvider.json +1 -1
  8. package/locales/fa-IR/modelProvider.json +1 -1
  9. package/locales/fr-FR/modelProvider.json +1 -1
  10. package/locales/it-IT/modelProvider.json +1 -1
  11. package/locales/ja-JP/modelProvider.json +1 -1
  12. package/locales/ko-KR/modelProvider.json +1 -1
  13. package/locales/nl-NL/modelProvider.json +1 -1
  14. package/locales/pl-PL/modelProvider.json +1 -1
  15. package/locales/pt-BR/modelProvider.json +1 -1
  16. package/locales/ru-RU/modelProvider.json +1 -1
  17. package/locales/tr-TR/modelProvider.json +1 -1
  18. package/locales/vi-VN/modelProvider.json +1 -1
  19. package/locales/zh-CN/modelProvider.json +1 -1
  20. package/locales/zh-TW/modelProvider.json +1 -1
  21. package/package.json +4 -4
  22. package/src/app/(main)/chat/(workspace)/features/TelemetryNotification.tsx +1 -1
  23. package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/EmptyStatus.tsx +1 -1
  24. package/src/app/(main)/files/[id]/Header.tsx +1 -1
  25. package/src/app/(main)/settings/sync/features/WebRTC/SyncSwitch/index.tsx +7 -7
  26. package/src/components/BubblesLoading/index.tsx +3 -3
  27. package/src/config/aiModels/index.ts +38 -0
  28. package/src/config/modelProviders/index.ts +3 -0
  29. package/src/database/repositories/aiInfra/index.ts +3 -1
  30. package/src/features/Conversation/Messages/Assistant/FileChunks/index.tsx +1 -1
  31. package/src/features/Conversation/components/History/index.tsx +1 -1
  32. package/src/features/InitClientDB/PGliteIcon.tsx +1 -1
  33. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +1 -0
  34. package/src/locales/default/modelProvider.ts +1 -1
  35. package/src/migrations/FromV3ToV4/index.ts +1 -1
  36. package/src/server/globalConfig/{genServerLLMConfig.test.ts → _deprecated.test.ts} +2 -4
  37. package/src/server/globalConfig/{genServerLLMConfig.ts → _deprecated.ts} +1 -1
  38. package/src/server/globalConfig/genServerAiProviderConfig.ts +42 -0
  39. package/src/server/globalConfig/index.ts +23 -1
  40. package/src/server/routers/lambda/aiModel.ts +2 -2
  41. package/src/server/routers/lambda/aiProvider.ts +2 -2
  42. package/src/types/aiModel.ts +1 -0
  43. package/src/types/serverConfig.ts +1 -0
  44. package/src/types/user/settings/modelProvider.ts +2 -0
  45. package/src/utils/__snapshots__/parseModels.test.ts.snap +37 -5
  46. package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap +112 -0
  47. package/src/utils/_deprecated/parseModels.test.ts +276 -0
  48. package/src/utils/_deprecated/parseModels.ts +161 -0
  49. package/src/utils/parseModels.test.ts +153 -46
  50. package/src/utils/parseModels.ts +34 -21
package/src/server/globalConfig/index.ts
@@ -6,19 +6,41 @@ import { enableNextAuth } from '@/const/auth';
  import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
  import { GlobalServerConfig } from '@/types/serverConfig';
 
- import { genServerLLMConfig } from './genServerLLMConfig';
+ import { genServerLLMConfig } from './_deprecated';
+ import { genServerAiProvidersConfig } from './genServerAiProviderConfig';
  import { parseAgentConfig } from './parseDefaultAgent';
 
  export const getServerGlobalConfig = () => {
    const { ACCESS_CODES, DEFAULT_AGENT_CONFIG } = getAppConfig();
 
    const config: GlobalServerConfig = {
+     aiProvider: genServerAiProvidersConfig({
+       azure: {
+         enabledKey: 'ENABLED_AZURE_OPENAI',
+         withDeploymentName: true,
+       },
+       bedrock: {
+         enabledKey: 'ENABLED_AWS_BEDROCK',
+         modelListKey: 'AWS_BEDROCK_MODEL_LIST',
+       },
+       giteeai: {
+         enabledKey: 'ENABLED_GITEE_AI',
+         modelListKey: 'GITEE_AI_MODEL_LIST',
+       },
+       ollama: {
+         fetchOnClient: !process.env.OLLAMA_PROXY_URL,
+       },
+     }),
      defaultAgent: {
        config: parseAgentConfig(DEFAULT_AGENT_CONFIG),
      },
      enableUploadFileToServer: !!fileEnv.S3_SECRET_ACCESS_KEY,
      enabledAccessCode: ACCESS_CODES?.length > 0,
+
      enabledOAuthSSO: enableNextAuth,
+     /**
+      * @deprecated
+      */
      languageModel: genServerLLMConfig({
        azure: {
          enabledKey: 'ENABLED_AZURE_OPENAI',
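This hunk wires a new aiProvider entry, generated by genServerAiProvidersConfig, into the server global config while keeping the old languageModel output (now marked @deprecated) for backwards compatibility. The generator itself lives in the newly added genServerAiProviderConfig.ts and is not shown in this diff; the sketch below is only an illustration, under the assumption that it maps each provider's option block (enabledKey, modelListKey, withDeploymentName, fetchOnClient) onto environment variables, as the call site above suggests.

// Hypothetical sketch — not the actual genServerAiProviderConfig.ts implementation.
// Option names come from the call site above; env-var semantics here are assumed.
interface ProviderSpec {
  enabledKey?: string; // env toggle, e.g. ENABLED_AZURE_OPENAI
  modelListKey?: string; // env model-list string, e.g. AWS_BEDROCK_MODEL_LIST
  withDeploymentName?: boolean; // Azure-style `id->deployment=Name` model strings
  fetchOnClient?: boolean; // e.g. Ollama without OLLAMA_PROXY_URL
}

interface ServerAiProviderConfig {
  enabled?: boolean;
  fetchOnClient?: boolean;
  serverModelLists?: unknown[]; // AiFullModelCard[] in the real codebase
}

const genServerAiProvidersConfigSketch = (
  specs: Record<string, ProviderSpec>,
): Record<string, ServerAiProviderConfig> =>
  Object.fromEntries(
    Object.entries(specs).map(([provider, spec]): [string, ServerAiProviderConfig] => {
      const upper = provider.toUpperCase();
      const enabled = process.env[spec.enabledKey ?? `ENABLED_${upper}`];
      const modelList = process.env[spec.modelListKey ?? `${upper}_MODEL_LIST`];

      return [
        provider,
        {
          // assumed convention: any value other than '0' enables the provider
          enabled: enabled === undefined ? undefined : enabled !== '0',
          fetchOnClient: spec.fetchOnClient,
          // the real generator would parse modelList (see src/utils/parseModels.ts,
          // honoring withDeploymentName) into model cards; that step is elided here
          serverModelLists: modelList ? [] : undefined,
        },
      ];
    }),
  );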
package/src/server/routers/lambda/aiModel.ts
@@ -19,14 +19,14 @@ const aiModelProcedure = authedProcedure.use(async (opts) => {
    const { ctx } = opts;
 
    const gateKeeper = await KeyVaultsGateKeeper.initWithEnvKey();
-   const { languageModel } = getServerGlobalConfig();
+   const { aiProvider } = getServerGlobalConfig();
 
    return opts.next({
      ctx: {
        aiInfraRepos: new AiInfraRepos(
          serverDB,
          ctx.userId,
-         languageModel as Record<string, ProviderConfig>,
+         aiProvider as Record<string, ProviderConfig>,
        ),
        aiModelModel: new AiModelModel(serverDB, ctx.userId),
        gateKeeper,
package/src/server/routers/lambda/aiProvider.ts
@@ -18,7 +18,7 @@ import { ProviderConfig } from '@/types/user/settings';
  const aiProviderProcedure = authedProcedure.use(async (opts) => {
    const { ctx } = opts;
 
-   const { languageModel } = getServerGlobalConfig();
+   const { aiProvider } = getServerGlobalConfig();
 
    const gateKeeper = await KeyVaultsGateKeeper.initWithEnvKey();
    return opts.next({
@@ -26,7 +26,7 @@ const aiProviderProcedure = authedProcedure.use(async (opts) => {
        aiInfraRepos: new AiInfraRepos(
          serverDB,
          ctx.userId,
-         languageModel as Record<string, ProviderConfig>,
+         aiProvider as Record<string, ProviderConfig>,
        ),
        aiProviderModel: new AiProviderModel(serverDB, ctx.userId),
        gateKeeper,
package/src/types/aiModel.ts
@@ -230,6 +230,7 @@ export interface AIRealtimeModelCard extends AIBaseModelCard {
 
  export interface AiFullModelCard extends AIBaseModelCard {
    abilities?: ModelAbilities;
+   config?: AiModelConfig;
    contextWindowTokens?: number;
    displayName?: string;
    id: string;
package/src/types/serverConfig.ts
@@ -20,6 +20,7 @@ export interface ServerModelProviderConfig {
  export type ServerLanguageModel = Partial<Record<GlobalLLMProviderKey, ServerModelProviderConfig>>;
 
  export interface GlobalServerConfig {
+   aiProvider?: ServerLanguageModel;
    defaultAgent?: DeepPartial<UserDefaultAgent>;
    enableUploadFileToServer?: boolean;
    enabledAccessCode?: boolean;
package/src/types/user/settings/modelProvider.ts
@@ -1,4 +1,5 @@
  import { ModelProviderKey } from '@/libs/agent-runtime';
+ import { AiFullModelCard } from '@/types/aiModel';
  import { ChatModelCard } from '@/types/llm';
 
  export interface ProviderConfig {
@@ -27,6 +28,7 @@ export interface ProviderConfig {
     * fetched models from provider side
     */
    remoteModelCards?: ChatModelCard[];
+   serverModelLists?: AiFullModelCard[];
  }
 
  export type GlobalLLMProviderKey = ModelProviderKey;
package/src/utils/__snapshots__/parseModels.test.ts.snap
@@ -4,16 +4,22 @@ exports[`parseModelString > custom deletion, addition, and renaming of models 1`] = `
  {
    "add": [
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "llama",
+       "type": "chat",
      },
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "claude-2",
+       "type": "chat",
      },
      {
+       "abilities": {},
        "displayName": "gpt-4-32k",
        "id": "gpt-4-1106-preview",
+       "type": "chat",
      },
    ],
    "removeAll": true,
@@ -28,8 +34,10 @@ exports[`parseModelString > duplicate naming model 1`] = `
  {
    "add": [
      {
+       "abilities": {},
        "displayName": "gpt-4-32k",
        "id": "gpt-4-1106-preview",
+       "type": "chat",
      },
    ],
    "removeAll": false,
@@ -41,12 +49,16 @@ exports[`parseModelString > empty string model 1`] = `
  {
    "add": [
      {
+       "abilities": {},
        "displayName": "gpt-4-turbo",
        "id": "gpt-4-1106-preview",
+       "type": "chat",
      },
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "claude-2",
+       "type": "chat",
      },
    ],
    "removeAll": false,
@@ -58,20 +70,28 @@ exports[`parseModelString > only add the model 1`] = `
  {
    "add": [
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "model1",
+       "type": "chat",
      },
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "model2",
+       "type": "chat",
      },
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "model3",
+       "type": "chat",
      },
      {
+       "abilities": {},
        "displayName": undefined,
        "id": "model4",
+       "type": "chat",
      },
    ],
    "removeAll": false,
@@ -82,31 +102,43 @@ exports[`transformToChatModelCards > should have file with builtin models like gpt-4-0125-preview 1`] = `
  [
    {
+     "abilities": {
+       "files": true,
+       "functionCall": true,
+     },
      "contextWindowTokens": 128000,
      "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
      "displayName": "ChatGPT-4",
      "enabled": true,
-     "files": true,
-     "functionCall": true,
      "id": "gpt-4-0125-preview",
      "pricing": {
        "input": 10,
        "output": 30,
      },
+     "providerId": "openai",
+     "releasedAt": "2024-01-25",
+     "source": "builtin",
+     "type": "chat",
    },
    {
+     "abilities": {
+       "files": true,
+       "functionCall": true,
+       "vision": true,
+     },
      "contextWindowTokens": 128000,
      "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
      "displayName": "ChatGPT-4 Vision",
      "enabled": true,
-     "files": true,
-     "functionCall": true,
      "id": "gpt-4-turbo-2024-04-09",
      "pricing": {
        "input": 10,
        "output": 30,
      },
-     "vision": true,
+     "providerId": "openai",
+     "releasedAt": "2024-04-09",
+     "source": "builtin",
+     "type": "chat",
    },
  ]
  `;
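The snapshot updates above document the new output shape of src/utils/parseModels.ts: capability flags move under an abilities object, every card gains a type, and builtin cards additionally carry providerId, releasedAt, and source. A rough before/after comparison, distilled from the snapshot diff rather than from the implementation itself:

// Illustration only, derived from the snapshot diff above (not executed against the package).
type LegacyCard = {
  contextWindowTokens?: number;
  displayName?: string;
  enabled?: boolean;
  files?: boolean; // capability flags used to sit at the top level
  functionCall?: boolean;
  id: string;
  vision?: boolean;
};

type CurrentCard = {
  abilities: { files?: boolean; functionCall?: boolean; vision?: boolean };
  contextWindowTokens?: number;
  displayName?: string;
  enabled?: boolean;
  id: string;
  providerId?: string; // "openai" in the snapshot
  releasedAt?: string; // e.g. "2024-01-25"
  source?: string; // "builtin" in the snapshot
  type: string; // "chat" in the snapshot
};

const legacy: LegacyCard = { id: 'gpt-4-0125-preview', files: true, functionCall: true };

const current: CurrentCard = {
  abilities: { files: true, functionCall: true },
  id: 'gpt-4-0125-preview',
  providerId: 'openai',
  source: 'builtin',
  type: 'chat',
};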
package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap
@@ -0,0 +1,112 @@
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+ exports[`parseModelString > custom deletion, addition, and renaming of models 1`] = `
+ {
+   "add": [
+     {
+       "displayName": undefined,
+       "id": "llama",
+     },
+     {
+       "displayName": undefined,
+       "id": "claude-2",
+     },
+     {
+       "displayName": "gpt-4-32k",
+       "id": "gpt-4-1106-preview",
+     },
+   ],
+   "removeAll": true,
+   "removed": [
+     "all",
+     "gpt-3.5-turbo",
+   ],
+ }
+ `;
+
+ exports[`parseModelString > duplicate naming model 1`] = `
+ {
+   "add": [
+     {
+       "displayName": "gpt-4-32k",
+       "id": "gpt-4-1106-preview",
+     },
+   ],
+   "removeAll": false,
+   "removed": [],
+ }
+ `;
+
+ exports[`parseModelString > empty string model 1`] = `
+ {
+   "add": [
+     {
+       "displayName": "gpt-4-turbo",
+       "id": "gpt-4-1106-preview",
+     },
+     {
+       "displayName": undefined,
+       "id": "claude-2",
+     },
+   ],
+   "removeAll": false,
+   "removed": [],
+ }
+ `;
+
+ exports[`parseModelString > only add the model 1`] = `
+ {
+   "add": [
+     {
+       "displayName": undefined,
+       "id": "model1",
+     },
+     {
+       "displayName": undefined,
+       "id": "model2",
+     },
+     {
+       "displayName": undefined,
+       "id": "model3",
+     },
+     {
+       "displayName": undefined,
+       "id": "model4",
+     },
+   ],
+   "removeAll": false,
+   "removed": [],
+ }
+ `;
+
+ exports[`transformToChatModelCards > should have file with builtin models like gpt-4-0125-preview 1`] = `
+ [
+   {
+     "contextWindowTokens": 128000,
+     "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
+     "displayName": "ChatGPT-4",
+     "enabled": true,
+     "files": true,
+     "functionCall": true,
+     "id": "gpt-4-0125-preview",
+     "pricing": {
+       "input": 10,
+       "output": 30,
+     },
+   },
+   {
+     "contextWindowTokens": 128000,
+     "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
+     "displayName": "ChatGPT-4 Vision",
+     "enabled": true,
+     "files": true,
+     "functionCall": true,
+     "id": "gpt-4-turbo-2024-04-09",
+     "pricing": {
+       "input": 10,
+       "output": 30,
+     },
+     "vision": true,
+   },
+ ]
+ `;
package/src/utils/_deprecated/parseModels.test.ts
@@ -0,0 +1,276 @@
+ import { describe, expect, it } from 'vitest';
+
+ import { LOBE_DEFAULT_MODEL_LIST, OpenAIProviderCard } from '@/config/modelProviders';
+ import { ChatModelCard } from '@/types/llm';
+
+ import { parseModelString, transformToChatModelCards } from './parseModels';
+
+ describe('parseModelString', () => {
+   it('custom deletion, addition, and renaming of models', () => {
+     const result = parseModelString(
+       '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo,gpt-4-1106-preview=gpt-4-32k',
+     );
+
+     expect(result).toMatchSnapshot();
+   });
+
+   it('duplicate naming model', () => {
+     const result = parseModelString('gpt-4-1106-preview=gpt-4-turbo,gpt-4-1106-preview=gpt-4-32k');
+     expect(result).toMatchSnapshot();
+   });
+
+   it('only add the model', () => {
+     const result = parseModelString('model1,model2,model3,model4');
+
+     expect(result).toMatchSnapshot();
+   });
+
+   it('empty string model', () => {
+     const result = parseModelString('gpt-4-1106-preview=gpt-4-turbo,, ,\n ,+claude-2');
+     expect(result).toMatchSnapshot();
+   });
+
+   describe('extension capabilities', () => {
+     it('with token', () => {
+       const result = parseModelString('chatglm-6b=ChatGLM 6B<4096>');
+
+       expect(result.add[0]).toEqual({
+         displayName: 'ChatGLM 6B',
+         id: 'chatglm-6b',
+         contextWindowTokens: 4096,
+       });
+     });
+
+     it('token and function calling', () => {
+       const result = parseModelString('spark-v3.5=讯飞星火 v3.5<8192:fc>');
+
+       expect(result.add[0]).toEqual({
+         displayName: '讯飞星火 v3.5',
+         functionCall: true,
+         id: 'spark-v3.5',
+         contextWindowTokens: 8192,
+       });
+     });
+
+     it('multi models', () => {
+       const result = parseModelString(
+         'gemini-1.5-flash-latest=Gemini 1.5 Flash<16000:vision>,gpt-4-all=ChatGPT Plus<128000:fc:vision:file>',
+       );
+
+       expect(result.add).toEqual([
+         {
+           displayName: 'Gemini 1.5 Flash',
+           vision: true,
+           id: 'gemini-1.5-flash-latest',
+           contextWindowTokens: 16000,
+         },
+         {
+           displayName: 'ChatGPT Plus',
+           vision: true,
+           functionCall: true,
+           files: true,
+           id: 'gpt-4-all',
+           contextWindowTokens: 128000,
+         },
+       ]);
+     });
+
+     it('should have file with builtin models like gpt-4-0125-preview', () => {
+       const result = parseModelString(
+         '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+gpt-4-turbo-2024-04-09=ChatGPT-4 Vision<128000:fc:vision:file>',
+       );
+       expect(result.add).toEqual([
+         {
+           displayName: 'ChatGPT-4',
+           files: true,
+           functionCall: true,
+           id: 'gpt-4-0125-preview',
+           contextWindowTokens: 128000,
+         },
+         {
+           displayName: 'ChatGPT-4 Vision',
+           files: true,
+           functionCall: true,
+           id: 'gpt-4-turbo-2024-04-09',
+           contextWindowTokens: 128000,
+           vision: true,
+         },
+       ]);
+     });
+
+     it('should handle empty extension capability value', () => {
+       const result = parseModelString('model1<1024:>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+     });
+
+     it('should handle empty extension capability name', () => {
+       const result = parseModelString('model1<1024::file>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, files: true });
+     });
+
+     it('should handle duplicate extension capabilities', () => {
+       const result = parseModelString('model1<1024:vision:vision>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, vision: true });
+     });
+
+     it('should handle case-sensitive extension capability names', () => {
+       const result = parseModelString('model1<1024:VISION:FC:file>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, files: true });
+     });
+
+     it('should handle case-sensitive extension capability values', () => {
+       const result = parseModelString('model1<1024:vision:Fc:File>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024, vision: true });
+     });
+
+     it('should handle empty angle brackets', () => {
+       const result = parseModelString('model1<>');
+       expect(result.add[0]).toEqual({ id: 'model1' });
+     });
+
+     it('should handle not close angle brackets', () => {
+       const result = parseModelString('model1<,model2');
+       expect(result.add).toEqual([{ id: 'model1' }, { id: 'model2' }]);
+     });
+
+     it('should handle multi close angle brackets', () => {
+       const result = parseModelString('model1<>>,model2');
+       expect(result.add).toEqual([{ id: 'model1' }, { id: 'model2' }]);
+     });
+
+     it('should handle only colon inside angle brackets', () => {
+       const result = parseModelString('model1<:>');
+       expect(result.add[0]).toEqual({ id: 'model1' });
+     });
+
+     it('should handle only non-digit characters inside angle brackets', () => {
+       const result = parseModelString('model1<abc>');
+       expect(result.add[0]).toEqual({ id: 'model1' });
+     });
+
+     it('should handle non-digit characters followed by digits inside angle brackets', () => {
+       const result = parseModelString('model1<abc123>');
+       expect(result.add[0]).toEqual({ id: 'model1' });
+     });
+
+     it('should handle digits followed by non-colon characters inside angle brackets', () => {
+       const result = parseModelString('model1<1024abc>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+     });
+
+     it('should handle digits followed by multiple colons inside angle brackets', () => {
+       const result = parseModelString('model1<1024::>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+     });
+
+     it('should handle digits followed by a colon and non-letter characters inside angle brackets', () => {
+       const result = parseModelString('model1<1024:123>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+     });
+
+     it('should handle digits followed by a colon and spaces inside angle brackets', () => {
+       const result = parseModelString('model1<1024: vision>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+     });
+
+     it('should handle digits followed by multiple colons and spaces inside angle brackets', () => {
+       const result = parseModelString('model1<1024: : vision>');
+       expect(result.add[0]).toEqual({ id: 'model1', contextWindowTokens: 1024 });
+     });
+   });
+
+   describe('deployment name', () => {
+     it('should have same deployment name as id', () => {
+       const result = parseModelString('model1=Model 1', true);
+       expect(result.add[0]).toEqual({
+         id: 'model1',
+         displayName: 'Model 1',
+         deploymentName: 'model1',
+       });
+     });
+
+     it('should have diff deployment name as id', () => {
+       const result = parseModelString('gpt-35-turbo->my-deploy=GPT 3.5 Turbo', true);
+       expect(result.add[0]).toEqual({
+         id: 'gpt-35-turbo',
+         displayName: 'GPT 3.5 Turbo',
+         deploymentName: 'my-deploy',
+       });
+     });
+   });
+ });
+
+ describe('transformToChatModelCards', () => {
+   const defaultChatModels: ChatModelCard[] = [
+     { id: 'model1', displayName: 'Model 1', enabled: true },
+     { id: 'model2', displayName: 'Model 2', enabled: false },
+   ];
+
+   it('should return undefined when modelString is empty', () => {
+     const result = transformToChatModelCards({
+       modelString: '',
+       defaultChatModels,
+     });
+     expect(result).toBeUndefined();
+   });
+
+   it('should remove all models when removeAll is true', () => {
+     const result = transformToChatModelCards({
+       modelString: '-all',
+       defaultChatModels,
+     });
+     expect(result).toEqual([]);
+   });
+
+   it('should remove specified models', () => {
+     const result = transformToChatModelCards({
+       modelString: '-model1',
+       defaultChatModels,
+     });
+     expect(result).toEqual([{ id: 'model2', displayName: 'Model 2', enabled: false }]);
+   });
+
+   it('should add a new known model', () => {
+     const knownModel = LOBE_DEFAULT_MODEL_LIST[0];
+     const result = transformToChatModelCards({
+       modelString: `${knownModel.id}`,
+       defaultChatModels,
+     });
+     expect(result).toContainEqual({
+       ...knownModel,
+       displayName: knownModel.displayName || knownModel.id,
+       enabled: true,
+     });
+   });
+
+   it('should update an existing known model', () => {
+     const knownModel = LOBE_DEFAULT_MODEL_LIST[0];
+     const result = transformToChatModelCards({
+       modelString: `+${knownModel.id}=Updated Model`,
+       defaultChatModels: [knownModel],
+     });
+     expect(result![0]).toEqual({ ...knownModel, displayName: 'Updated Model', enabled: true });
+   });
+
+   it('should add a new custom model', () => {
+     const result = transformToChatModelCards({
+       modelString: '+custom_model=Custom Model',
+       defaultChatModels,
+     });
+     expect(result).toContainEqual({
+       id: 'custom_model',
+       displayName: 'Custom Model',
+       enabled: true,
+     });
+   });
+
+   it('should have file with builtin models like gpt-4-0125-preview', () => {
+     const result = transformToChatModelCards({
+       modelString:
+         '-all,+gpt-4-0125-preview=ChatGPT-4<128000:fc:file>,+gpt-4-turbo-2024-04-09=ChatGPT-4 Vision<128000:fc:vision:file>',
+       defaultChatModels: OpenAIProviderCard.chatModels,
+     });
+
+     expect(result).toMatchSnapshot();
+   });
+ });