@lobehub/chat 1.45.5 → 1.45.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/modelProvider.json +2 -2
  4. package/locales/bg-BG/modelProvider.json +2 -2
  5. package/locales/de-DE/modelProvider.json +2 -2
  6. package/locales/en-US/modelProvider.json +2 -2
  7. package/locales/es-ES/modelProvider.json +2 -2
  8. package/locales/fa-IR/modelProvider.json +2 -2
  9. package/locales/fr-FR/modelProvider.json +2 -2
  10. package/locales/it-IT/modelProvider.json +2 -2
  11. package/locales/ja-JP/modelProvider.json +2 -2
  12. package/locales/ko-KR/modelProvider.json +2 -2
  13. package/locales/nl-NL/modelProvider.json +2 -2
  14. package/locales/pl-PL/modelProvider.json +2 -2
  15. package/locales/pt-BR/modelProvider.json +2 -2
  16. package/locales/ru-RU/modelProvider.json +2 -2
  17. package/locales/tr-TR/modelProvider.json +2 -2
  18. package/locales/vi-VN/modelProvider.json +2 -2
  19. package/locales/zh-CN/modelProvider.json +2 -2
  20. package/locales/zh-TW/modelProvider.json +2 -2
  21. package/package.json +3 -3
  22. package/src/app/(main)/chat/(workspace)/features/TelemetryNotification.tsx +1 -1
  23. package/src/app/(main)/files/(content)/@menu/features/KnowledgeBase/EmptyStatus.tsx +1 -1
  24. package/src/app/(main)/files/[id]/Header.tsx +1 -1
  25. package/src/app/(main)/settings/provider/features/CreateNewProvider/index.tsx +1 -1
  26. package/src/app/(main)/settings/sync/features/WebRTC/SyncSwitch/index.tsx +7 -7
  27. package/src/components/BubblesLoading/index.tsx +3 -3
  28. package/src/config/aiModels/index.ts +38 -0
  29. package/src/config/modelProviders/index.ts +3 -0
  30. package/src/database/repositories/aiInfra/index.ts +3 -1
  31. package/src/features/Conversation/Messages/Assistant/FileChunks/index.tsx +1 -1
  32. package/src/features/Conversation/components/History/index.tsx +1 -1
  33. package/src/features/InitClientDB/PGliteIcon.tsx +1 -1
  34. package/src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap +6 -0
  35. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +94 -23
  36. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +21 -6
  37. package/src/locales/default/modelProvider.ts +2 -2
  38. package/src/migrations/FromV3ToV4/index.ts +1 -1
  39. package/src/server/globalConfig/{genServerLLMConfig.test.ts → _deprecated.test.ts} +2 -4
  40. package/src/server/globalConfig/{genServerLLMConfig.ts → _deprecated.ts} +1 -1
  41. package/src/server/globalConfig/genServerAiProviderConfig.ts +42 -0
  42. package/src/server/globalConfig/index.ts +23 -1
  43. package/src/server/routers/lambda/aiModel.ts +2 -2
  44. package/src/server/routers/lambda/aiProvider.ts +2 -2
  45. package/src/types/aiModel.ts +1 -0
  46. package/src/types/serverConfig.ts +1 -0
  47. package/src/types/user/settings/modelProvider.ts +2 -0
  48. package/src/utils/__snapshots__/parseModels.test.ts.snap +37 -5
  49. package/src/utils/_deprecated/__snapshots__/parseModels.test.ts.snap +112 -0
  50. package/src/utils/_deprecated/parseModels.test.ts +276 -0
  51. package/src/utils/_deprecated/parseModels.ts +161 -0
  52. package/src/utils/fetch/__tests__/fetchSSE.test.ts +1 -1
  53. package/src/utils/parseModels.test.ts +153 -46
  54. package/src/utils/parseModels.ts +34 -21
  55. package/tests/setup-db.ts +0 -3
@@ -65,7 +65,7 @@ const FileChunks = memo<FileChunksProps>(({ data }) => {
       {showDetail && (
         <Flexbox gap={8} horizontal wrap={'wrap'}>
           {data.map((item, index) => {
-            return <ChunkItem index={index} key={item.id} {...item}></ChunkItem>;
+            return <ChunkItem index={index} key={item.id} {...item} />;
           })}
         </Flexbox>
       )}
@@ -57,7 +57,7 @@ const History = memo(() => {
       </Flexbox>
       <Flexbox align={'flex-start'} gap={8} horizontal>
         <Flexbox align={'center'} padding={8} width={20}>
-          <div className={styles.line}></div>
+          <div className={styles.line} />
         </Flexbox>
         <Markdown className={styles.content} variant={'chat'}>
           {content}
@@ -20,7 +20,7 @@ const PGliteIcon: IconType = forwardRef(({ size = '1em', style, ...rest }, ref)
       <path
         clip-rule="evenodd"
         d="M941.581 335.737v460.806c0 15.926-12.913 28.836-28.832 28.818l-115.283-.137c-15.243-.018-27.706-11.88-28.703-26.877.011-.569.018-1.138.018-1.711l-.004-172.904c0-47.745-38.736-86.451-86.454-86.451-46.245 0-84.052-36.359-86.342-82.068V191.496l201.708.149c79.484.058 143.892 64.553 143.892 144.092zm-576-144.281v201.818c0 47.746 38.682 86.456 86.4 86.456h86.4v-5.796c0 66.816 54.13 120.98 120.902 120.98 28.617 0 51.815 23.213 51.815 51.848v149.644c0 .688.011 1.372.025 2.057-.943 15.065-13.453 26.992-28.746 26.992l-144.982-.007.986-201.586c.079-15.915-12.755-28.88-28.66-28.959-15.904-.079-28.861 12.763-28.94 28.678l-.986 201.741v.118l-172.174-.01V623.722c0-15.915-12.895-28.819-28.8-28.819-15.906 0-28.8 12.904-28.8 28.819v201.704l-143.642-.007c-15.905-.004-28.798-12.904-28.798-28.819V335.547c0-79.58 64.471-144.093 144.001-144.092l143.999.001zm446.544 173.693c0-23.874-19.343-43.228-43.2-43.228-23.861 0-43.2 19.354-43.2 43.228 0 23.875 19.339 43.226 43.2 43.226 23.857 0 43.2-19.351 43.2-43.226z"
-      ></path>
+      />
     </svg>
   );
 });
@@ -16,15 +16,19 @@ exports[`LobeOpenAI > models > should get models 1`] = `
   },
   {
     "id": "gpt-3.5-turbo-16k",
+    "releasedAt": "2023-05-10",
   },
   {
     "id": "gpt-3.5-turbo-16k-0613",
+    "releasedAt": "2023-05-30",
   },
   {
     "id": "gpt-4-1106-vision-preview",
+    "releasedAt": "2024-03-26",
   },
   {
     "id": "gpt-3.5-turbo-instruct-0914",
+    "releasedAt": "2023-09-07",
   },
   {
     "contextWindowTokens": 128000,
@@ -63,9 +67,11 @@ exports[`LobeOpenAI > models > should get models 1`] = `
   },
   {
     "id": "gpt-3.5-turbo-0301",
+    "releasedAt": "2023-03-01",
   },
   {
     "id": "gpt-3.5-turbo-0613",
+    "releasedAt": "2023-06-12",
   },
   {
     "contextWindowTokens": 16385,
@@ -1,7 +1,6 @@
 // @vitest-environment node
 import OpenAI from 'openai';
 import type { Stream } from 'openai/streaming';
-
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

 import {
@@ -11,6 +10,7 @@ import {
   LobeOpenAICompatibleRuntime,
   ModelProvider,
 } from '@/libs/agent-runtime';
+import officalOpenAIModels from '@/libs/agent-runtime/openai/fixtures/openai-models.json';
 import { sleep } from '@/utils/sleep';

 import * as debugStreamModule from '../debugStream';
@@ -802,26 +802,29 @@ describe('LobeOpenAICompatibleFactory', () => {

   it('should use custom stream handler when provided', async () => {
     // Create a custom stream handler that handles both ReadableStream and OpenAI Stream
-    const customStreamHandler = vi.fn((stream: ReadableStream | Stream<OpenAI.ChatCompletionChunk>) => {
-      const readableStream = stream instanceof ReadableStream ? stream : stream.toReadableStream();
-      return new ReadableStream({
-        start(controller) {
-          const reader = readableStream.getReader();
-          const process = async () => {
-            try {
-              while (true) {
-                const { done, value } = await reader.read();
-                if (done) break;
-                controller.enqueue(value);
+    const customStreamHandler = vi.fn(
+      (stream: ReadableStream | Stream<OpenAI.ChatCompletionChunk>) => {
+        const readableStream =
+          stream instanceof ReadableStream ? stream : stream.toReadableStream();
+        return new ReadableStream({
+          start(controller) {
+            const reader = readableStream.getReader();
+            const process = async () => {
+              try {
+                while (true) {
+                  const { done, value } = await reader.read();
+                  if (done) break;
+                  controller.enqueue(value);
+                }
+              } finally {
+                controller.close();
               }
-            } finally {
-              controller.close();
-            }
-          };
-          process();
-        },
-      });
-    });
+            };
+            process();
+          },
+        });
+      },
+    );

     const LobeMockProvider = LobeOpenAICompatibleFactory({
       baseURL: 'https://api.test.com/v1',
@@ -897,10 +900,10 @@ describe('LobeOpenAICompatibleFactory', () => {
       choices: [
         {
           index: 0,
-         message: {
-           role: 'assistant',
+          message: {
+            role: 'assistant',
             content: 'Test response',
-            refusal: null
+            refusal: null,
           },
           logprobs: null,
           finish_reason: 'stop',
@@ -969,4 +972,72 @@
       });
     });
   });
+
+  describe('models', () => {
+    it('should get models with third party model list', async () => {
+      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
+        data: [
+          { id: 'gpt-4o', object: 'model', created: 1698218177 },
+          { id: 'claude-3-haiku-20240307', object: 'model' },
+          { id: 'gpt-4o-mini', object: 'model', created: 1698318177 * 1000 },
+          { id: 'gemini', object: 'model', created: 1736499509125 },
+        ],
+      } as any);
+
+      const list = await instance.models();
+
+      expect(list).toEqual([
+        {
+          contextWindowTokens: 128000,
+          releasedAt: '2023-10-25',
+          description:
+            'ChatGPT-4o 是一款动态模型,实时更新以保持当前最新版本。它结合了强大的语言理解与生成能力,适合于大规模应用场景,包括客户服务、教育和技术支持。',
+          displayName: 'GPT-4o',
+          enabled: true,
+          functionCall: true,
+          id: 'gpt-4o',
+          pricing: {
+            input: 2.5,
+            output: 10,
+          },
+          vision: true,
+        },
+        {
+          contextWindowTokens: 200000,
+          description:
+            'Claude 3 Haiku 是 Anthropic 的最快且最紧凑的模型,旨在实现近乎即时的响应。它具有快速且准确的定向性能。',
+          displayName: 'Claude 3 Haiku',
+          functionCall: true,
+          id: 'claude-3-haiku-20240307',
+          maxOutput: 4096,
+          pricing: {
+            input: 0.25,
+            output: 1.25,
+          },
+          releasedAt: '2024-03-07',
+          vision: true,
+        },
+        {
+          contextWindowTokens: 128000,
+          description:
+            'GPT-4o mini是OpenAI在GPT-4 Omni之后推出的最新模型,支持图文输入并输出文本。作为他们最先进的小型模型,它比其他近期的前沿模型便宜很多,并且比GPT-3.5 Turbo便宜超过60%。它保持了最先进的智能,同时具有显著的性价比。GPT-4o mini在MMLU测试中获得了 82% 的得分,目前在聊天偏好上排名高于 GPT-4。',
+          displayName: 'GPT-4o mini',
+          enabled: true,
+          functionCall: true,
+          id: 'gpt-4o-mini',
+          maxOutput: 16385,
+          pricing: {
+            input: 0.15,
+            output: 0.6,
+          },
+          releasedAt: '2023-10-26',
+          vision: true,
+        },
+        {
+          id: 'gemini',
+          releasedAt: '2025-01-10',
+        },
+      ]);
+    });
+  });
 });
@@ -279,19 +279,34 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
           return models.transformModel(item);
         }

+        const toReleasedAt = () => {
+          if (!item.created) return;
+
+          // guarantee item.created in Date String format
+          if (
+            typeof (item.created as any) === 'string' ||
+            // or in milliseconds
+            item.created.toFixed(0).length === 13
+          ) {
+            return dayjs.utc(item.created).format('YYYY-MM-DD');
+          }
+
+          // by default, the created time is in seconds
+          return dayjs.utc(item.created * 1000).format('YYYY-MM-DD');
+        };
+
+        // TODO: should refactor after remove v1 user/modelList code
         const knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === item.id);

         if (knownModel) {
           dayjs.extend(utc);

-          return {
-            ...knownModel,
-            releasedAt:
-              knownModel.releasedAt ?? dayjs.utc(item.created * 1000).format('YYYY-MM-DD'),
-          };
+          const releasedAt = knownModel.releasedAt ?? toReleasedAt();
+
+          return { ...knownModel, releasedAt };
         }

-        return { id: item.id };
+        return { id: item.id, releasedAt: toReleasedAt() };
       })

       .filter(Boolean) as ChatModelCard[];
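
A note on the `toReleasedAt` helper introduced above: OpenAI-compatible endpoints disagree on the unit of the `created` field. The official OpenAI API returns Unix seconds, while some third-party gateways return Unix milliseconds (13 digits) or a date string, which is why the new test feeds `models()` all three shapes. Below is a minimal standalone sketch of the same heuristic; the `normalizeCreated` name is ours, for illustration only.

import dayjs from 'dayjs';
import utc from 'dayjs/plugin/utc';

dayjs.extend(utc);

// Date strings and 13-digit millisecond timestamps can be handed to dayjs
// directly; anything else is treated as Unix seconds, which is what the
// official OpenAI API returns.
const normalizeCreated = (created?: number | string): string | undefined => {
  if (!created) return;

  if (typeof created === 'string' || created.toFixed(0).length === 13) {
    return dayjs.utc(created).format('YYYY-MM-DD');
  }

  return dayjs.utc(created * 1000).format('YYYY-MM-DD');
};

normalizeCreated(1698218177); // '2023-10-25' (seconds, like gpt-4o in the test)
normalizeCreated(1736499509125); // '2025-01-10' (milliseconds, like gemini in the test)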
@@ -80,7 +80,7 @@ export default {
   },
   id: {
     desc: '作为服务商唯一标识,创建后将不可修改',
-    format: '只能包含小写字母、连字符(-)和下划线(_)',
+    format: '只能包含数字、小写字母、连字符(-)和下划线(_)',
     placeholder: '例如 openai、gemini 等',
     required: '请填写服务商 ID',
     title: '服务商 ID',
@@ -152,7 +152,7 @@ export default {
     title: '正在下载模型 {{model}} ',
   },
   endpoint: {
-    desc: '填入 Ollama 接口代理地址,本地未额外指定可留空',
+    desc: '必须包含http(s)://,本地未额外指定可留空',
     title: 'Ollama 服务地址',
   },
   setup: {
@@ -1,5 +1,5 @@
 import type { Migration, MigrationData } from '@/migrations/VersionController';
-import { transformToChatModelCards } from '@/utils/parseModels';
+import { transformToChatModelCards } from '@/utils/_deprecated/parseModels';

 import { V3ConfigState, V3LegacyConfig, V3OpenAIConfig, V3Settings } from './types/v3';
 import { V4AzureOpenAIConfig, V4ConfigState, V4ProviderConfig, V4Settings } from './types/v4';
@@ -1,8 +1,6 @@
 import { describe, expect, it, vi } from 'vitest';

-import { getLLMConfig } from '@/config/llm';
-
-import { genServerLLMConfig } from './genServerLLMConfig';
+import { genServerLLMConfig } from './_deprecated';

 // Mock ModelProvider enum
 vi.mock('@/libs/agent-runtime', () => ({
@@ -40,7 +38,7 @@ vi.mock('@/config/llm', () => ({
 }));

 // Mock parse models utils
-vi.mock('@/utils/parseModels', () => ({
+vi.mock('@/utils/_deprecated/parseModels', () => ({
   extractEnabledModels: (modelString: string, withDeploymentName?: boolean) => {
     // Returns different format if withDeploymentName is true
     return withDeploymentName ? [`${modelString}_withDeployment`] : [modelString];
@@ -2,7 +2,7 @@ import { getLLMConfig } from '@/config/llm';
 import * as ProviderCards from '@/config/modelProviders';
 import { ModelProvider } from '@/libs/agent-runtime';
 import { ModelProviderCard } from '@/types/llm';
-import { extractEnabledModels, transformToChatModelCards } from '@/utils/parseModels';
+import { extractEnabledModels, transformToChatModelCards } from '@/utils/_deprecated/parseModels';

 export const genServerLLMConfig = (specificConfig: Record<any, any>) => {
   const llmConfig = getLLMConfig() as Record<string, any>;
@@ -0,0 +1,42 @@
+import * as AiModels from '@/config/aiModels';
+import { getLLMConfig } from '@/config/llm';
+import { ModelProvider } from '@/libs/agent-runtime';
+import { AiFullModelCard } from '@/types/aiModel';
+import { ProviderConfig } from '@/types/user/settings';
+import { extractEnabledModels, transformToAiChatModelList } from '@/utils/parseModels';
+
+export const genServerAiProvidersConfig = (specificConfig: Record<any, any>) => {
+  const llmConfig = getLLMConfig() as Record<string, any>;
+
+  return Object.values(ModelProvider).reduce(
+    (config, provider) => {
+      const providerUpperCase = provider.toUpperCase();
+      const providerCard = AiModels[provider] as AiFullModelCard[];
+      const providerConfig = specificConfig[provider as keyof typeof specificConfig] || {};
+      const providerModelList =
+        process.env[providerConfig.modelListKey ?? `${providerUpperCase}_MODEL_LIST`];
+
+      const defaultChatModels = providerCard.filter((c) => c.type === 'chat');
+
+      config[provider] = {
+        enabled: llmConfig[providerConfig.enabledKey || `ENABLED_${providerUpperCase}`],
+        enabledModels: extractEnabledModels(
+          providerModelList,
+          providerConfig.withDeploymentName || false,
+        ),
+        serverModelLists: transformToAiChatModelList({
+          defaultChatModels: defaultChatModels || [],
+          modelString: providerModelList,
+          providerId: provider,
+          withDeploymentName: providerConfig.withDeploymentName || false,
+        }),
+        ...(providerConfig.fetchOnClient !== undefined && {
+          fetchOnClient: providerConfig.fetchOnClient,
+        }),
+      };
+
+      return config;
+    },
+    {} as Record<string, ProviderConfig>,
+  );
+};
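
For orientation, `genServerAiProvidersConfig` above is convention-driven: unless a provider passes overrides (as azure, bedrock, giteeai, and ollama do in `globalConfig/index.ts` below), the enable flag and the model allow-list are derived from the provider id itself. A hypothetical walkthrough for the `openai` provider id (variable names ours):

import { getLLMConfig } from '@/config/llm';

const llmConfig = getLLMConfig() as Record<string, any>;

const provider = 'openai';
const providerUpperCase = provider.toUpperCase(); // 'OPENAI'

// With no enabledKey / modelListKey overrides, the keys fall back to the convention:
const enabled = llmConfig[`ENABLED_${providerUpperCase}`]; // llm config flag ENABLED_OPENAI
const modelList = process.env[`${providerUpperCase}_MODEL_LIST`]; // env var OPENAI_MODEL_LIST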
@@ -6,19 +6,41 @@ import { enableNextAuth } from '@/const/auth';
 import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
 import { GlobalServerConfig } from '@/types/serverConfig';

-import { genServerLLMConfig } from './genServerLLMConfig';
+import { genServerLLMConfig } from './_deprecated';
+import { genServerAiProvidersConfig } from './genServerAiProviderConfig';
 import { parseAgentConfig } from './parseDefaultAgent';

 export const getServerGlobalConfig = () => {
   const { ACCESS_CODES, DEFAULT_AGENT_CONFIG } = getAppConfig();

   const config: GlobalServerConfig = {
+    aiProvider: genServerAiProvidersConfig({
+      azure: {
+        enabledKey: 'ENABLED_AZURE_OPENAI',
+        withDeploymentName: true,
+      },
+      bedrock: {
+        enabledKey: 'ENABLED_AWS_BEDROCK',
+        modelListKey: 'AWS_BEDROCK_MODEL_LIST',
+      },
+      giteeai: {
+        enabledKey: 'ENABLED_GITEE_AI',
+        modelListKey: 'GITEE_AI_MODEL_LIST',
+      },
+      ollama: {
+        fetchOnClient: !process.env.OLLAMA_PROXY_URL,
+      },
+    }),
     defaultAgent: {
       config: parseAgentConfig(DEFAULT_AGENT_CONFIG),
     },
     enableUploadFileToServer: !!fileEnv.S3_SECRET_ACCESS_KEY,
     enabledAccessCode: ACCESS_CODES?.length > 0,
+
     enabledOAuthSSO: enableNextAuth,
+    /**
+     * @deprecated
+     */
     languageModel: genServerLLMConfig({
       azure: {
         enabledKey: 'ENABLED_AZURE_OPENAI',
@@ -19,14 +19,14 @@ const aiModelProcedure = authedProcedure.use(async (opts) => {
   const { ctx } = opts;

   const gateKeeper = await KeyVaultsGateKeeper.initWithEnvKey();
-  const { languageModel } = getServerGlobalConfig();
+  const { aiProvider } = getServerGlobalConfig();

   return opts.next({
     ctx: {
       aiInfraRepos: new AiInfraRepos(
         serverDB,
         ctx.userId,
-        languageModel as Record<string, ProviderConfig>,
+        aiProvider as Record<string, ProviderConfig>,
       ),
       aiModelModel: new AiModelModel(serverDB, ctx.userId),
       gateKeeper,
@@ -18,7 +18,7 @@ import { ProviderConfig } from '@/types/user/settings';
 const aiProviderProcedure = authedProcedure.use(async (opts) => {
   const { ctx } = opts;

-  const { languageModel } = getServerGlobalConfig();
+  const { aiProvider } = getServerGlobalConfig();

   const gateKeeper = await KeyVaultsGateKeeper.initWithEnvKey();
   return opts.next({
@@ -26,7 +26,7 @@ const aiProviderProcedure = authedProcedure.use(async (opts) => {
       aiInfraRepos: new AiInfraRepos(
         serverDB,
         ctx.userId,
-        languageModel as Record<string, ProviderConfig>,
+        aiProvider as Record<string, ProviderConfig>,
       ),
       aiProviderModel: new AiProviderModel(serverDB, ctx.userId),
       gateKeeper,
@@ -230,6 +230,7 @@ export interface AIRealtimeModelCard extends AIBaseModelCard {

 export interface AiFullModelCard extends AIBaseModelCard {
   abilities?: ModelAbilities;
+  config?: AiModelConfig;
   contextWindowTokens?: number;
   displayName?: string;
   id: string;
@@ -20,6 +20,7 @@ export interface ServerModelProviderConfig {
 export type ServerLanguageModel = Partial<Record<GlobalLLMProviderKey, ServerModelProviderConfig>>;

 export interface GlobalServerConfig {
+  aiProvider?: ServerLanguageModel;
   defaultAgent?: DeepPartial<UserDefaultAgent>;
   enableUploadFileToServer?: boolean;
   enabledAccessCode?: boolean;
@@ -1,4 +1,5 @@
 import { ModelProviderKey } from '@/libs/agent-runtime';
+import { AiFullModelCard } from '@/types/aiModel';
 import { ChatModelCard } from '@/types/llm';

 export interface ProviderConfig {
@@ -27,6 +28,7 @@ export interface ProviderConfig {
    * fetched models from provider side
    */
   remoteModelCards?: ChatModelCard[];
+  serverModelLists?: AiFullModelCard[];
 }

 export type GlobalLLMProviderKey = ModelProviderKey;
@@ -4,16 +4,22 @@ exports[`parseModelString > custom deletion, addition, and renaming of models 1`
 {
   "add": [
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "llama",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "claude-2",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": "gpt-4-32k",
       "id": "gpt-4-1106-preview",
+      "type": "chat",
     },
   ],
   "removeAll": true,
@@ -28,8 +34,10 @@ exports[`parseModelString > duplicate naming model 1`] = `
 {
   "add": [
     {
+      "abilities": {},
       "displayName": "gpt-4-32k",
       "id": "gpt-4-1106-preview",
+      "type": "chat",
     },
   ],
   "removeAll": false,
@@ -41,12 +49,16 @@ exports[`parseModelString > empty string model 1`] = `
 {
   "add": [
     {
+      "abilities": {},
       "displayName": "gpt-4-turbo",
       "id": "gpt-4-1106-preview",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "claude-2",
+      "type": "chat",
     },
   ],
   "removeAll": false,
@@ -58,20 +70,28 @@ exports[`parseModelString > only add the model 1`] = `
 {
   "add": [
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "model1",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "model2",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "model3",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": undefined,
       "id": "model4",
+      "type": "chat",
     },
   ],
   "removeAll": false,
@@ -82,31 +102,43 @@ exports[`parseModelString > only add the model 1`] = `
 exports[`transformToChatModelCards > should have file with builtin models like gpt-4-0125-preview 1`] = `
 [
   {
+    "abilities": {
+      "files": true,
+      "functionCall": true,
+    },
     "contextWindowTokens": 128000,
     "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
     "displayName": "ChatGPT-4",
     "enabled": true,
-    "files": true,
-    "functionCall": true,
     "id": "gpt-4-0125-preview",
     "pricing": {
       "input": 10,
       "output": 30,
     },
+    "providerId": "openai",
+    "releasedAt": "2024-01-25",
+    "source": "builtin",
+    "type": "chat",
   },
   {
+    "abilities": {
+      "files": true,
+      "functionCall": true,
+      "vision": true,
+    },
     "contextWindowTokens": 128000,
     "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
     "displayName": "ChatGPT-4 Vision",
     "enabled": true,
-    "files": true,
-    "functionCall": true,
     "id": "gpt-4-turbo-2024-04-09",
     "pricing": {
       "input": 10,
       "output": 30,
     },
-    "vision": true,
+    "providerId": "openai",
+    "releasedAt": "2024-04-09",
+    "source": "builtin",
+    "type": "chat",
   },
 ]
 `;
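
Taken together, the snapshot changes above document the new card shape produced by the rewritten `parseModels` utilities: the flat `files` / `functionCall` / `vision` booleans move under an `abilities` object, every parsed model gains a `type`, and builtin cards additionally carry `providerId`, `releasedAt`, and `source`. Below is a sketch of that shape as inferred from these snapshots; the interface name and field optionality are our assumptions, and the authoritative definitions live in `package/src/types/aiModel.ts`.

interface ModelAbilities {
  files?: boolean;
  functionCall?: boolean;
  vision?: boolean;
}

// Inferred from the snapshots above, not copied from the package source.
interface InferredAiChatModelCard {
  abilities: ModelAbilities;
  contextWindowTokens?: number;
  description?: string;
  displayName?: string;
  enabled?: boolean;
  id: string;
  pricing?: { input: number; output: number };
  providerId?: string; // 'openai' in the builtin snapshot
  releasedAt?: string; // 'YYYY-MM-DD'
  source?: 'builtin'; // set when the card comes from the default model list
  type: 'chat';
}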