@lobehub/lobehub 2.0.0-next.14 → 2.0.0-next.16
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/.github/workflows/desktop-pr-build.yml +6 -6
- package/.github/workflows/release-desktop-beta.yml +4 -4
- package/.github/workflows/release.yml +1 -2
- package/.github/workflows/test.yml +4 -5
- package/.nvmrc +1 -1
- package/CHANGELOG.md +42 -0
- package/apps/desktop/tsconfig.json +0 -1
- package/changelog/v1.json +14 -0
- package/docs/self-hosting/advanced/feature-flags.mdx +0 -1
- package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +0 -1
- package/e2e/tsconfig.json +0 -1
- package/package.json +58 -58
- package/packages/types/src/serverConfig.ts +2 -6
- package/packages/web-crawler/tsconfig.json +0 -1
- package/src/app/[variants]/(auth)/signup/[[...signup]]/page.tsx +1 -8
- package/src/app/[variants]/(main)/(mobile)/me/(home)/features/UserBanner.tsx +3 -6
- package/src/app/[variants]/(main)/labs/components/LabCard.tsx +3 -1
- package/src/app/[variants]/(main)/settings/provider/detail/azure/index.tsx +5 -7
- package/src/components/InvalidAPIKey/APIKeyForm/Bedrock.tsx +8 -13
- package/src/config/featureFlags/schema.test.ts +0 -2
- package/src/config/featureFlags/schema.ts +0 -6
- package/src/config/modelProviders/ai21.ts +1 -16
- package/src/config/modelProviders/ai302.ts +1 -128
- package/src/config/modelProviders/ai360.ts +1 -32
- package/src/config/modelProviders/anthropic.ts +1 -71
- package/src/config/modelProviders/azure.ts +1 -51
- package/src/config/modelProviders/baichuan.ts +1 -57
- package/src/config/modelProviders/bedrock.ts +1 -276
- package/src/config/modelProviders/cloudflare.ts +1 -64
- package/src/config/modelProviders/deepseek.ts +1 -19
- package/src/config/modelProviders/fireworksai.ts +1 -174
- package/src/config/modelProviders/giteeai.ts +1 -135
- package/src/config/modelProviders/github.ts +1 -254
- package/src/config/modelProviders/google.ts +1 -130
- package/src/config/modelProviders/groq.ts +1 -119
- package/src/config/modelProviders/higress.ts +1 -1713
- package/src/config/modelProviders/huggingface.ts +1 -54
- package/src/config/modelProviders/hunyuan.ts +1 -83
- package/src/config/modelProviders/infiniai.ts +1 -74
- package/src/config/modelProviders/internlm.ts +1 -20
- package/src/config/modelProviders/mistral.ts +1 -95
- package/src/config/modelProviders/modelscope.ts +1 -27
- package/src/config/modelProviders/moonshot.ts +1 -29
- package/src/config/modelProviders/novita.ts +1 -105
- package/src/config/modelProviders/ollama.ts +1 -325
- package/src/config/modelProviders/openai.ts +1 -242
- package/src/config/modelProviders/openrouter.ts +1 -240
- package/src/config/modelProviders/perplexity.ts +1 -45
- package/src/config/modelProviders/ppio.ts +1 -152
- package/src/config/modelProviders/qiniu.ts +1 -18
- package/src/config/modelProviders/qwen.ts +1 -245
- package/src/config/modelProviders/search1api.ts +1 -34
- package/src/config/modelProviders/sensenova.ts +1 -69
- package/src/config/modelProviders/siliconcloud.ts +1 -417
- package/src/config/modelProviders/spark.ts +1 -59
- package/src/config/modelProviders/stepfun.ts +1 -98
- package/src/config/modelProviders/taichu.ts +1 -18
- package/src/config/modelProviders/togetherai.ts +1 -274
- package/src/config/modelProviders/upstage.ts +1 -28
- package/src/config/modelProviders/wenxin.ts +1 -140
- package/src/config/modelProviders/xai.ts +1 -38
- package/src/config/modelProviders/zeroone.ts +1 -81
- package/src/config/modelProviders/zhipu.ts +1 -108
- package/src/helpers/isCanUseFC.ts +0 -8
- package/src/hooks/useEnabledChatModels.ts +0 -8
- package/src/hooks/useModelContextWindowTokens.ts +0 -8
- package/src/hooks/useModelHasContextWindowToken.ts +1 -10
- package/src/hooks/useModelSupportFiles.ts +1 -11
- package/src/hooks/useModelSupportReasoning.ts +1 -11
- package/src/hooks/useModelSupportToolUse.ts +1 -11
- package/src/hooks/useModelSupportVision.ts +1 -11
- package/src/layout/AuthProvider/Clerk/index.tsx +2 -16
- package/src/server/globalConfig/index.ts +0 -23
- package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap +175 -12
- package/src/server/routers/lambda/config/index.test.ts +36 -28
- package/src/services/chat/chat.test.ts +12 -0
- package/src/services/chat/helper.ts +7 -31
- package/src/services/models.ts +2 -11
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +41 -14
- package/src/store/global/store.ts +1 -7
- package/src/store/user/initialState.ts +1 -7
- package/src/store/user/selectors.ts +1 -5
- package/src/store/user/slices/common/action.ts +5 -4
- package/src/store/user/slices/settings/selectors/index.ts +1 -0
- package/src/store/user/slices/settings/selectors/keyVaults.ts +21 -0
- package/src/store/user/store.ts +0 -3
- package/src/tools/web-browsing/Render/Search/ConfigForm/Form.tsx +1 -1
- package/tsconfig.json +0 -1
- package/packages/utils/src/_deprecated/__snapshots__/parseModels.test.ts.snap +0 -104
- package/packages/utils/src/_deprecated/parseModels.test.ts +0 -287
- package/packages/utils/src/_deprecated/parseModels.ts +0 -165
- package/src/hooks/_header.ts +0 -23
- package/src/server/globalConfig/_deprecated.test.ts +0 -92
- package/src/server/globalConfig/_deprecated.ts +0 -41
- package/src/store/global/actions/clientDb.ts +0 -67
- package/src/store/user/slices/modelList/__snapshots__/action.test.ts.snap +0 -12
- package/src/store/user/slices/modelList/action.test.ts +0 -359
- package/src/store/user/slices/modelList/action.ts +0 -223
- package/src/store/user/slices/modelList/initialState.ts +0 -15
- package/src/store/user/slices/modelList/reducers/customModelCard.test.ts +0 -204
- package/src/store/user/slices/modelList/reducers/customModelCard.ts +0 -64
- package/src/store/user/slices/modelList/selectors/index.ts +0 -3
- package/src/store/user/slices/modelList/selectors/keyVaults.test.ts +0 -201
- package/src/store/user/slices/modelList/selectors/keyVaults.ts +0 -50
- package/src/store/user/slices/modelList/selectors/modelConfig.test.ts +0 -219
- package/src/store/user/slices/modelList/selectors/modelConfig.ts +0 -95
- package/src/store/user/slices/modelList/selectors/modelProvider.test.ts +0 -138
- package/src/store/user/slices/modelList/selectors/modelProvider.ts +0 -170

package/src/config/modelProviders/zhipu.ts

@@ -1,114 +1,7 @@
 import { ModelProviderCard } from '@/types/llm';
 
 const ZhiPu: ModelProviderCard = {
-  chatModels: [
-    {
-      contextWindowTokens: 128_000,
-      description: 'GLM-4-Flash 是处理简单任务的理想选择,速度最快且免费。',
-      displayName: 'GLM-4-Flash',
-      enabled: true,
-      functionCall: true,
-      id: 'glm-4-flash',
-    },
-    {
-      contextWindowTokens: 128_000,
-      description: 'GLM-4-FlashX 是Flash的增强版本,超快推理速度。',
-      displayName: 'GLM-4-FlashX',
-      enabled: true,
-      functionCall: true,
-      id: 'glm-4-flashx',
-    },
-    {
-      contextWindowTokens: 1_024_000,
-      description: 'GLM-4-Long 支持超长文本输入,适合记忆型任务与大规模文档处理。',
-      displayName: 'GLM-4-Long',
-      functionCall: true,
-      id: 'glm-4-long',
-    },
-    {
-      contextWindowTokens: 128_000,
-      description: 'GLM-4-Air 是性价比高的版本,性能接近GLM-4,提供快速度和实惠的价格。',
-      displayName: 'GLM-4-Air',
-      enabled: true,
-      functionCall: true,
-      id: 'glm-4-air',
-    },
-    {
-      contextWindowTokens: 8192,
-      description: 'GLM-4-AirX 提供 GLM-4-Air 的高效版本,推理速度可达其2.6倍。',
-      displayName: 'GLM-4-AirX',
-      enabled: true,
-      functionCall: true,
-      id: 'glm-4-airx',
-    },
-    {
-      contextWindowTokens: 128_000,
-      description:
-        'GLM-4-Plus 作为高智能旗舰,具备强大的处理长文本和复杂任务的能力,性能全面提升。',
-      displayName: 'GLM-4-Plus',
-      enabled: true,
-      functionCall: true,
-      id: 'glm-4-plus',
-    },
-    {
-      contextWindowTokens: 128_000,
-      description: 'GLM-4-0520 是最新模型版本,专为高度复杂和多样化任务设计,表现卓越。',
-      displayName: 'GLM-4-0520',
-      functionCall: true,
-      id: 'glm-4-0520',
-    },
-    {
-      contextWindowTokens: 128_000,
-      description: 'GLM-4 是发布于2024年1月的旧旗舰版本,目前已被更强的 GLM-4-0520 取代。',
-      displayName: 'GLM-4',
-      functionCall: true,
-      id: 'glm-4',
-    },
-    {
-      contextWindowTokens: 8192,
-      description:
-        'GLM-4V-Flash 专注于高效的单一图像理解,适用于快速图像解析的场景,例如实时图像分析或批量图像处理。',
-      displayName: 'GLM-4V-Flash',
-      enabled: true,
-      id: 'glm-4v-flash',
-      releasedAt: '2024-12-09',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 8192,
-      description: 'GLM-4V-Plus 具备对视频内容及多图片的理解能力,适合多模态任务。',
-      displayName: 'GLM-4V-Plus',
-      enabled: true,
-      id: 'glm-4v-plus',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 2048,
-      description: 'GLM-4V 提供强大的图像理解与推理能力,支持多种视觉任务。',
-      displayName: 'GLM-4V',
-      id: 'glm-4v',
-      vision: true,
-    },
-    {
-      contextWindowTokens: 128_000,
-      description:
-        'CodeGeeX-4 是强大的AI编程助手,支持多种编程语言的智能问答与代码补全,提升开发效率。',
-      displayName: 'CodeGeeX-4',
-      id: 'codegeex-4',
-    },
-    {
-      contextWindowTokens: 4096,
-      description: 'CharGLM-4 专为角色扮演与情感陪伴设计,支持超长多轮记忆与个性化对话,应用广泛。',
-      displayName: 'CharGLM-4',
-      id: 'charglm-4',
-    },
-    {
-      contextWindowTokens: 8192,
-      description: 'Emohaa 是心理模型,具备专业咨询能力,帮助用户理解情感问题。',
-      displayName: 'Emohaa',
-      id: 'emohaa',
-    },
-  ],
+  chatModels: [],
   checkModel: 'glm-4.5-flash',
   description:
     '智谱 AI 提供多模态与语言模型的开放平台,支持广泛的AI应用场景,包括文本处理、图像理解与编程辅助等。',
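
The provider cards under `src/config/modelProviders/*` are all trimmed the same way: the hard-coded `chatModels` arrays are emptied while provider-level metadata stays. A minimal sketch of the resulting shape, using only the fields visible in the diff above (`Partial` and the variable name `zhipuCardAfter` are illustrative, not part of the package):

```ts
import { ModelProviderCard } from '@/types/llm';

// Sketch only: the built-in model catalogue no longer ships on the provider card;
// judging by the other diffs below, model metadata appears to be resolved from the
// aiInfra store at runtime instead.
const zhipuCardAfter: Partial<ModelProviderCard> = {
  chatModels: [],
  checkModel: 'glm-4.5-flash',
  description:
    '智谱 AI 提供多模态与语言模型的开放平台,支持广泛的AI应用场景,包括文本处理、图像理解与编程辅助等。',
};
```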

package/src/helpers/isCanUseFC.ts

@@ -1,13 +1,5 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { aiModelSelectors, getAiInfraStoreState } from '@/store/aiInfra';
-import { getUserStoreState } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const isCanUseFC = (model: string, provider: string): boolean => {
-  // TODO: remove isDeprecatedEdition condition in V2.0
-  if (isDeprecatedEdition) {
-    return modelProviderSelectors.isModelEnabledFunctionCall(model)(getUserStoreState());
-  }
-
   return aiModelSelectors.isModelSupportToolUse(model, provider)(getAiInfraStoreState()) || false;
 };
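
With the deprecated-edition branch gone, `isCanUseFC` answers purely from the aiInfra store. A minimal call-site sketch (the model and provider ids are arbitrary examples):

```ts
import { isCanUseFC } from '@/helpers/isCanUseFC';

// Illustrative values; any model/provider pair known to the aiInfra store behaves the same way.
const canUseTools = isCanUseFC('gpt-4-1106-preview', 'openai');
if (!canUseTools) {
  // e.g. hide plugin/tool options in the chat UI for this model
}
```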

package/src/hooks/useEnabledChatModels.ts

@@ -1,18 +1,10 @@
 import isEqual from 'fast-deep-equal';
 
-import { isDeprecatedEdition } from '@/const/version';
 import { useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 import { EnabledProviderWithModels } from '@/types/aiProvider';
 
 export const useEnabledChatModels = (): EnabledProviderWithModels[] => {
-  const enabledList = useUserStore(modelProviderSelectors.modelProviderListForModelSelect, isEqual);
   const enabledChatModelList = useAiInfraStore((s) => s.enabledChatModelList, isEqual);
 
-  if (isDeprecatedEdition) {
-    return enabledList;
-  }
-
   return enabledChatModelList || [];
 };

package/src/hooks/useModelContextWindowTokens.ts

@@ -1,15 +1,7 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const useModelContextWindowTokens = (model: string, provider: string) => {
   const newValue = useAiInfraStore(aiModelSelectors.modelContextWindowTokens(model, provider));
 
-  // TODO: remove this in V2.0
-  const oldValue = useUserStore(modelProviderSelectors.modelMaxToken(model));
-  if (isDeprecatedEdition) return oldValue;
-  //
-
   return newValue as number;
 };

package/src/hooks/useModelHasContextWindowToken.ts

@@ -1,19 +1,10 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { useAgentStore } from '@/store/agent';
 import { agentSelectors } from '@/store/agent/slices/chat';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const useModelHasContextWindowToken = () => {
   const model = useAgentStore(agentSelectors.currentAgentModel);
   const provider = useAgentStore(agentSelectors.currentAgentModelProvider);
-  const newValue = useAiInfraStore(aiModelSelectors.isModelHasContextWindowToken(model, provider));
 
-
-  const oldValue = useUserStore(modelProviderSelectors.isModelHasMaxToken(model));
-  if (isDeprecatedEdition) return oldValue;
-  //
-
-  return newValue;
+  return useAiInfraStore(aiModelSelectors.isModelHasContextWindowToken(model, provider));
 };

package/src/hooks/useModelSupportFiles.ts

@@ -1,15 +1,5 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const useModelSupportFiles = (model: string, provider: string) => {
-
-
-  // TODO: remove this in V2.0
-  const oldValue = useUserStore(modelProviderSelectors.isModelEnabledFiles(model));
-  if (isDeprecatedEdition) return oldValue;
-  //
-
-  return newValue;
+  return useAiInfraStore(aiModelSelectors.isModelSupportFiles(model, provider));
 };

package/src/hooks/useModelSupportReasoning.ts

@@ -1,15 +1,5 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const useModelSupportReasoning = (model: string, provider: string) => {
-
-
-  // TODO: remove this in V2.0
-  const oldValue = useUserStore(modelProviderSelectors.isModelEnabledReasoning(model));
-  if (isDeprecatedEdition) return oldValue;
-  //
-
-  return newValue;
+  return useAiInfraStore(aiModelSelectors.isModelSupportReasoning(model, provider));
 };

package/src/hooks/useModelSupportToolUse.ts

@@ -1,15 +1,5 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const useModelSupportToolUse = (model: string, provider: string) => {
-
-
-  // TODO: remove this in V2.0
-  const oldValue = useUserStore(modelProviderSelectors.isModelEnabledFunctionCall(model));
-  if (isDeprecatedEdition) return oldValue;
-  //
-
-  return newValue;
+  return useAiInfraStore(aiModelSelectors.isModelSupportToolUse(model, provider));
 };

package/src/hooks/useModelSupportVision.ts

@@ -1,15 +1,5 @@
-import { isDeprecatedEdition } from '@/const/version';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
-import { useUserStore } from '@/store/user';
-import { modelProviderSelectors } from '@/store/user/selectors';
 
 export const useModelSupportVision = (model: string, provider: string) => {
-
-
-  // TODO: remove this in V2.0
-  const oldValue = useUserStore(modelProviderSelectors.isModelEnabledVision(model));
-  if (isDeprecatedEdition) return oldValue;
-  //
-
-  return newValue;
+  return useAiInfraStore(aiModelSelectors.isModelSupportVision(model, provider));
 };
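
The capability hooks above now share one shape: `(model, provider)` in, a boolean (or token count) out, resolved via `aiModelSelectors` against the aiInfra store. A hedged usage sketch; the `ModelCapabilityHints` component is hypothetical and only the hook signatures come from the diffs above:

```ts
import { useModelContextWindowTokens } from '@/hooks/useModelContextWindowTokens';
import { useModelSupportToolUse } from '@/hooks/useModelSupportToolUse';
import { useModelSupportVision } from '@/hooks/useModelSupportVision';

// Hypothetical consumer component: renders a short capability summary for a model.
export const ModelCapabilityHints = ({ model, provider }: { model: string; provider: string }) => {
  const contextWindow = useModelContextWindowTokens(model, provider);
  const tools = useModelSupportToolUse(model, provider);
  const vision = useModelSupportVision(model, provider);

  return [`context: ${contextWindow} tokens`, tools ? 'tools' : null, vision ? 'vision' : null]
    .filter(Boolean)
    .join(' · ');
};
```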

package/src/layout/AuthProvider/Clerk/index.tsx

@@ -4,13 +4,10 @@ import { ClerkProvider } from '@clerk/nextjs';
 import { PropsWithChildren, memo, useEffect, useMemo, useState, useTransition } from 'react';
 import { useTranslation } from 'react-i18next';
 
-import { featureFlagsSelectors, useServerConfigStore } from '@/store/serverConfig';
-
 import UserUpdater from './UserUpdater';
 import { useAppearance } from './useAppearance';
 
 const Clerk = memo(({ children }: PropsWithChildren) => {
-  const { enableClerkSignUp } = useServerConfigStore(featureFlagsSelectors);
   const appearance = useAppearance();
   const {
     i18n: { language, getResourceBundle },

@@ -41,23 +38,12 @@ const Clerk = memo(({ children }: PropsWithChildren) => {
     return origins.length ? origins : undefined;
   }, []);
 
-  const updatedAppearance = useMemo(
-    () => ({
-      ...appearance,
-      elements: {
-        ...appearance.elements,
-        ...(!enableClerkSignUp ? { footerAction: { display: 'none' } } : {}),
-      },
-    }),
-    [appearance, enableClerkSignUp],
-  );
-
   return (
     <ClerkProvider
       allowedRedirectOrigins={allowedRedirectOrigins}
-      appearance={
+      appearance={appearance}
       localization={localization}
-      signUpUrl=
+      signUpUrl="/signup"
     >
       {children}
       <UserUpdater />

package/src/server/globalConfig/index.ts

@@ -1,4 +1,3 @@
-import { enableNextAuth } from '@/const/auth';
 import { isDesktop } from '@/const/version';
 import { appEnv, getAppConfig } from '@/envs/app';
 import { authEnv } from '@/envs/auth';

@@ -10,7 +9,6 @@ import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
 import { GlobalServerConfig } from '@/types/serverConfig';
 import { cleanObject } from '@/utils/object';
 
-import { genServerLLMConfig } from './_deprecated';
 import { genServerAiProvidersConfig } from './genServerAiProviderConfig';
 import { parseAgentConfig } from './parseDefaultAgent';
 import { parseFilesConfig } from './parseFilesConfig';

@@ -62,30 +60,9 @@ export const getServerGlobalConfig = async () => {
     enableUploadFileToServer: !!fileEnv.S3_SECRET_ACCESS_KEY,
     enabledAccessCode: ACCESS_CODES?.length > 0,
 
-    enabledOAuthSSO: enableNextAuth,
     image: cleanObject({
       defaultImageNum: imageEnv.AI_IMAGE_DEFAULT_IMAGE_NUM,
     }),
-    /**
-     * @deprecated
-     */
-    languageModel: genServerLLMConfig({
-      azure: {
-        enabledKey: 'ENABLED_AZURE_OPENAI',
-        withDeploymentName: true,
-      },
-      bedrock: {
-        enabledKey: 'ENABLED_AWS_BEDROCK',
-        modelListKey: 'AWS_BEDROCK_MODEL_LIST',
-      },
-      giteeai: {
-        enabledKey: 'ENABLED_GITEE_AI',
-        modelListKey: 'GITEE_AI_MODEL_LIST',
-      },
-      ollama: {
-        fetchOnClient: !process.env.OLLAMA_PROXY_URL,
-      },
-    }),
     oAuthSSOProviders: authEnv.NEXT_AUTH_SSO_PROVIDERS.trim().split(/[,,]/),
     systemAgent: parseSystemAgent(appEnv.SYSTEM_AGENT),
     telemetry: {

package/src/server/routers/lambda/config/__snapshots__/index.test.ts.snap

@@ -8,24 +8,50 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
     "claude-2",
     "gpt-4-0125-preview",
   ],
-  "
+  "serverModelLists": [
     {
+      "abilities": {},
       "displayName": "llama",
       "enabled": true,
       "id": "llama",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "displayName": "claude-2",
       "enabled": true,
       "id": "claude-2",
+      "type": "chat",
     },
     {
+      "abilities": {
+        "functionCall": true,
+      },
       "contextWindowTokens": 128000,
       "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
       "displayName": "gpt-4-32k",
       "enabled": true,
-      "functionCall": true,
       "id": "gpt-4-0125-preview",
+      "pricing": {
+        "units": [
+          {
+            "name": "textInput",
+            "rate": 10,
+            "strategy": "fixed",
+            "unit": "millionTokens",
+          },
+          {
+            "name": "textOutput",
+            "rate": 30,
+            "strategy": "fixed",
+            "unit": "millionTokens",
+          },
+        ],
+      },
+      "providerId": "openai",
+      "releasedAt": "2024-01-25",
+      "source": "builtin",
+      "type": "chat",
     },
   ],
 }

@@ -34,61 +60,190 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
 exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = `
 [
   {
-    "
+    "abilities": {
+      "functionCall": true,
+    },
+    "contextWindowTokens": 16384,
     "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
     "displayName": "GPT-3.5 Turbo 1106",
     "enabled": true,
-    "functionCall": true,
     "id": "gpt-3.5-turbo-1106",
+    "pricing": {
+      "units": [
+        {
+          "name": "textInput",
+          "rate": 1,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+        {
+          "name": "textOutput",
+          "rate": 2,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+      ],
+    },
+    "providerId": "openai",
+    "releasedAt": "2023-11-06",
+    "source": "builtin",
+    "type": "chat",
   },
   {
-    "
+    "abilities": {
+      "functionCall": true,
+    },
+    "contextWindowTokens": 16384,
     "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务,Currently points to gpt-3.5-turbo-0125",
     "displayName": "GPT-3.5 Turbo",
     "enabled": true,
-    "functionCall": true,
    "id": "gpt-3.5-turbo",
+    "pricing": {
+      "units": [
+        {
+          "name": "textInput",
+          "rate": 0.5,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+        {
+          "name": "textOutput",
+          "rate": 1.5,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+      ],
+    },
+    "providerId": "openai",
+    "source": "builtin",
+    "type": "chat",
   },
   {
+    "abilities": {
+      "functionCall": true,
+    },
     "contextWindowTokens": 8192,
     "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
     "displayName": "GPT-4",
     "enabled": true,
-    "functionCall": true,
     "id": "gpt-4",
+    "pricing": {
+      "units": [
+        {
+          "name": "textInput",
+          "rate": 30,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+        {
+          "name": "textOutput",
+          "rate": 60,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+      ],
+    },
+    "providerId": "openai",
+    "source": "builtin",
+    "type": "chat",
  },
   {
+    "abilities": {
+      "functionCall": true,
+    },
     "contextWindowTokens": 32768,
     "description": "GPT-4 提供了一个更大的上下文窗口,能够处理更长的文本输入,适用于需要广泛信息整合和数据分析的场景。",
     "displayName": "GPT-4 32K",
     "enabled": true,
-    "functionCall": true,
     "id": "gpt-4-32k",
+    "pricing": {
+      "units": [
+        {
+          "name": "textInput",
+          "rate": 60,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+        {
+          "name": "textOutput",
+          "rate": 120,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+      ],
+    },
+    "providerId": "openai",
+    "source": "builtin",
+    "type": "chat",
   },
   {
+    "abilities": {
+      "functionCall": true,
+    },
     "contextWindowTokens": 128000,
     "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
     "displayName": "GPT-4 Turbo Preview 1106",
     "enabled": true,
-    "functionCall": true,
     "id": "gpt-4-1106-preview",
+    "pricing": {
+      "units": [
+        {
+          "name": "textInput",
+          "rate": 10,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+        {
+          "name": "textOutput",
+          "rate": 30,
+          "strategy": "fixed",
+          "unit": "millionTokens",
+        },
+      ],
+    },
+    "providerId": "openai",
+    "releasedAt": "2023-11-06",
+    "source": "builtin",
+    "type": "chat",
   },
   {
+    "abilities": {},
     "displayName": "gpt-4-vision",
     "enabled": true,
     "id": "gpt-4-vision",
+    "type": "chat",
   },
 ]
 `;
 
 exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST > show the hidden model 1`] = `
 {
+  "abilities": {
+    "functionCall": true,
+  },
   "contextWindowTokens": 128000,
   "description": "最新的 GPT-4 Turbo 模型具备视觉功能。现在,视觉请求可以使用 JSON 模式和函数调用。 GPT-4 Turbo 是一个增强版本,为多模态任务提供成本效益高的支持。它在准确性和效率之间找到平衡,适合需要进行实时交互的应用程序场景。",
   "displayName": "GPT-4 Turbo Preview 1106",
   "enabled": true,
-  "functionCall": true,
   "id": "gpt-4-1106-preview",
+  "pricing": {
+    "units": [
+      {
+        "name": "textInput",
+        "rate": 10,
+        "strategy": "fixed",
+        "unit": "millionTokens",
+      },
+      {
+        "name": "textOutput",
+        "rate": 30,
+        "strategy": "fixed",
+        "unit": "millionTokens",
+      },
+    ],
+  },
+  "releasedAt": "2023-11-06",
+  "type": "chat",
 }
 `;
 

@@ -99,20 +254,28 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENROUTER_MODEL_
     "meta-llama/llama-3.1-8b-instruct:free",
     "google/gemma-2-9b-it:free",
   ],
-  "
+  "serverModelLists": [
     {
-      "
+      "abilities": {},
+      "contextWindowTokens": 131072,
       "description": "LLaMA 3.1 提供多语言支持,是业界领先的生成模型之一。",
       "displayName": "Llama 3.1 8B (Free)",
       "enabled": true,
       "id": "meta-llama/llama-3.1-8b-instruct:free",
+      "providerId": "openrouter",
+      "source": "builtin",
+      "type": "chat",
     },
     {
+      "abilities": {},
       "contextWindowTokens": 8192,
       "description": "Gemma 2 是Google轻量化的开源文本模型系列。",
       "displayName": "Gemma 2 9B (Free)",
       "enabled": true,
       "id": "google/gemma-2-9b-it:free",
+      "providerId": "openrouter",
+      "source": "builtin",
+      "type": "chat",
     },
   ],
 }