@lobehub/chat 1.31.1 → 1.31.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/package.json +1 -1
- package/src/config/modelProviders/deepseek.ts +5 -4
- package/src/const/settings/llm.ts +4 -168
- package/src/server/globalConfig/genServerLLMConfig.test.ts +90 -0
- package/src/server/globalConfig/genServerLLMConfig.ts +34 -0
- package/src/server/globalConfig/index.ts +9 -403
- package/src/utils/genUserLLMConfig.test.ts +45 -0
- package/src/utils/genUserLLMConfig.ts +22 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
  
  # Changelog
  
+ ### [Version 1.31.3](https://github.com/lobehub/lobe-chat/compare/v1.31.2...v1.31.3)
+
+ <sup>Released on **2024-11-12**</sup>
+
+ #### ♻ Code Refactoring
+
+ - **misc**: Refactor `languageModel` & `DEFAULT_LLM_CONFIG` generate.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Code refactoring
+
+ - **misc**: Refactor `languageModel` & `DEFAULT_LLM_CONFIG` generate, closes [#4649](https://github.com/lobehub/lobe-chat/issues/4649) ([2045131](https://github.com/lobehub/lobe-chat/commit/2045131))
+
+ </details>
+
+ <div align="right">
+
+ [](#readme-top)
+
+ </div>
+
+ ### [Version 1.31.2](https://github.com/lobehub/lobe-chat/compare/v1.31.1...v1.31.2)
+
+ <sup>Released on **2024-11-12**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Update deepseek model.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Update deepseek model, closes [#4674](https://github.com/lobehub/lobe-chat/issues/4674) ([5ab2986](https://github.com/lobehub/lobe-chat/commit/5ab2986))
+
+ </details>
+
+ <div align="right">
+
+ [](#readme-top)
+
+ </div>
+
  ### [Version 1.31.1](https://github.com/lobehub/lobe-chat/compare/v1.31.0...v1.31.1)
  
  <sup>Released on **2024-11-12**</sup>
package/package.json
CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.31.1",
+   "version": "1.31.3",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
package/src/config/modelProviders/deepseek.ts
CHANGED
@@ -11,12 +11,13 @@ const DeepSeek: ModelProviderCard = {
        functionCall: true,
        id: 'deepseek-chat',
        pricing: {
-         cachedInput: 0.
-
-
+         cachedInput: 0.1,
+         currency: 'CNY',
+         input: 1,
+         output: 2,
        },
        releasedAt: '2024-09-05',
-       tokens:
+       tokens: 64_000,
      },
    ],
    checkModel: 'deepseek-chat',
package/src/const/settings/llm.ts
CHANGED
@@ -1,180 +1,16 @@
- import {
-   Ai21ProviderCard,
-   Ai360ProviderCard,
-   AnthropicProviderCard,
-   BaichuanProviderCard,
-   BedrockProviderCard,
-   CloudflareProviderCard,
-   DeepSeekProviderCard,
-   FireworksAIProviderCard,
-   GithubProviderCard,
-   GoogleProviderCard,
-   GroqProviderCard,
-   HuggingFaceProviderCard,
-   HunyuanProviderCard,
-   MinimaxProviderCard,
-   MistralProviderCard,
-   MoonshotProviderCard,
-   NovitaProviderCard,
-   OllamaProviderCard,
-   OpenAIProviderCard,
-   OpenRouterProviderCard,
-   PerplexityProviderCard,
-   QwenProviderCard,
-   SenseNovaProviderCard,
-   SiliconCloudProviderCard,
-   SparkProviderCard,
-   StepfunProviderCard,
-   TaichuProviderCard,
-   TogetherAIProviderCard,
-   UpstageProviderCard,
-   WenxinProviderCard,
-   XAIProviderCard,
-   ZeroOneProviderCard,
-   ZhiPuProviderCard,
-   filterEnabledModels,
- } from '@/config/modelProviders';
  import { ModelProvider } from '@/libs/agent-runtime';
- import { UserModelProviderConfig } from '@/types/user/settings';
  
-
-
-
-     enabledModels: filterEnabledModels(Ai21ProviderCard),
-   },
-   ai360: {
-     enabled: false,
-     enabledModels: filterEnabledModels(Ai360ProviderCard),
-   },
-   anthropic: {
-     enabled: false,
-     enabledModels: filterEnabledModels(AnthropicProviderCard),
-   },
-   azure: {
-     enabled: false,
-   },
-   baichuan: {
-     enabled: false,
-     enabledModels: filterEnabledModels(BaichuanProviderCard),
-   },
-   bedrock: {
-     enabled: false,
-     enabledModels: filterEnabledModels(BedrockProviderCard),
-   },
-   cloudflare: {
-     enabled: false,
-     enabledModels: filterEnabledModels(CloudflareProviderCard),
-   },
-   deepseek: {
-     enabled: false,
-     enabledModels: filterEnabledModels(DeepSeekProviderCard),
-   },
-   fireworksai: {
-     enabled: false,
-     enabledModels: filterEnabledModels(FireworksAIProviderCard),
-   },
-   github: {
-     enabled: false,
-     enabledModels: filterEnabledModels(GithubProviderCard),
-   },
-   google: {
-     enabled: false,
-     enabledModels: filterEnabledModels(GoogleProviderCard),
-   },
-   groq: {
-     enabled: false,
-     enabledModels: filterEnabledModels(GroqProviderCard),
-   },
-   huggingface: {
-     enabled: false,
-     enabledModels: filterEnabledModels(HuggingFaceProviderCard),
-   },
-   hunyuan: {
-     enabled: false,
-     enabledModels: filterEnabledModels(HunyuanProviderCard),
-   },
-   minimax: {
-     enabled: false,
-     enabledModels: filterEnabledModels(MinimaxProviderCard),
-   },
-   mistral: {
-     enabled: false,
-     enabledModels: filterEnabledModels(MistralProviderCard),
-   },
-   moonshot: {
-     enabled: false,
-     enabledModels: filterEnabledModels(MoonshotProviderCard),
-   },
-   novita: {
-     enabled: false,
-     enabledModels: filterEnabledModels(NovitaProviderCard),
-   },
+ import { genUserLLMConfig } from '@/utils/genUserLLMConfig'
+
+ export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
    ollama: {
      enabled: true,
-     enabledModels: filterEnabledModels(OllamaProviderCard),
      fetchOnClient: true,
    },
    openai: {
      enabled: true,
-     enabledModels: filterEnabledModels(OpenAIProviderCard),
-   },
-   openrouter: {
-     enabled: false,
-     enabledModels: filterEnabledModels(OpenRouterProviderCard),
-   },
-   perplexity: {
-     enabled: false,
-     enabledModels: filterEnabledModels(PerplexityProviderCard),
-   },
-   qwen: {
-     enabled: false,
-     enabledModels: filterEnabledModels(QwenProviderCard),
-   },
-   sensenova: {
-     enabled: false,
-     enabledModels: filterEnabledModels(SenseNovaProviderCard),
-   },
-   siliconcloud: {
-     enabled: false,
-     enabledModels: filterEnabledModels(SiliconCloudProviderCard),
-   },
-   spark: {
-     enabled: false,
-     enabledModels: filterEnabledModels(SparkProviderCard),
-   },
-   stepfun: {
-     enabled: false,
-     enabledModels: filterEnabledModels(StepfunProviderCard),
-   },
-   taichu: {
-     enabled: false,
-     enabledModels: filterEnabledModels(TaichuProviderCard),
-   },
-   togetherai: {
-     enabled: false,
-     enabledModels: filterEnabledModels(TogetherAIProviderCard),
-   },
-   upstage: {
-     enabled: false,
-     enabledModels: filterEnabledModels(UpstageProviderCard),
-   },
-   wenxin: {
-     enabled: false,
-     enabledModels: filterEnabledModels(WenxinProviderCard),
-   },
-   xai: {
-     enabled: false,
-     enabledModels: filterEnabledModels(XAIProviderCard),
-   },
-   zeroone: {
-     enabled: false,
-     enabledModels: filterEnabledModels(ZeroOneProviderCard),
-   },
-   zhipu: {
-     enabled: false,
-     enabledModels: filterEnabledModels(ZhiPuProviderCard),
    },
- };
+ });
  
  export const DEFAULT_MODEL = 'gpt-4o-mini';
  export const DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small';
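For orientation (not part of the published diff), a minimal sketch of how the refactored `DEFAULT_LLM_CONFIG` above is produced and what it evaluates to, assuming the repository's `@/` path aliases and that `filterEnabledModels` returns a provider card's enabled model ids:

  // Hypothetical usage sketch; the call mirrors the llm.ts diff above.
  import { genUserLLMConfig } from '@/utils/genUserLLMConfig';

  const DEFAULT_LLM_CONFIG = genUserLLMConfig({
    ollama: { enabled: true, fetchOnClient: true },
    openai: { enabled: true },
  });

  // Every ModelProvider gets an entry; providers not passed in default to
  // { enabled: false, enabledModels: filterEnabledModels(card) }.
  DEFAULT_LLM_CONFIG.openai;    // => { enabled: true, enabledModels: [...] }
  DEFAULT_LLM_CONFIG.anthropic; // => { enabled: false, enabledModels: [...] }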
package/src/server/globalConfig/genServerLLMConfig.test.ts
ADDED
@@ -0,0 +1,90 @@
+ import { describe, expect, it, vi } from 'vitest';
+
+ import { genServerLLMConfig } from './genServerLLMConfig';
+
+ import { getLLMConfig } from '@/config/llm';
+
+ // Mock ModelProvider enum
+ vi.mock('@/libs/agent-runtime', () => ({
+   ModelProvider: {
+     Azure: 'azure',
+     Bedrock: 'bedrock',
+     Ollama: 'ollama',
+   }
+ }));
+
+ // Mock ProviderCards
+ vi.mock('@/config/modelProviders', () => ({
+   azureProviderCard: {
+     chatModels: [],
+   },
+   bedrockProviderCard: {
+     chatModels: ['bedrockModel1', 'bedrockModel2'],
+   },
+   ollamaProviderCard: {
+     chatModels: ['ollamaModel1', 'ollamaModel2'],
+   },
+ }));
+
+ // Mock LLM config
+ vi.mock('@/config/llm', () => ({
+   getLLMConfig: () => ({
+     ENABLED_AZURE_OPENAI: true,
+     ENABLED_AWS_BEDROCK: true,
+     ENABLED_OLLAMA: true,
+     AZURE_MODEL_LIST: 'azureModels',
+     AWS_BEDROCK_MODEL_LIST: 'bedrockModels',
+     OLLAMA_MODEL_LIST: 'ollamaModels',
+     OLLAMA_PROXY_URL: '',
+   }),
+ }));
+
+ // Mock parse models utils
+ vi.mock('@/utils/parseModels', () => ({
+   extractEnabledModels: (modelString: string, withDeploymentName?: boolean) => {
+     // Returns different format if withDeploymentName is true
+     return withDeploymentName ? [`${modelString}_withDeployment`] : [modelString];
+   },
+   transformToChatModelCards: ({ defaultChatModels, modelString, withDeploymentName }: any) => {
+     // Simulate transformation based on withDeploymentName
+     return withDeploymentName ? [`${modelString}_transformed`] : defaultChatModels;
+   },
+ }));
+
+ describe('genServerLLMConfig', () => {
+   it('should generate correct LLM config for Azure, Bedrock, and Ollama', () => {
+     const specificConfig = {
+       azure: {
+         enabledKey: 'ENABLED_AZURE_OPENAI',
+         withDeploymentName: true,
+       },
+       bedrock: {
+         enabledKey: 'ENABLED_AWS_BEDROCK',
+         modelListKey: 'AWS_BEDROCK_MODEL_LIST',
+       },
+       ollama: {
+         fetchOnClient: !getLLMConfig().OLLAMA_PROXY_URL,
+       },
+     };
+     const config = genServerLLMConfig(specificConfig);
+
+     expect(config.azure).toEqual({
+       enabled: true,
+       enabledModels: ['azureModels_withDeployment'],
+       serverModelCards: ['azureModels_transformed'],
+     });
+
+     expect(config.bedrock).toEqual({
+       enabled: true,
+       enabledModels: ['bedrockModels'],
+       serverModelCards: ['bedrockModel1', 'bedrockModel2'],
+     });
+
+     expect(config.ollama).toEqual({
+       enabled: true,
+       enabledModels: ['ollamaModels'],
+       fetchOnClient: true,
+       serverModelCards: ['ollamaModel1', 'ollamaModel2'],
+     });
+   });
+ });
package/src/server/globalConfig/genServerLLMConfig.ts
ADDED
@@ -0,0 +1,34 @@
+ import { getLLMConfig } from '@/config/llm';
+ import * as ProviderCards from '@/config/modelProviders';
+
+ import { ModelProvider } from '@/libs/agent-runtime';
+
+ import { extractEnabledModels, transformToChatModelCards } from '@/utils/parseModels';
+
+ import { ModelProviderCard } from '@/types/llm';
+
+ export const genServerLLMConfig = (specificConfig: Record<any, any>) => {
+   const llmConfig = getLLMConfig() as Record<string, any>;
+
+   return Object.values(ModelProvider).reduce((config, provider) => {
+     const providerUpperCase = provider.toUpperCase();
+     const providerCard = ProviderCards[`${provider}ProviderCard` as keyof typeof ProviderCards] as ModelProviderCard;
+     const providerConfig = specificConfig[provider as keyof typeof specificConfig] || {};
+
+     config[provider] = {
+       enabled: llmConfig[providerConfig.enabledKey || `ENABLED_${providerUpperCase}`],
+       enabledModels: extractEnabledModels(
+         llmConfig[providerConfig.modelListKey || `${providerUpperCase}_MODEL_LIST`],
+         providerConfig.withDeploymentName || false,
+       ),
+       serverModelCards: transformToChatModelCards({
+         defaultChatModels: (providerCard as ModelProviderCard)?.chatModels || [],
+         modelString: llmConfig[providerConfig.modelListKey || `${providerUpperCase}_MODEL_LIST`],
+         withDeploymentName: providerConfig.withDeploymentName || false,
+       }),
+       ...(providerConfig.fetchOnClient !== undefined && { fetchOnClient: providerConfig.fetchOnClient }),
+     };
+
+     return config;
+   }, {} as Record<ModelProvider, any>);
+ };
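To make the env-key convention in `genServerLLMConfig` concrete, here is a small self-contained sketch (an illustration, not code from the package) of the fallback it applies: a provider-specific `enabledKey`/`modelListKey` wins, otherwise the uppercased provider name is used:

  // Standalone TypeScript sketch of the key fallback shown above.
  const resolveLLMEnvKeys = (
    provider: string,
    specific: { enabledKey?: string; modelListKey?: string } = {},
  ) => {
    const upper = provider.toUpperCase();
    return {
      enabledKey: specific.enabledKey || `ENABLED_${upper}`,
      modelListKey: specific.modelListKey || `${upper}_MODEL_LIST`,
    };
  };

  resolveLLMEnvKeys('openai');
  // => { enabledKey: 'ENABLED_OPENAI', modelListKey: 'OPENAI_MODEL_LIST' }
  resolveLLMEnvKeys('bedrock', { enabledKey: 'ENABLED_AWS_BEDROCK', modelListKey: 'AWS_BEDROCK_MODEL_LIST' });
  // => { enabledKey: 'ENABLED_AWS_BEDROCK', modelListKey: 'AWS_BEDROCK_MODEL_LIST' }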
package/src/server/globalConfig/index.ts
CHANGED
@@ -3,155 +3,17 @@ import { authEnv } from '@/config/auth';
  import { fileEnv } from '@/config/file';
  import { langfuseEnv } from '@/config/langfuse';
  import { getLLMConfig } from '@/config/llm';
- import {
-   Ai21ProviderCard,
-   Ai360ProviderCard,
-   AnthropicProviderCard,
-   BaichuanProviderCard,
-   BedrockProviderCard,
-   DeepSeekProviderCard,
-   FireworksAIProviderCard,
-   GithubProviderCard,
-   GoogleProviderCard,
-   GroqProviderCard,
-   HuggingFaceProviderCard,
-   HunyuanProviderCard,
-   MinimaxProviderCard,
-   MistralProviderCard,
-   MoonshotProviderCard,
-   NovitaProviderCard,
-   OllamaProviderCard,
-   OpenAIProviderCard,
-   OpenRouterProviderCard,
-   PerplexityProviderCard,
-   QwenProviderCard,
-   SenseNovaProviderCard,
-   SiliconCloudProviderCard,
-   SparkProviderCard,
-   StepfunProviderCard,
-   TaichuProviderCard,
-   TogetherAIProviderCard,
-   UpstageProviderCard,
-   WenxinProviderCard,
-   XAIProviderCard,
-   ZeroOneProviderCard,
-   ZhiPuProviderCard,
- } from '@/config/modelProviders';
  import { enableNextAuth } from '@/const/auth';
  import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
  import { GlobalServerConfig } from '@/types/serverConfig';
- import { extractEnabledModels, transformToChatModelCards } from '@/utils/parseModels';
  
  import { parseAgentConfig } from './parseDefaultAgent';
  
+ import { genServerLLMConfig } from './genServerLLMConfig'
+
  export const getServerGlobalConfig = () => {
    const { ACCESS_CODES, DEFAULT_AGENT_CONFIG } = getAppConfig();
  
-   const {
-     ENABLED_OPENAI,
-     OPENAI_MODEL_LIST,
-
-     ENABLED_MOONSHOT,
-     MOONSHOT_MODEL_LIST,
-
-     ENABLED_ZHIPU,
-     ZHIPU_MODEL_LIST,
-
-     ENABLED_AWS_BEDROCK,
-     AWS_BEDROCK_MODEL_LIST,
-
-     ENABLED_GOOGLE,
-     GOOGLE_MODEL_LIST,
-
-     ENABLED_GROQ,
-     GROQ_MODEL_LIST,
-
-     ENABLED_GITHUB,
-     GITHUB_MODEL_LIST,
-
-     ENABLED_HUNYUAN,
-     HUNYUAN_MODEL_LIST,
-
-     ENABLED_DEEPSEEK,
-     DEEPSEEK_MODEL_LIST,
-
-     ENABLED_PERPLEXITY,
-     PERPLEXITY_MODEL_LIST,
-
-     ENABLED_ANTHROPIC,
-     ANTHROPIC_MODEL_LIST,
-
-     ENABLED_MINIMAX,
-     MINIMAX_MODEL_LIST,
-
-     ENABLED_MISTRAL,
-     MISTRAL_MODEL_LIST,
-
-     ENABLED_NOVITA,
-     NOVITA_MODEL_LIST,
-
-     ENABLED_QWEN,
-     QWEN_MODEL_LIST,
-
-     ENABLED_STEPFUN,
-     STEPFUN_MODEL_LIST,
-
-     ENABLED_BAICHUAN,
-     BAICHUAN_MODEL_LIST,
-
-     ENABLED_TAICHU,
-
-     ENABLED_CLOUDFLARE,
-
-     TAICHU_MODEL_LIST,
-
-     ENABLED_AI21,
-     AI21_MODEL_LIST,
-
-     ENABLED_AI360,
-     AI360_MODEL_LIST,
-
-     ENABLED_SENSENOVA,
-     SENSENOVA_MODEL_LIST,
-
-     ENABLED_SILICONCLOUD,
-     SILICONCLOUD_MODEL_LIST,
-
-     ENABLED_UPSTAGE,
-     UPSTAGE_MODEL_LIST,
-
-     ENABLED_SPARK,
-     SPARK_MODEL_LIST,
-
-     ENABLED_AZURE_OPENAI,
-     AZURE_MODEL_LIST,
-
-     ENABLED_OLLAMA,
-     OLLAMA_MODEL_LIST,
-     OLLAMA_PROXY_URL,
-
-     ENABLED_OPENROUTER,
-     OPENROUTER_MODEL_LIST,
-
-     ENABLED_ZEROONE,
-     ZEROONE_MODEL_LIST,
-
-     ENABLED_TOGETHERAI,
-     TOGETHERAI_MODEL_LIST,
-
-     ENABLED_FIREWORKSAI,
-     FIREWORKSAI_MODEL_LIST,
-
-     ENABLED_WENXIN,
-     WENXIN_MODEL_LIST,
-
-     ENABLED_HUGGINGFACE,
-     HUGGINGFACE_MODEL_LIST,
-
-     ENABLED_XAI,
-     XAI_MODEL_LIST,
-   } = getLLMConfig();
-
    const config: GlobalServerConfig = {
      defaultAgent: {
        config: parseAgentConfig(DEFAULT_AGENT_CONFIG),
@@ -159,275 +21,19 @@ export const getServerGlobalConfig = () => {
      enableUploadFileToServer: !!fileEnv.S3_SECRET_ACCESS_KEY,
      enabledAccessCode: ACCESS_CODES?.length > 0,
      enabledOAuthSSO: enableNextAuth,
-     languageModel: {
-       ai21: {
-         enabled: ENABLED_AI21,
-         enabledModels: extractEnabledModels(AI21_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: Ai21ProviderCard.chatModels,
-           modelString: AI21_MODEL_LIST,
-         }),
-       },
-       ai360: {
-         enabled: ENABLED_AI360,
-         enabledModels: extractEnabledModels(AI360_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: Ai360ProviderCard.chatModels,
-           modelString: AI360_MODEL_LIST,
-         }),
-       },
-       anthropic: {
-         enabled: ENABLED_ANTHROPIC,
-         enabledModels: extractEnabledModels(ANTHROPIC_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: AnthropicProviderCard.chatModels,
-           modelString: ANTHROPIC_MODEL_LIST,
-         }),
-       },
+     languageModel: genServerLLMConfig({
        azure: {
-
-
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: [],
-           modelString: AZURE_MODEL_LIST,
-           withDeploymentName: true,
-         }),
-       },
-       baichuan: {
-         enabled: ENABLED_BAICHUAN,
-         enabledModels: extractEnabledModels(BAICHUAN_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: BaichuanProviderCard.chatModels,
-           modelString: BAICHUAN_MODEL_LIST,
-         }),
+         enabledKey: 'ENABLED_AZURE_OPENAI',
+         withDeploymentName: true,
        },
        bedrock: {
-
-
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: BedrockProviderCard.chatModels,
-           modelString: AWS_BEDROCK_MODEL_LIST,
-         }),
-       },
-       cloudflare: { enabled: ENABLED_CLOUDFLARE },
-       deepseek: {
-         enabled: ENABLED_DEEPSEEK,
-         enabledModels: extractEnabledModels(DEEPSEEK_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: DeepSeekProviderCard.chatModels,
-           modelString: DEEPSEEK_MODEL_LIST,
-         }),
-       },
-       fireworksai: {
-         enabled: ENABLED_FIREWORKSAI,
-         enabledModels: extractEnabledModels(FIREWORKSAI_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: FireworksAIProviderCard.chatModels,
-           modelString: FIREWORKSAI_MODEL_LIST,
-         }),
-       },
-       github: {
-         enabled: ENABLED_GITHUB,
-         enabledModels: extractEnabledModels(GITHUB_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: GithubProviderCard.chatModels,
-           modelString: GITHUB_MODEL_LIST,
-         }),
-       },
-       google: {
-         enabled: ENABLED_GOOGLE,
-         enabledModels: extractEnabledModels(GOOGLE_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: GoogleProviderCard.chatModels,
-           modelString: GOOGLE_MODEL_LIST,
-         }),
-       },
-       groq: {
-         enabled: ENABLED_GROQ,
-         enabledModels: extractEnabledModels(GROQ_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: GroqProviderCard.chatModels,
-           modelString: GROQ_MODEL_LIST,
-         }),
-       },
-       huggingface: {
-         enabled: ENABLED_HUGGINGFACE,
-         enabledModels: extractEnabledModels(HUGGINGFACE_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: HuggingFaceProviderCard.chatModels,
-           modelString: HUGGINGFACE_MODEL_LIST,
-         }),
-       },
-       hunyuan: {
-         enabled: ENABLED_HUNYUAN,
-         enabledModels: extractEnabledModels(HUNYUAN_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: HunyuanProviderCard.chatModels,
-           modelString: HUNYUAN_MODEL_LIST,
-         }),
-       },
-       minimax: {
-         enabled: ENABLED_MINIMAX,
-         enabledModels: extractEnabledModels(MINIMAX_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: MinimaxProviderCard.chatModels,
-           modelString: MINIMAX_MODEL_LIST,
-         }),
-       },
-       mistral: {
-         enabled: ENABLED_MISTRAL,
-         enabledModels: extractEnabledModels(MISTRAL_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: MistralProviderCard.chatModels,
-           modelString: MISTRAL_MODEL_LIST,
-         }),
-       },
-       moonshot: {
-         enabled: ENABLED_MOONSHOT,
-         enabledModels: extractEnabledModels(MOONSHOT_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: MoonshotProviderCard.chatModels,
-           modelString: MOONSHOT_MODEL_LIST,
-         }),
-       },
-       novita: {
-         enabled: ENABLED_NOVITA,
-         enabledModels: extractEnabledModels(NOVITA_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: NovitaProviderCard.chatModels,
-           modelString: NOVITA_MODEL_LIST,
-         }),
+         enabledKey: 'ENABLED_AWS_BEDROCK',
+         modelListKey: 'AWS_BEDROCK_MODEL_LIST',
        },
        ollama: {
-
-         enabledModels: extractEnabledModels(OLLAMA_MODEL_LIST),
-         fetchOnClient: !OLLAMA_PROXY_URL,
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: OllamaProviderCard.chatModels,
-           modelString: OLLAMA_MODEL_LIST,
-         }),
+         fetchOnClient: !getLLMConfig().OLLAMA_PROXY_URL,
        },
-
-         enabled: ENABLED_OPENAI,
-         enabledModels: extractEnabledModels(OPENAI_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: OpenAIProviderCard.chatModels,
-           modelString: OPENAI_MODEL_LIST,
-         }),
-       },
-       openrouter: {
-         enabled: ENABLED_OPENROUTER,
-         enabledModels: extractEnabledModels(OPENROUTER_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: OpenRouterProviderCard.chatModels,
-           modelString: OPENROUTER_MODEL_LIST,
-         }),
-       },
-       perplexity: {
-         enabled: ENABLED_PERPLEXITY,
-         enabledModels: extractEnabledModels(PERPLEXITY_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: PerplexityProviderCard.chatModels,
-           modelString: PERPLEXITY_MODEL_LIST,
-         }),
-       },
-       qwen: {
-         enabled: ENABLED_QWEN,
-         enabledModels: extractEnabledModels(QWEN_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: QwenProviderCard.chatModels,
-           modelString: QWEN_MODEL_LIST,
-         }),
-       },
-       sensenova: {
-         enabled: ENABLED_SENSENOVA,
-         enabledModels: extractEnabledModels(SENSENOVA_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: SenseNovaProviderCard.chatModels,
-           modelString: SENSENOVA_MODEL_LIST,
-         }),
-       },
-       siliconcloud: {
-         enabled: ENABLED_SILICONCLOUD,
-         enabledModels: extractEnabledModels(SILICONCLOUD_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: SiliconCloudProviderCard.chatModels,
-           modelString: SILICONCLOUD_MODEL_LIST,
-         }),
-       },
-       spark: {
-         enabled: ENABLED_SPARK,
-         enabledModels: extractEnabledModels(SPARK_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: SparkProviderCard.chatModels,
-           modelString: SPARK_MODEL_LIST,
-         }),
-       },
-       stepfun: {
-         enabled: ENABLED_STEPFUN,
-         enabledModels: extractEnabledModels(STEPFUN_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: StepfunProviderCard.chatModels,
-           modelString: STEPFUN_MODEL_LIST,
-         }),
-       },
-       taichu: {
-         enabled: ENABLED_TAICHU,
-         enabledModels: extractEnabledModels(TAICHU_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: TaichuProviderCard.chatModels,
-           modelString: TAICHU_MODEL_LIST,
-         }),
-       },
-       togetherai: {
-         enabled: ENABLED_TOGETHERAI,
-         enabledModels: extractEnabledModels(TOGETHERAI_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: TogetherAIProviderCard.chatModels,
-           modelString: TOGETHERAI_MODEL_LIST,
-         }),
-       },
-       upstage: {
-         enabled: ENABLED_UPSTAGE,
-         enabledModels: extractEnabledModels(UPSTAGE_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: UpstageProviderCard.chatModels,
-           modelString: UPSTAGE_MODEL_LIST,
-         }),
-       },
-       wenxin: {
-         enabled: ENABLED_WENXIN,
-         enabledModels: extractEnabledModels(WENXIN_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: WenxinProviderCard.chatModels,
-           modelString: WENXIN_MODEL_LIST,
-         }),
-       },
-       xai: {
-         enabled: ENABLED_XAI,
-         enabledModels: extractEnabledModels(XAI_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: XAIProviderCard.chatModels,
-           modelString: XAI_MODEL_LIST,
-         }),
-       },
-       zeroone: {
-         enabled: ENABLED_ZEROONE,
-         enabledModels: extractEnabledModels(ZEROONE_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: ZeroOneProviderCard.chatModels,
-           modelString: ZEROONE_MODEL_LIST,
-         }),
-       },
-       zhipu: {
-         enabled: ENABLED_ZHIPU,
-         enabledModels: extractEnabledModels(ZHIPU_MODEL_LIST),
-         serverModelCards: transformToChatModelCards({
-           defaultChatModels: ZhiPuProviderCard.chatModels,
-           modelString: ZHIPU_MODEL_LIST,
-         }),
-       },
-     },
+     }),
      oAuthSSOProviders: authEnv.NEXT_AUTH_SSO_PROVIDERS.trim().split(/[,,]/),
      systemAgent: parseSystemAgent(appEnv.SYSTEM_AGENT),
      telemetry: {
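Read together with the genServerLLMConfig tests earlier in this diff, each entry of the generated `languageModel` appears to take the following shape (an inferred illustration, not a type exported by the package; field types are approximate):

  // Assumed per-provider shape produced by genServerLLMConfig, inferred from
  // the test expectations above.
  interface ServerProviderEntry {
    enabled: boolean;            // from the ENABLED_* env flag (or enabledKey override)
    enabledModels: string[];     // from extractEnabledModels(*_MODEL_LIST)
    serverModelCards: unknown[]; // from transformToChatModelCards({ ... })
    fetchOnClient?: boolean;     // only when passed via specificConfig (e.g. ollama)
  }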
package/src/utils/genUserLLMConfig.test.ts
ADDED
@@ -0,0 +1,45 @@
+ import { describe, expect, it, vi } from 'vitest';
+
+ import { genUserLLMConfig } from './genUserLLMConfig';
+
+ import { ModelProviderCard } from '@/types/llm';
+
+ // Mock ModelProvider enum
+ vi.mock('@/libs/agent-runtime', () => ({
+   ModelProvider: {
+     Ollama: 'ollama',
+     OpenAI: 'openai',
+   }
+ }));
+
+ // Mock ProviderCards and filterEnabledModels
+ vi.mock('@/config/modelProviders', () => ({
+   OllamaProviderCard: {
+     chatModels: ['ollamaModel1', 'ollamaModel2'],
+   },
+   OpenAIProviderCard: {
+     chatModels: ['openaiModel1', 'openaiModel2'],
+   },
+   filterEnabledModels: (providerCard: ModelProviderCard) => providerCard.chatModels,
+ }));
+
+ describe('genUserLLMConfig', () => {
+   it('should generate correct LLM config for Ollama and OpenAI', () => {
+     const specificConfig = {
+       ollama: { enabled: true, fetchOnClient: true },
+       openai: { enabled: true }
+     };
+     const config = genUserLLMConfig(specificConfig);
+
+     expect(config.ollama).toEqual({
+       enabled: true,
+       enabledModels: ['ollamaModel1', 'ollamaModel2'],
+       fetchOnClient: true,
+     });
+
+     expect(config.openai).toEqual({
+       enabled: true,
+       enabledModels: ['openaiModel1', 'openaiModel2'],
+     });
+   });
+ });
package/src/utils/genUserLLMConfig.ts
ADDED
@@ -0,0 +1,22 @@
+ import * as ProviderCards from '@/config/modelProviders';
+
+ import { ModelProvider } from '@/libs/agent-runtime';
+
+ import { ModelProviderCard } from '@/types/llm';
+ import { UserModelProviderConfig } from '@/types/user/settings';
+
+ export const genUserLLMConfig = (specificConfig: Record<any, any>): UserModelProviderConfig => {
+   return Object.keys(ModelProvider).reduce((config, providerKey) => {
+     const provider = ModelProvider[providerKey as keyof typeof ModelProvider];
+     const providerCard = ProviderCards[`${providerKey}ProviderCard` as keyof typeof ProviderCards] as ModelProviderCard;
+     const providerConfig = specificConfig[provider as keyof typeof specificConfig] || {};
+
+     config[provider] = {
+       enabled: providerConfig.enabled !== undefined ? providerConfig.enabled : false,
+       enabledModels: providerCard ? ProviderCards.filterEnabledModels(providerCard) : [],
+       ...(providerConfig.fetchOnClient !== undefined && { fetchOnClient: providerConfig.fetchOnClient }),
+     };
+
+     return config;
+   }, {} as UserModelProviderConfig);
+ };