@lobehub/chat 1.32.7 → 1.32.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/Dockerfile +1 -1
- package/Dockerfile.database +1 -1
- package/package.json +1 -1
- package/src/app/(backend)/_deprecated/createBizOpenAI/createAzureOpenai.ts +2 -1
- package/src/app/(backend)/_deprecated/createBizOpenAI/createOpenai.ts +2 -1
- package/src/config/llm.ts +0 -18
- package/src/config/modelProviders/moonshot.ts +3 -0
- package/src/server/globalConfig/genServerLLMConfig.test.ts +1 -1
- package/src/server/globalConfig/index.ts +1 -2
- package/src/server/modules/AgentRuntime/index.ts +31 -33
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
|
|
2
2
|
|
3
3
|
# Changelog
|
4
4
|
|
5
|
+
### [Version 1.32.8](https://github.com/lobehub/lobe-chat/compare/v1.32.7...v1.32.8)
|
6
|
+
|
7
|
+
<sup>Released on **2024-11-24**</sup>
|
8
|
+
|
9
|
+
#### 🐛 Bug Fixes
|
10
|
+
|
11
|
+
- **misc**: Fix `XAI_PROXY_URL` env missing.
|
12
|
+
|
13
|
+
<br/>
|
14
|
+
|
15
|
+
<details>
|
16
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
17
|
+
|
18
|
+
#### What's fixed
|
19
|
+
|
20
|
+
- **misc**: Fix `XAI_PROXY_URL` env missing, closes [#4747](https://github.com/lobehub/lobe-chat/issues/4747) ([7c9e88e](https://github.com/lobehub/lobe-chat/commit/7c9e88e))
|
21
|
+
|
22
|
+
</details>
|
23
|
+
|
24
|
+
<div align="right">
|
25
|
+
|
26
|
+
[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
|
27
|
+
|
28
|
+
</div>
|
29
|
+
|
5
30
|
### [Version 1.32.7](https://github.com/lobehub/lobe-chat/compare/v1.32.6...v1.32.7)
|
6
31
|
|
7
32
|
<sup>Released on **2024-11-24**</sup>
|
package/Dockerfile
CHANGED
@@ -205,7 +205,7 @@ ENV \
|
|
205
205
|
# Wenxin
|
206
206
|
WENXIN_ACCESS_KEY="" WENXIN_SECRET_KEY="" WENXIN_MODEL_LIST="" \
|
207
207
|
# xAI
|
208
|
-
XAI_API_KEY="" XAI_MODEL_LIST="" \
|
208
|
+
XAI_API_KEY="" XAI_MODEL_LIST="" XAI_PROXY_URL="" \
|
209
209
|
# 01.AI
|
210
210
|
ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
|
211
211
|
# Zhipu
|
package/Dockerfile.database
CHANGED
@@ -240,7 +240,7 @@ ENV \
|
|
240
240
|
# Wenxin
|
241
241
|
WENXIN_ACCESS_KEY="" WENXIN_SECRET_KEY="" WENXIN_MODEL_LIST="" \
|
242
242
|
# xAI
|
243
|
-
XAI_API_KEY="" XAI_MODEL_LIST="" \
|
243
|
+
XAI_API_KEY="" XAI_MODEL_LIST="" XAI_PROXY_URL="" \
|
244
244
|
# 01.AI
|
245
245
|
ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
|
246
246
|
# Zhipu
|
package/package.json
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
{
|
2
2
|
"name": "@lobehub/chat",
|
3
|
-
"version": "1.32.
|
3
|
+
"version": "1.32.8",
|
4
4
|
"description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
|
5
5
|
"keywords": [
|
6
6
|
"framework",
|
package/src/app/(backend)/_deprecated/createBizOpenAI/createAzureOpenai.ts
CHANGED
@@ -11,7 +11,8 @@ export const createAzureOpenai = (params: {
|
|
11
11
|
model: string;
|
12
12
|
userApiKey?: string | null;
|
13
13
|
}) => {
|
14
|
-
const { AZURE_API_VERSION, AZURE_API_KEY, OPENAI_PROXY_URL } = getLLMConfig();
|
14
|
+
const { AZURE_API_VERSION, AZURE_API_KEY } = getLLMConfig();
|
15
|
+
const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL || '';
|
15
16
|
|
16
17
|
const endpoint = !params.endpoint ? OPENAI_PROXY_URL : params.endpoint;
|
17
18
|
const baseURL = urlJoin(endpoint, `/openai/deployments/${params.model.replace('.', '')}`); // refs: https://test-001.openai.azure.com/openai/deployments/gpt-35-turbo
|
package/src/app/(backend)/_deprecated/createBizOpenAI/createOpenai.ts
CHANGED
@@ -5,7 +5,8 @@ import { ChatErrorType } from '@/types/fetch';
|
|
5
5
|
|
6
6
|
// create OpenAI instance
|
7
7
|
export const createOpenai = (userApiKey: string | null, endpoint?: string | null) => {
|
8
|
-
const { OPENAI_API_KEY, OPENAI_PROXY_URL } = getLLMConfig();
|
8
|
+
const { OPENAI_API_KEY } = getLLMConfig();
|
9
|
+
const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL;
|
9
10
|
|
10
11
|
const baseURL = endpoint ? endpoint : OPENAI_PROXY_URL ? OPENAI_PROXY_URL : undefined;
|
11
12
|
|
package/src/config/llm.ts
CHANGED
@@ -9,7 +9,6 @@ export const getLLMConfig = () => {
|
|
9
9
|
|
10
10
|
ENABLED_OPENAI: z.boolean(),
|
11
11
|
OPENAI_API_KEY: z.string().optional(),
|
12
|
-
OPENAI_PROXY_URL: z.string().optional(),
|
13
12
|
|
14
13
|
ENABLED_AZURE_OPENAI: z.boolean(),
|
15
14
|
AZURE_API_KEY: z.string().optional(),
|
@@ -24,19 +23,15 @@ export const getLLMConfig = () => {
|
|
24
23
|
|
25
24
|
ENABLED_GOOGLE: z.boolean(),
|
26
25
|
GOOGLE_API_KEY: z.string().optional(),
|
27
|
-
GOOGLE_PROXY_URL: z.string().optional(),
|
28
26
|
|
29
27
|
ENABLED_MOONSHOT: z.boolean(),
|
30
28
|
MOONSHOT_API_KEY: z.string().optional(),
|
31
|
-
MOONSHOT_PROXY_URL: z.string().optional(),
|
32
29
|
|
33
30
|
ENABLED_PERPLEXITY: z.boolean(),
|
34
31
|
PERPLEXITY_API_KEY: z.string().optional(),
|
35
|
-
PERPLEXITY_PROXY_URL: z.string().optional(),
|
36
32
|
|
37
33
|
ENABLED_ANTHROPIC: z.boolean(),
|
38
34
|
ANTHROPIC_API_KEY: z.string().optional(),
|
39
|
-
ANTHROPIC_PROXY_URL: z.string().optional(),
|
40
35
|
|
41
36
|
ENABLED_MINIMAX: z.boolean(),
|
42
37
|
MINIMAX_API_KEY: z.string().optional(),
|
@@ -46,7 +41,6 @@ export const getLLMConfig = () => {
|
|
46
41
|
|
47
42
|
ENABLED_GROQ: z.boolean(),
|
48
43
|
GROQ_API_KEY: z.string().optional(),
|
49
|
-
GROQ_PROXY_URL: z.string().optional(),
|
50
44
|
|
51
45
|
ENABLED_GITHUB: z.boolean(),
|
52
46
|
GITHUB_TOKEN: z.string().optional(),
|
@@ -74,7 +68,6 @@ export const getLLMConfig = () => {
|
|
74
68
|
WENXIN_SECRET_KEY: z.string().optional(),
|
75
69
|
|
76
70
|
ENABLED_OLLAMA: z.boolean(),
|
77
|
-
OLLAMA_PROXY_URL: z.string().optional(),
|
78
71
|
|
79
72
|
ENABLED_QWEN: z.boolean(),
|
80
73
|
QWEN_API_KEY: z.string().optional(),
|
@@ -100,7 +93,6 @@ export const getLLMConfig = () => {
|
|
100
93
|
|
101
94
|
ENABLED_SILICONCLOUD: z.boolean(),
|
102
95
|
SILICONCLOUD_API_KEY: z.string().optional(),
|
103
|
-
SILICONCLOUD_PROXY_URL: z.string().optional(),
|
104
96
|
|
105
97
|
ENABLED_UPSTAGE: z.boolean(),
|
106
98
|
UPSTAGE_API_KEY: z.string().optional(),
|
@@ -116,7 +108,6 @@ export const getLLMConfig = () => {
|
|
116
108
|
|
117
109
|
ENABLED_HUGGINGFACE: z.boolean(),
|
118
110
|
HUGGINGFACE_API_KEY: z.string().optional(),
|
119
|
-
HUGGINGFACE_PROXY_URL: z.string().optional(),
|
120
111
|
|
121
112
|
ENABLED_SENSENOVA: z.boolean(),
|
122
113
|
SENSENOVA_ACCESS_KEY_ID: z.string().optional(),
|
@@ -133,7 +124,6 @@ export const getLLMConfig = () => {
|
|
133
124
|
|
134
125
|
ENABLED_OPENAI: process.env.ENABLED_OPENAI !== '0',
|
135
126
|
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
|
136
|
-
OPENAI_PROXY_URL: process.env.OPENAI_PROXY_URL,
|
137
127
|
|
138
128
|
ENABLED_AZURE_OPENAI: !!process.env.AZURE_API_KEY,
|
139
129
|
AZURE_API_KEY: process.env.AZURE_API_KEY,
|
@@ -148,15 +138,12 @@ export const getLLMConfig = () => {
|
|
148
138
|
|
149
139
|
ENABLED_GOOGLE: !!process.env.GOOGLE_API_KEY,
|
150
140
|
GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
|
151
|
-
GOOGLE_PROXY_URL: process.env.GOOGLE_PROXY_URL,
|
152
141
|
|
153
142
|
ENABLED_PERPLEXITY: !!process.env.PERPLEXITY_API_KEY,
|
154
143
|
PERPLEXITY_API_KEY: process.env.PERPLEXITY_API_KEY,
|
155
|
-
PERPLEXITY_PROXY_URL: process.env.PERPLEXITY_PROXY_URL,
|
156
144
|
|
157
145
|
ENABLED_ANTHROPIC: !!process.env.ANTHROPIC_API_KEY,
|
158
146
|
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
|
159
|
-
ANTHROPIC_PROXY_URL: process.env.ANTHROPIC_PROXY_URL,
|
160
147
|
|
161
148
|
ENABLED_MINIMAX: !!process.env.MINIMAX_API_KEY,
|
162
149
|
MINIMAX_API_KEY: process.env.MINIMAX_API_KEY,
|
@@ -175,11 +162,9 @@ export const getLLMConfig = () => {
|
|
175
162
|
|
176
163
|
ENABLED_MOONSHOT: !!process.env.MOONSHOT_API_KEY,
|
177
164
|
MOONSHOT_API_KEY: process.env.MOONSHOT_API_KEY,
|
178
|
-
MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
|
179
165
|
|
180
166
|
ENABLED_GROQ: !!process.env.GROQ_API_KEY,
|
181
167
|
GROQ_API_KEY: process.env.GROQ_API_KEY,
|
182
|
-
GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,
|
183
168
|
|
184
169
|
ENABLED_GITHUB: !!process.env.GITHUB_TOKEN,
|
185
170
|
GITHUB_TOKEN: process.env.GITHUB_TOKEN,
|
@@ -198,7 +183,6 @@ export const getLLMConfig = () => {
|
|
198
183
|
WENXIN_SECRET_KEY: process.env.WENXIN_SECRET_KEY,
|
199
184
|
|
200
185
|
ENABLED_OLLAMA: process.env.ENABLED_OLLAMA !== '0',
|
201
|
-
OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
|
202
186
|
|
203
187
|
ENABLED_QWEN: !!process.env.QWEN_API_KEY,
|
204
188
|
QWEN_API_KEY: process.env.QWEN_API_KEY,
|
@@ -225,7 +209,6 @@ export const getLLMConfig = () => {
|
|
225
209
|
|
226
210
|
ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
|
227
211
|
SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
|
228
|
-
SILICONCLOUD_PROXY_URL: process.env.SILICONCLOUD_PROXY_URL,
|
229
212
|
|
230
213
|
ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
|
231
214
|
UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
|
@@ -241,7 +224,6 @@ export const getLLMConfig = () => {
|
|
241
224
|
|
242
225
|
ENABLED_HUGGINGFACE: !!process.env.HUGGINGFACE_API_KEY,
|
243
226
|
HUGGINGFACE_API_KEY: process.env.HUGGINGFACE_API_KEY,
|
244
|
-
HUGGINGFACE_PROXY_URL: process.env.HUGGINGFACE_PROXY_URL,
|
245
227
|
|
246
228
|
ENABLED_SENSENOVA: !!process.env.SENSENOVA_ACCESS_KEY_ID && !!process.env.SENSENOVA_ACCESS_KEY_SECRET,
|
247
229
|
SENSENOVA_ACCESS_KEY_ID: process.env.SENSENOVA_ACCESS_KEY_ID,
|
package/src/server/globalConfig/genServerLLMConfig.test.ts
CHANGED
@@ -67,7 +67,7 @@ describe('genServerLLMConfig', () => {
|
|
67
67
|
modelListKey: 'AWS_BEDROCK_MODEL_LIST',
|
68
68
|
},
|
69
69
|
ollama: {
|
70
|
-
fetchOnClient: !getLLMConfig().OLLAMA_PROXY_URL,
|
70
|
+
fetchOnClient: !process.env.OLLAMA_PROXY_URL,
|
71
71
|
},
|
72
72
|
};
|
73
73
|
const config = genServerLLMConfig(specificConfig);
|
package/src/server/globalConfig/index.ts
CHANGED
@@ -2,7 +2,6 @@ import { appEnv, getAppConfig } from '@/config/app';
|
|
2
2
|
import { authEnv } from '@/config/auth';
|
3
3
|
import { fileEnv } from '@/config/file';
|
4
4
|
import { langfuseEnv } from '@/config/langfuse';
|
5
|
-
import { getLLMConfig } from '@/config/llm';
|
6
5
|
import { enableNextAuth } from '@/const/auth';
|
7
6
|
import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
|
8
7
|
import { GlobalServerConfig } from '@/types/serverConfig';
|
@@ -31,7 +30,7 @@ export const getServerGlobalConfig = () => {
|
|
31
30
|
modelListKey: 'AWS_BEDROCK_MODEL_LIST',
|
32
31
|
},
|
33
32
|
ollama: {
|
34
|
-
fetchOnClient: !getLLMConfig().OLLAMA_PROXY_URL,
|
33
|
+
fetchOnClient: !process.env.OLLAMA_PROXY_URL,
|
35
34
|
},
|
36
35
|
}),
|
37
36
|
oAuthSSOProviders: authEnv.NEXT_AUTH_SSO_PROVIDERS.trim().split(/[,,]/),
|
package/src/server/modules/AgentRuntime/index.ts
CHANGED
@@ -30,14 +30,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
|
|
30
30
|
switch (provider) {
|
31
31
|
default: // Use Openai options as default
|
32
32
|
case ModelProvider.OpenAI: {
|
33
|
-
const { OPENAI_API_KEY, OPENAI_PROXY_URL } = getLLMConfig();
|
33
|
+
const { OPENAI_API_KEY } = getLLMConfig();
|
34
|
+
|
34
35
|
const openaiApiKey = payload?.apiKey || OPENAI_API_KEY;
|
35
|
-
const baseURL = payload?.endpoint || OPENAI_PROXY_URL;
|
36
|
+
const baseURL = payload?.endpoint || process.env.OPENAI_PROXY_URL;
|
36
37
|
const apiKey = apiKeyManager.pick(openaiApiKey);
|
37
|
-
|
38
|
-
|
39
|
-
baseURL,
|
40
|
-
};
|
38
|
+
|
39
|
+
return { apiKey, baseURL };
|
41
40
|
}
|
42
41
|
case ModelProvider.Azure: {
|
43
42
|
const { AZURE_API_KEY, AZURE_API_VERSION, AZURE_ENDPOINT } = getLLMConfig();
|
@@ -52,27 +51,26 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
|
|
52
51
|
}
|
53
52
|
case ModelProvider.ZhiPu: {
|
54
53
|
const { ZHIPU_API_KEY } = getLLMConfig();
|
54
|
+
|
55
55
|
const apiKey = apiKeyManager.pick(payload?.apiKey || ZHIPU_API_KEY);
|
56
|
-
|
57
|
-
|
58
|
-
};
|
56
|
+
|
57
|
+
return { apiKey };
|
59
58
|
}
|
60
59
|
case ModelProvider.Google: {
|
61
|
-
const { GOOGLE_API_KEY, GOOGLE_PROXY_URL } = getLLMConfig();
|
60
|
+
const { GOOGLE_API_KEY } = getLLMConfig();
|
61
|
+
|
62
62
|
const apiKey = apiKeyManager.pick(payload?.apiKey || GOOGLE_API_KEY);
|
63
|
-
const baseURL = payload?.endpoint || GOOGLE_PROXY_URL;
|
64
|
-
|
65
|
-
|
66
|
-
baseURL,
|
67
|
-
};
|
63
|
+
const baseURL = payload?.endpoint || process.env.GOOGLE_PROXY_URL;
|
64
|
+
|
65
|
+
return { apiKey, baseURL };
|
68
66
|
}
|
69
67
|
case ModelProvider.Moonshot: {
|
70
|
-
const { MOONSHOT_API_KEY, MOONSHOT_PROXY_URL } = getLLMConfig();
|
68
|
+
const { MOONSHOT_API_KEY } = getLLMConfig();
|
69
|
+
|
71
70
|
const apiKey = apiKeyManager.pick(payload?.apiKey || MOONSHOT_API_KEY);
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
};
|
71
|
+
const baseURL = payload?.endpoint || process.env.MOONSHOT_PROXY_URL;
|
72
|
+
|
73
|
+
return { apiKey, baseURL };
|
76
74
|
}
|
77
75
|
case ModelProvider.Bedrock: {
|
78
76
|
const { AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID, AWS_REGION, AWS_SESSION_TOKEN } =
|
@@ -91,23 +89,22 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
|
|
91
89
|
return { accessKeyId, accessKeySecret, region, sessionToken };
|
92
90
|
}
|
93
91
|
case ModelProvider.Ollama: {
|
94
|
-
const { OLLAMA_PROXY_URL } = getLLMConfig();
|
95
|
-
const baseURL = payload?.endpoint || OLLAMA_PROXY_URL;
|
92
|
+
const baseURL = payload?.endpoint || process.env.OLLAMA_PROXY_URL;
|
96
93
|
return { baseURL };
|
97
94
|
}
|
98
95
|
case ModelProvider.Perplexity: {
|
99
|
-
const { PERPLEXITY_API_KEY, PERPLEXITY_PROXY_URL } = getLLMConfig();
|
96
|
+
const { PERPLEXITY_API_KEY } = getLLMConfig();
|
100
97
|
|
101
98
|
const apiKey = apiKeyManager.pick(payload?.apiKey || PERPLEXITY_API_KEY);
|
102
|
-
const baseURL = payload?.endpoint || PERPLEXITY_PROXY_URL;
|
99
|
+
const baseURL = payload?.endpoint || process.env.PERPLEXITY_PROXY_URL;
|
103
100
|
|
104
101
|
return { apiKey, baseURL };
|
105
102
|
}
|
106
103
|
case ModelProvider.Anthropic: {
|
107
|
-
const { ANTHROPIC_API_KEY, ANTHROPIC_PROXY_URL } = getLLMConfig();
|
104
|
+
const { ANTHROPIC_API_KEY } = getLLMConfig();
|
108
105
|
|
109
106
|
const apiKey = apiKeyManager.pick(payload?.apiKey || ANTHROPIC_API_KEY);
|
110
|
-
const baseURL = payload?.endpoint || ANTHROPIC_PROXY_URL;
|
107
|
+
const baseURL = payload?.endpoint || process.env.ANTHROPIC_PROXY_URL;
|
111
108
|
|
112
109
|
return { apiKey, baseURL };
|
113
110
|
}
|
@@ -126,10 +123,10 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
|
|
126
123
|
return { apiKey };
|
127
124
|
}
|
128
125
|
case ModelProvider.Groq: {
|
129
|
-
const { GROQ_API_KEY, GROQ_PROXY_URL } = getLLMConfig();
|
126
|
+
const { GROQ_API_KEY } = getLLMConfig();
|
130
127
|
|
131
128
|
const apiKey = apiKeyManager.pick(payload?.apiKey || GROQ_API_KEY);
|
132
|
-
const baseURL = payload?.endpoint || GROQ_PROXY_URL;
|
129
|
+
const baseURL = payload?.endpoint || process.env.GROQ_PROXY_URL;
|
133
130
|
|
134
131
|
return { apiKey, baseURL };
|
135
132
|
}
|
@@ -229,19 +226,19 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
|
|
229
226
|
return { apiKey };
|
230
227
|
}
|
231
228
|
case ModelProvider.SiliconCloud: {
|
232
|
-
const { SILICONCLOUD_API_KEY, SILICONCLOUD_PROXY_URL } = getLLMConfig();
|
229
|
+
const { SILICONCLOUD_API_KEY } = getLLMConfig();
|
233
230
|
|
234
231
|
const apiKey = apiKeyManager.pick(payload?.apiKey || SILICONCLOUD_API_KEY);
|
235
|
-
const baseURL = payload?.endpoint || SILICONCLOUD_PROXY_URL;
|
232
|
+
const baseURL = payload?.endpoint || process.env.SILICONCLOUD_PROXY_URL;
|
236
233
|
|
237
234
|
return { apiKey, baseURL };
|
238
235
|
}
|
239
236
|
|
240
237
|
case ModelProvider.HuggingFace: {
|
241
|
-
const { HUGGINGFACE_API_KEY, HUGGINGFACE_PROXY_URL } = getLLMConfig();
|
238
|
+
const { HUGGINGFACE_API_KEY } = getLLMConfig();
|
242
239
|
|
243
240
|
const apiKey = apiKeyManager.pick(payload?.apiKey || HUGGINGFACE_API_KEY);
|
244
|
-
const baseURL = payload?.endpoint || HUGGINGFACE_PROXY_URL;
|
241
|
+
const baseURL = payload?.endpoint || process.env.HUGGINGFACE_PROXY_URL;
|
245
242
|
|
246
243
|
return { apiKey, baseURL };
|
247
244
|
}
|
@@ -292,8 +289,9 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
|
|
292
289
|
const { XAI_API_KEY } = getLLMConfig();
|
293
290
|
|
294
291
|
const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);
|
292
|
+
const baseURL = payload?.endpoint || process.env.XAI_PROXY_URL;
|
295
293
|
|
296
|
-
return { apiKey };
|
294
|
+
return { apiKey, baseURL };
|
297
295
|
}
|
298
296
|
case ModelProvider.InternLM: {
|
299
297
|
const { INTERNLM_API_KEY } = getLLMConfig();
|