@lobehub/chat 1.31.11 → 1.32.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +58 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/package.json +1 -1
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/internlm.ts +42 -0
- package/src/database/server/models/__tests__/session.test.ts +38 -4
- package/src/database/server/models/session.ts +43 -10
- package/src/database/server/models/topic.ts +6 -1
- package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
- package/src/libs/agent-runtime/github/index.ts +1 -1
- package/src/libs/agent-runtime/internlm/index.test.ts +255 -0
- package/src/libs/agent-runtime/internlm/index.ts +18 -0
- package/src/libs/agent-runtime/openai/index.ts +0 -1
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/server/modules/AgentRuntime/index.ts +7 -0
- package/src/store/chat/slices/topic/selectors.ts +1 -1
- package/src/types/user/settings/keyVaults.ts +1 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,64 @@
 
 # Changelog
 
+### [Version 1.32.1](https://github.com/lobehub/lobe-chat/compare/v1.32.0...v1.32.1)
+
+<sup>Released on **2024-11-19**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Keyword search for chat history & sessions.
+
+#### 💄 Styles
+
+- **misc**: Support o1 models using streaming.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Keyword search for chat history & sessions, closes [#4725](https://github.com/lobehub/lobe-chat/issues/4725) ([415d772](https://github.com/lobehub/lobe-chat/commit/415d772))
+
+#### Styles
+
+- **misc**: Support o1 models using streaming, closes [#4732](https://github.com/lobehub/lobe-chat/issues/4732) ([7e9e71a](https://github.com/lobehub/lobe-chat/commit/7e9e71a))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 1.32.0](https://github.com/lobehub/lobe-chat/compare/v1.31.11...v1.32.0)
+
+<sup>Released on **2024-11-19**</sup>
+
+#### ✨ Features
+
+- **misc**: Add support InternLM (书生浦语) provider.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add support InternLM (书生浦语) provider, closes [#4711](https://github.com/lobehub/lobe-chat/issues/4711) ([aaae059](https://github.com/lobehub/lobe-chat/commit/aaae059))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.31.11](https://github.com/lobehub/lobe-chat/compare/v1.31.10...v1.31.11)
 
 <sup>Released on **2024-11-18**</sup>
package/Dockerfile
CHANGED
@@ -168,6 +168,8 @@ ENV \
     HUGGINGFACE_API_KEY="" HUGGINGFACE_MODEL_LIST="" HUGGINGFACE_PROXY_URL="" \
     # Hunyuan
     HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
+    # InternLM
+    INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
     # Minimax
     MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
     # Mistral
package/Dockerfile.database
CHANGED
@@ -203,6 +203,8 @@ ENV \
     HUGGINGFACE_API_KEY="" HUGGINGFACE_MODEL_LIST="" HUGGINGFACE_PROXY_URL="" \
     # Hunyuan
     HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
+    # InternLM
+    INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
     # Minimax
     MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
     # Mistral
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.31.11",
+  "version": "1.32.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/(main)/settings/llm/ProviderList/providers.tsx
CHANGED
@@ -10,6 +10,7 @@ import {
   GoogleProviderCard,
   GroqProviderCard,
   HunyuanProviderCard,
+  InternLMProviderCard,
   MinimaxProviderCard,
   MistralProviderCard,
   MoonshotProviderCard,
@@ -85,6 +86,7 @@ export const useProviderList = (): ProviderItem[] => {
       MinimaxProviderCard,
       Ai360ProviderCard,
       TaichuProviderCard,
+      InternLMProviderCard,
       SiliconCloudProviderCard,
     ],
     [
package/src/config/llm.ts
CHANGED
@@ -124,6 +124,9 @@ export const getLLMConfig = () => {
 
       ENABLED_XAI: z.boolean(),
       XAI_API_KEY: z.string().optional(),
+
+      ENABLED_INTERNLM: z.boolean(),
+      INTERNLM_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -246,6 +249,9 @@ export const getLLMConfig = () => {
 
       ENABLED_XAI: !!process.env.XAI_API_KEY,
       XAI_API_KEY: process.env.XAI_API_KEY,
+
+      ENABLED_INTERNLM: !!process.env.INTERNLM_API_KEY,
+      INTERNLM_API_KEY: process.env.INTERNLM_API_KEY,
     },
   });
 };
package/src/config/modelProviders/index.ts
CHANGED
@@ -14,6 +14,7 @@ import GoogleProvider from './google';
 import GroqProvider from './groq';
 import HuggingFaceProvider from './huggingface';
 import HunyuanProvider from './hunyuan';
+import InternLMProvider from './internlm';
 import MinimaxProvider from './minimax';
 import MistralProvider from './mistral';
 import MoonshotProvider from './moonshot';
@@ -69,6 +70,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   HunyuanProvider.chatModels,
   WenxinProvider.chatModels,
   SenseNovaProvider.chatModels,
+  InternLMProvider.chatModels,
 ].flat();
 
 export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -105,6 +107,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   MinimaxProvider,
   Ai360Provider,
   TaichuProvider,
+  InternLMProvider,
   SiliconCloudProvider,
 ];
 
@@ -131,6 +134,7 @@ export { default as GoogleProviderCard } from './google';
 export { default as GroqProviderCard } from './groq';
 export { default as HuggingFaceProviderCard } from './huggingface';
 export { default as HunyuanProviderCard } from './hunyuan';
+export { default as InternLMProviderCard } from './internlm';
 export { default as MinimaxProviderCard } from './minimax';
 export { default as MistralProviderCard } from './mistral';
 export { default as MoonshotProviderCard } from './moonshot';
package/src/config/modelProviders/internlm.ts
ADDED
@@ -0,0 +1,42 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const InternLM: ModelProviderCard = {
+  chatModels: [
+    {
+      description: '我们最新的模型系列,有着卓越的推理性能,支持 1M 的上下文长度以及更强的指令跟随和工具调用能力。',
+      displayName: 'InternLM2.5',
+      enabled: true,
+      functionCall: true,
+      id: 'internlm2.5-latest',
+      maxOutput: 4096,
+      pricing: {
+        input: 0,
+        output: 0,
+      },
+      tokens: 32_768,
+    },
+    {
+      description: '我们仍在维护的老版本模型,有 7B、20B 多种模型参数量可选。',
+      displayName: 'InternLM2 Pro Chat',
+      functionCall: true,
+      id: 'internlm2-pro-chat',
+      maxOutput: 4096,
+      pricing: {
+        input: 0,
+        output: 0,
+      },
+      tokens: 32_768,
+    },
+  ],
+  checkModel: 'internlm2.5-latest',
+  description:
+    '致力于大模型研究与开发工具链的开源组织。为所有 AI 开发者提供高效、易用的开源平台,让最前沿的大模型与算法技术触手可及',
+  disableBrowserRequest: true,
+  id: 'internlm',
+  modelList: { showModelFetcher: true },
+  modelsUrl: 'https://internlm.intern-ai.org.cn/doc/docs/Models#%E8%8E%B7%E5%8F%96%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8',
+  name: 'InternLM',
+  url: 'https://internlm.intern-ai.org.cn',
+};
+
+export default InternLM;
package/src/database/server/models/__tests__/session.test.ts
CHANGED
@@ -231,8 +231,18 @@ describe('SessionModel', () => {
 
     it('should return sessions with matching title', async () => {
       await serverDB.insert(sessions).values([
-        { id: '1', userId
-        { id: '2', userId
+        { id: '1', userId },
+        { id: '2', userId },
+      ]);
+
+      await serverDB.insert(agents).values([
+        { id: 'agent-1', userId, model: 'gpt-3.5-turbo', title: 'Hello, Agent 1' },
+        { id: 'agent-2', userId, model: 'gpt-4', title: 'Agent 2' },
+      ]);
+
+      await serverDB.insert(agentsToSessions).values([
+        { agentId: 'agent-1', sessionId: '1' },
+        { agentId: 'agent-2', sessionId: '2' },
       ]);
 
       const result = await sessionModel.queryByKeyword('hello');
@@ -241,9 +251,21 @@ describe('SessionModel', () => {
     });
 
     it('should return sessions with matching description', async () => {
+      // The sessions has no title and desc,
+      // see: https://github.com/lobehub/lobe-chat/pull/4725
       await serverDB.insert(sessions).values([
-        { id: '1', userId
-        { id: '2', userId
+        { id: '1', userId },
+        { id: '2', userId },
+      ]);
+
+      await serverDB.insert(agents).values([
+        { id: 'agent-1', userId, model: 'gpt-3.5-turbo', title: 'Agent 1', description: 'Description with Keyword' },
+        { id: 'agent-2', userId, model: 'gpt-4', title: 'Agent 2' },
+      ]);
+
+      await serverDB.insert(agentsToSessions).values([
+        { agentId: 'agent-1', sessionId: '1' },
+        { agentId: 'agent-2', sessionId: '2' },
       ]);
 
       const result = await sessionModel.queryByKeyword('keyword');
@@ -253,11 +275,23 @@ describe('SessionModel', () => {
 
     it('should return sessions with matching title or description', async () => {
       await serverDB.insert(sessions).values([
+        { id: '1', userId },
+        { id: '2', userId },
+        { id: '3', userId },
+      ]);
+
+      await serverDB.insert(agents).values([
         { id: '1', userId, title: 'Title with keyword', description: 'Some description' },
         { id: '2', userId, title: 'Another Session', description: 'Description with keyword' },
         { id: '3', userId, title: 'Third Session', description: 'Third description' },
       ]);
 
+      await serverDB.insert(agentsToSessions).values([
+        { agentId: '1', sessionId: '1' },
+        { agentId: '2', sessionId: '2' },
+        { agentId: '3', sessionId: '3' },
+      ]);
+
       const result = await sessionModel.queryByKeyword('keyword');
       expect(result).toHaveLength(2);
       expect(result.map((s) => s.id)).toEqual(['1', '2']);
package/src/database/server/models/session.ts
CHANGED
@@ -61,7 +61,7 @@ export class SessionModel {
 
     const keywordLowerCase = keyword.toLowerCase();
 
-    const data = await this.
+    const data = await this.findSessionsByKeywords({ keyword: keywordLowerCase });
 
     return data.map((item) => this.mapSessionItem(item as any));
   }
@@ -281,15 +281,15 @@ export class SessionModel {
         pinned !== undefined ? eq(sessions.pinned, pinned) : eq(sessions.userId, this.userId),
         keyword
           ? or(
-
-
-
-
-
-
-
-
-
+              like(
+                sql`lower(${sessions.title})` as unknown as Column,
+                `%${keyword.toLowerCase()}%`,
+              ),
+              like(
+                sql`lower(${sessions.description})` as unknown as Column,
+                `%${keyword.toLowerCase()}%`,
+              ),
+            )
           : eq(sessions.userId, this.userId),
         group ? eq(sessions.groupId, group) : isNull(sessions.groupId),
       ),
@@ -297,4 +297,37 @@ export class SessionModel {
       with: { agentsToSessions: { columns: {}, with: { agent: true } }, group: true },
     });
   }
+
+  async findSessionsByKeywords(params: {
+    current?: number;
+    keyword: string;
+    pageSize?: number;
+  }) {
+    const { keyword, pageSize = 9999, current = 0 } = params;
+    const offset = current * pageSize;
+    const results = await serverDB.query.agents.findMany({
+      limit: pageSize,
+      offset,
+      orderBy: [desc(agents.updatedAt)],
+      where: and(
+        eq(agents.userId, this.userId),
+        or(
+          like(
+            sql`lower(${agents.title})` as unknown as Column,
+            `%${keyword.toLowerCase()}%`,
+          ),
+          like(
+            sql`lower(${agents.description})` as unknown as Column,
+            `%${keyword.toLowerCase()}%`,
+          ),
+        )
+      ),
+      with: { agentsToSessions: { columns: {}, with: { session: true } } },
+    });
+    try {
+      // @ts-expect-error
+      return results.map((item) => item.agentsToSessions[0].session);
+    } catch {}
+    return []
+  }
 }
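The keyword matching introduced above lowercases both the column and the search term before applying a SQL `LIKE`, and it runs against the linked agent's `title`/`description` rather than the often-empty session fields. Below is a minimal sketch of that condition in isolation, assuming drizzle-orm as already imported by the file; the `keywordMatch` helper is illustrative only and not part of the diff.

```ts
import { like, sql, type Column } from 'drizzle-orm';

// Builds `lower(column) LIKE '%keyword%'`, the same shape the diff applies to
// agents.title and agents.description; the cast mirrors the one used above.
const keywordMatch = (column: Column, keyword: string) =>
  like(sql`lower(${column})` as unknown as Column, `%${keyword.toLowerCase()}%`);
```

Wrapped in `or(...)` over the two columns and combined with `eq(agents.userId, this.userId)`, this is what `findSessionsByKeywords` passes to `serverDB.query.agents.findMany` before mapping each matching agent back to its session through `agentsToSessions`.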
package/src/database/server/models/topic.ts
CHANGED
@@ -85,7 +85,12 @@ export class TopicModel {
           serverDB
             .select()
             .from(messages)
-            .where(
+            .where(
+              and(
+                eq(messages.topicId, topics.id),
+                matchKeyword(messages.content)
+              )
+            ),
         ),
       ),
     ),
package/src/libs/agent-runtime/AgentRuntime.ts
CHANGED
@@ -17,6 +17,7 @@ import { LobeGoogleAI } from './google';
 import { LobeGroq } from './groq';
 import { LobeHuggingFaceAI } from './huggingface';
 import { LobeHunyuanAI } from './hunyuan';
+import { LobeInternLMAI } from './internlm';
 import { LobeMinimaxAI } from './minimax';
 import { LobeMistralAI } from './mistral';
 import { LobeMoonshotAI } from './moonshot';
@@ -141,6 +142,7 @@ class AgentRuntime {
       groq: Partial<ClientOptions>;
       huggingface: { apiKey?: string; baseURL?: string };
       hunyuan: Partial<ClientOptions>;
+      internlm: Partial<ClientOptions>;
       minimax: Partial<ClientOptions>;
       mistral: Partial<ClientOptions>;
       moonshot: Partial<ClientOptions>;
@@ -335,6 +337,11 @@ class AgentRuntime {
         runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
         break;
       }
+
+      case ModelProvider.InternLM: {
+        runtimeModel = new LobeInternLMAI(params.internlm);
+        break;
+      }
     }
     return new AgentRuntime(runtimeModel);
   }
package/src/libs/agent-runtime/github/index.ts
CHANGED
@@ -10,7 +10,7 @@ export const LobeGithubAI = LobeOpenAICompatibleFactory({
       const { model } = payload;
 
       if (o1Models.has(model)) {
-        return pruneO1Payload(payload) as any;
+        return { ...pruneO1Payload(payload), stream: false } as any;
       }
 
       return { ...payload, stream: payload.stream ?? true };
package/src/libs/agent-runtime/internlm/index.test.ts
ADDED
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeInternLMAI } from './index';
+
+const provider = ModelProvider.InternLM;
+const defaultBaseURL = 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeInternLMAI({ apiKey: 'test' });
+
+  // 使用 vi.spyOn 来模拟 chat.completions.create 方法
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeInternLMAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeInternLMAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeInternLMAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'internlm2.5-latest',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeInternLMAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'internlm2.5-latest',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeInternLMAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'internlm2.5-latest',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidInternLMAIAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'internlm2.5-latest',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidInternLMAIAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'internlm2.5-latest',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_INTERNLM_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
+
+        // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // 保存原始环境变量值
+        const originalDebugValue = process.env.DEBUG_INTERNLM_CHAT_COMPLETION;
+
+        // 模拟环境变量
+        process.env.DEBUG_INTERNLM_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // 执行测试
+        // 运行你的测试函数,确保它会在条件满足时调用 debugStream
+        // 假设的测试函数调用,你可能需要根据实际情况调整
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'internlm2.5-latest',
+          stream: true,
+          temperature: 0,
+        });
+
+        // 验证 debugStream 被调用
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // 恢复原始环境变量值
+        process.env.DEBUG_INTERNLM_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
package/src/libs/agent-runtime/internlm/index.ts
ADDED
@@ -0,0 +1,18 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeInternLMAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1',
+  chatCompletion: {
+    handlePayload: (payload) => {
+      return {
+        ...payload,
+        stream: !payload.tools,
+      } as any;
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.InternLM,
+});
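For orientation, here is a minimal usage sketch of the new runtime, following the constructor and `chat` call shape exercised in the test file above. The API key is a placeholder, and in the app this class is normally constructed through `AgentRuntime` rather than called directly.

```ts
import { LobeInternLMAI } from './index';

// Placeholder credentials; in LobeChat the key comes from INTERNLM_API_KEY or the user's key vault.
const runtime = new LobeInternLMAI({ apiKey: 'sk-placeholder' });

// Per handlePayload above, the request streams unless `tools` is present in the payload.
await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'internlm2.5-latest',
  temperature: 0,
});
```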
package/src/server/modules/AgentRuntime/index.ts
CHANGED
@@ -293,6 +293,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
       const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);
 
+      return { apiKey };
+    }
+    case ModelProvider.InternLM: {
+      const { INTERNLM_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || INTERNLM_API_KEY);
+
       return { apiKey };
     }
   }
package/src/store/chat/slices/topic/selectors.ts
CHANGED
@@ -42,7 +42,7 @@ const currentActiveTopicSummary = (s: ChatStoreState): ChatTopicSummary | undefined => {
 const isCreatingTopic = (s: ChatStoreState) => s.creatingTopic;
 
 const groupedTopicsSelector = (s: ChatStoreState): GroupedTopic[] => {
-  const topics =
+  const topics = displayTopics(s);
 
   if (!topics) return [];
   const favTopics = currentFavTopics(s);
package/src/types/user/settings/keyVaults.ts
CHANGED
@@ -46,6 +46,7 @@ export interface UserKeyVaults {
   groq?: OpenAICompatibleKeyVault;
   huggingface?: OpenAICompatibleKeyVault;
   hunyuan?: OpenAICompatibleKeyVault;
+  internlm?: OpenAICompatibleKeyVault;
   lobehub?: any;
   minimax?: OpenAICompatibleKeyVault;
   mistral?: OpenAICompatibleKeyVault;
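Finally, a sketch of what the new `internlm` entry could look like in a user's key vault settings. The fields of `OpenAICompatibleKeyVault` are not shown in this diff, so the `apiKey` field below is an assumption based on the other OpenAI-compatible providers.

```ts
import type { UserKeyVaults } from '@/types/user/settings/keyVaults';

// Assumed shape: OpenAI-compatible vaults typically carry an apiKey (and optionally a baseURL).
export const keyVaults: Partial<UserKeyVaults> = {
  internlm: { apiKey: 'sk-placeholder' },
};
```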