@lobehub/chat 1.31.11 → 1.32.0

package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@

  # Changelog

+ ## [Version 1.32.0](https://github.com/lobehub/lobe-chat/compare/v1.31.11...v1.32.0)
+
+ <sup>Released on **2024-11-19**</sup>
+
+ #### ✨ Features
+
+ - **misc**: Add support InternLM (书生浦语) provider.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's improved
+
+ - **misc**: Add support InternLM (书生浦语) provider, closes [#4711](https://github.com/lobehub/lobe-chat/issues/4711) ([aaae059](https://github.com/lobehub/lobe-chat/commit/aaae059))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.31.11](https://github.com/lobehub/lobe-chat/compare/v1.31.10...v1.31.11)

  <sup>Released on **2024-11-18**</sup>
package/Dockerfile CHANGED
@@ -168,6 +168,8 @@ ENV \
    HUGGINGFACE_API_KEY="" HUGGINGFACE_MODEL_LIST="" HUGGINGFACE_PROXY_URL="" \
    # Hunyuan
    HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
+   # InternLM
+   INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
    # Minimax
    MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
    # Mistral
@@ -203,6 +203,8 @@ ENV \
    HUGGINGFACE_API_KEY="" HUGGINGFACE_MODEL_LIST="" HUGGINGFACE_PROXY_URL="" \
    # Hunyuan
    HUNYUAN_API_KEY="" HUNYUAN_MODEL_LIST="" \
+   # InternLM
+   INTERNLM_API_KEY="" INTERNLM_MODEL_LIST="" \
    # Minimax
    MINIMAX_API_KEY="" MINIMAX_MODEL_LIST="" \
    # Mistral
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.31.11",
+   "version": "1.32.0",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -10,6 +10,7 @@ import {
    GoogleProviderCard,
    GroqProviderCard,
    HunyuanProviderCard,
+   InternLMProviderCard,
    MinimaxProviderCard,
    MistralProviderCard,
    MoonshotProviderCard,
@@ -85,6 +86,7 @@ export const useProviderList = (): ProviderItem[] => {
      MinimaxProviderCard,
      Ai360ProviderCard,
      TaichuProviderCard,
+     InternLMProviderCard,
      SiliconCloudProviderCard,
    ],
    [
package/src/config/llm.ts CHANGED
@@ -124,6 +124,9 @@ export const getLLMConfig = () => {

      ENABLED_XAI: z.boolean(),
      XAI_API_KEY: z.string().optional(),
+
+     ENABLED_INTERNLM: z.boolean(),
+     INTERNLM_API_KEY: z.string().optional(),
    },
    runtimeEnv: {
      API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -246,6 +249,9 @@ export const getLLMConfig = () => {

      ENABLED_XAI: !!process.env.XAI_API_KEY,
      XAI_API_KEY: process.env.XAI_API_KEY,
+
+     ENABLED_INTERNLM: !!process.env.INTERNLM_API_KEY,
+     INTERNLM_API_KEY: process.env.INTERNLM_API_KEY,
    },
  });
};
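
As with the other providers in this schema, the `ENABLED_INTERNLM` flag is derived from the mere presence of the API key, so setting `INTERNLM_API_KEY` (for example via the Dockerfile variables above) is enough to enable the provider server-side. A minimal standalone sketch of that pattern, not the actual `getLLMConfig` implementation:

```ts
// Minimal sketch: a provider counts as enabled when its key is a non-empty string.
const INTERNLM_API_KEY = process.env.INTERNLM_API_KEY;

const llmConfig = {
  ENABLED_INTERNLM: !!INTERNLM_API_KEY, // false for undefined or ''
  INTERNLM_API_KEY,
};
```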
@@ -14,6 +14,7 @@ import GoogleProvider from './google';
  import GroqProvider from './groq';
  import HuggingFaceProvider from './huggingface';
  import HunyuanProvider from './hunyuan';
+ import InternLMProvider from './internlm';
  import MinimaxProvider from './minimax';
  import MistralProvider from './mistral';
  import MoonshotProvider from './moonshot';
@@ -69,6 +70,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
    HunyuanProvider.chatModels,
    WenxinProvider.chatModels,
    SenseNovaProvider.chatModels,
+   InternLMProvider.chatModels,
  ].flat();

  export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -105,6 +107,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
    MinimaxProvider,
    Ai360Provider,
    TaichuProvider,
+   InternLMProvider,
    SiliconCloudProvider,
  ];

@@ -131,6 +134,7 @@ export { default as GoogleProviderCard } from './google';
  export { default as GroqProviderCard } from './groq';
  export { default as HuggingFaceProviderCard } from './huggingface';
  export { default as HunyuanProviderCard } from './hunyuan';
+ export { default as InternLMProviderCard } from './internlm';
  export { default as MinimaxProviderCard } from './minimax';
  export { default as MistralProviderCard } from './mistral';
  export { default as MoonshotProviderCard } from './moonshot';
@@ -0,0 +1,42 @@
+ import { ModelProviderCard } from '@/types/llm';
+
+ const InternLM: ModelProviderCard = {
+   chatModels: [
+     {
+       description: 'Our latest model series, with outstanding reasoning performance, support for a 1M-token context length, and stronger instruction-following and tool-calling capabilities.',
+       displayName: 'InternLM2.5',
+       enabled: true,
+       functionCall: true,
+       id: 'internlm2.5-latest',
+       maxOutput: 4096,
+       pricing: {
+         input: 0,
+         output: 0,
+       },
+       tokens: 32_768,
+     },
+     {
+       description: 'An older generation that we still maintain, available in 7B and 20B parameter sizes.',
+       displayName: 'InternLM2 Pro Chat',
+       functionCall: true,
+       id: 'internlm2-pro-chat',
+       maxOutput: 4096,
+       pricing: {
+         input: 0,
+         output: 0,
+       },
+       tokens: 32_768,
+     },
+   ],
+   checkModel: 'internlm2.5-latest',
+   description:
+     'An open-source organization dedicated to large-model research and development tooling. It offers all AI developers an efficient, easy-to-use open-source platform that puts the most advanced large models and algorithms within reach.',
+   disableBrowserRequest: true,
+   id: 'internlm',
+   modelList: { showModelFetcher: true },
+   modelsUrl: 'https://internlm.intern-ai.org.cn/doc/docs/Models#%E8%8E%B7%E5%8F%96%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8',
+   name: 'InternLM',
+   url: 'https://internlm.intern-ai.org.cn',
+ };
+
+ export default InternLM;
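
Each `ModelProviderCard` bundles the provider's metadata with its model catalogue; only entries marked `enabled: true` surface by default, and `checkModel` names the model used for connectivity checks in the provider settings. A small sketch of reading the card (hypothetical standalone code, not from the repo):

```ts
import InternLM from './internlm';

// Only internlm2.5-latest is enabled by default; InternLM2 Pro Chat
// stays available but hidden until the user switches it on.
const defaultModels = InternLM.chatModels.filter((m) => m.enabled);

console.log(defaultModels.map((m) => m.id)); // ['internlm2.5-latest']
console.log(InternLM.checkModel); // 'internlm2.5-latest'
```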
@@ -17,6 +17,7 @@ import { LobeGoogleAI } from './google';
  import { LobeGroq } from './groq';
  import { LobeHuggingFaceAI } from './huggingface';
  import { LobeHunyuanAI } from './hunyuan';
+ import { LobeInternLMAI } from './internlm';
  import { LobeMinimaxAI } from './minimax';
  import { LobeMistralAI } from './mistral';
  import { LobeMoonshotAI } from './moonshot';
@@ -141,6 +142,7 @@ class AgentRuntime {
      groq: Partial<ClientOptions>;
      huggingface: { apiKey?: string; baseURL?: string };
      hunyuan: Partial<ClientOptions>;
+     internlm: Partial<ClientOptions>;
      minimax: Partial<ClientOptions>;
      mistral: Partial<ClientOptions>;
      moonshot: Partial<ClientOptions>;
@@ -335,6 +337,11 @@ class AgentRuntime {
        runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
        break;
      }
+
+     case ModelProvider.InternLM: {
+       runtimeModel = new LobeInternLMAI(params.internlm);
+       break;
+     }
    }
    return new AgentRuntime(runtimeModel);
  }
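
The new branch is reached through the class's static provider dispatch. Note that `params.internlm` is passed without the `?? {}` fallback the cloudflare case uses; the OpenAI-compatible factory rejects a missing key with `InvalidProviderAPIKey`, which the test file below exercises. A rough sketch of a call site, with the initializer name (`initializeWithProviderOptions`) assumed rather than shown in this hunk:

```ts
// Hypothetical call site; only the switch body above appears in the diff.
const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.InternLM, {
  internlm: { apiKey: process.env.INTERNLM_API_KEY },
});
```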
@@ -0,0 +1,255 @@
+ // @vitest-environment node
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import {
+   ChatStreamCallbacks,
+   LobeOpenAICompatibleRuntime,
+   ModelProvider,
+ } from '@/libs/agent-runtime';
+
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeInternLMAI } from './index';
+
+ const provider = ModelProvider.InternLM;
+ const defaultBaseURL = 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1';
+
+ const bizErrorType = 'ProviderBizError';
+ const invalidErrorType = 'InvalidProviderAPIKey';
+
+ // Mock the console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+
+ let instance: LobeOpenAICompatibleRuntime;
+
+ beforeEach(() => {
+   instance = new LobeInternLMAI({ apiKey: 'test' });
+
+   // Use vi.spyOn to mock the chat.completions.create method
+   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+     new ReadableStream() as any,
+   );
+ });
+
+ afterEach(() => {
+   vi.clearAllMocks();
+ });
+
+ describe('LobeInternLMAI', () => {
+   describe('init', () => {
+     it('should correctly initialize with an API key', async () => {
+       const instance = new LobeInternLMAI({ apiKey: 'test_api_key' });
+       expect(instance).toBeInstanceOf(LobeInternLMAI);
+       expect(instance.baseURL).toEqual(defaultBaseURL);
+     });
+   });
+
+   describe('chat', () => {
+     describe('Error', () => {
+       it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+         // Arrange
+         const apiError = new OpenAI.APIError(
+           400,
+           {
+             status: 400,
+             error: {
+               message: 'Bad Request',
+             },
+           },
+           'Error message',
+           {},
+         );
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               error: { message: 'Bad Request' },
+               status: 400,
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+         try {
+           new LobeInternLMAI({});
+         } catch (e) {
+           expect(e).toEqual({ errorType: invalidErrorType });
+         }
+       });
+
+       it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: {
+             message: 'api is undefined',
+           },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: { message: 'api is undefined' },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         instance = new LobeInternLMAI({
+           apiKey: 'test',
+
+           baseURL: 'https://api.abc.com/v1',
+         });
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: 'https://api.***.com/v1',
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw an InvalidInternLMAIAPIKey error type on 401 status code', async () => {
+         // Mock the API call to simulate a 401 error
+         const error = new Error('Unauthorized') as any;
+         error.status = 401;
+         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           // Expect the chat method to throw an error with InvalidInternLMAIAPIKey
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: new Error('Unauthorized'),
+             errorType: invalidErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+         // Arrange
+         const genericError = new Error('Generic Error');
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             errorType: 'AgentRuntimeError',
+             provider,
+             error: {
+               name: genericError.name,
+               cause: genericError.cause,
+               message: genericError.message,
+               stack: genericError.stack,
+             },
+           });
+         }
+       });
+     });
+
+     describe('DEBUG', () => {
+       it('should call debugStream and return StreamingTextResponse when DEBUG_INTERNLM_CHAT_COMPLETION is 1', async () => {
+         // Arrange
+         const mockProdStream = new ReadableStream() as any; // mocked prod stream
+         const mockDebugStream = new ReadableStream({
+           start(controller) {
+             controller.enqueue('Debug stream content');
+             controller.close();
+           },
+         }) as any;
+         mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+         // Mock the return value of chat.completions.create, including a mocked tee method
+         (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+           tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+         });
+
+         // Save the original environment variable value
+         const originalDebugValue = process.env.DEBUG_INTERNLM_CHAT_COMPLETION;
+
+         // Mock the environment variable
+         process.env.DEBUG_INTERNLM_CHAT_COMPLETION = '1';
+         vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+         // Run the test
+         // Invoke your test function and make sure it calls debugStream when the condition is met
+         // Hypothetical invocation; adjust as needed for your actual setup
+         await instance.chat({
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'internlm2.5-latest',
+           stream: true,
+           temperature: 0,
+         });
+
+         // Verify that debugStream was called
+         expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+         // Restore the original environment variable value
+         process.env.DEBUG_INTERNLM_CHAT_COMPLETION = originalDebugValue;
+       });
+     });
+   });
+ });
@@ -0,0 +1,18 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+ export const LobeInternLMAI = LobeOpenAICompatibleFactory({
+   baseURL: 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1',
+   chatCompletion: {
+     handlePayload: (payload) => {
+       return {
+         ...payload,
+         stream: !payload.tools,
+       } as any;
+     },
+   },
+   debug: {
+     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
+   },
+   provider: ModelProvider.InternLM,
+ });
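
The only provider-specific behaviour is in `handlePayload`: streaming is enabled only when the request carries no `tools`, which suggests the InternLM endpoint does not stream tool-call responses (the diff itself does not state the reason). A standalone illustration of the rewrite:

```ts
// Standalone sketch of the payload rewrite above.
const handlePayload = (payload: { stream?: boolean; tools?: unknown[] }) => ({
  ...payload,
  stream: !payload.tools, // stream only when no tools are attached
});

handlePayload({}).stream;              // true  -> plain chat streams
handlePayload({ tools: [{}] }).stream; // false -> tool calls go non-streaming
```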
@@ -36,6 +36,7 @@ export enum ModelProvider {
    Groq = 'groq',
    HuggingFace = 'huggingface',
    Hunyuan = 'hunyuan',
+   InternLM = 'internlm',
    Minimax = 'minimax',
    Mistral = 'mistral',
    Moonshot = 'moonshot',
@@ -293,6 +293,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

        const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);

+       return { apiKey };
+     }
+     case ModelProvider.InternLM: {
+       const { INTERNLM_API_KEY } = getLLMConfig();
+
+       const apiKey = apiKeyManager.pick(payload?.apiKey || INTERNLM_API_KEY);
+
        return { apiKey };
      }
    }
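
Key resolution follows the same precedence as every other case in this switch: a key supplied by the user in the JWT payload wins over the server-side `INTERNLM_API_KEY`, and `apiKeyManager.pick` then chooses one key from the resolved value (the `API_KEY_SELECT_MODE` variable seen in `llm.ts` suggests the manager can select among multiple keys). A hypothetical stand-in for that behaviour, assuming comma-separated key pools and a random select mode:

```ts
// pickOne is a hypothetical stand-in for apiKeyManager.pick, not the real API.
const pickOne = (userKey?: string, serverKey?: string): string | undefined => {
  const pool = (userKey || serverKey)
    ?.split(',')
    .map((k) => k.trim())
    .filter(Boolean);
  if (!pool?.length) return undefined;
  return pool[Math.floor(Math.random() * pool.length)];
};

pickOne('sk-user', 'sk-a,sk-b'); // always 'sk-user'
pickOne(undefined, 'sk-a,sk-b'); // 'sk-a' or 'sk-b'
```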
@@ -46,6 +46,7 @@ export interface UserKeyVaults {
    groq?: OpenAICompatibleKeyVault;
    huggingface?: OpenAICompatibleKeyVault;
    hunyuan?: OpenAICompatibleKeyVault;
+   internlm?: OpenAICompatibleKeyVault;
    lobehub?: any;
    minimax?: OpenAICompatibleKeyVault;
    mistral?: OpenAICompatibleKeyVault;