@lobehub/chat 1.31.10 → 1.32.0

This diff compares two publicly available versions of the package as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (53)
  1. package/CHANGELOG.md +50 -0
  2. package/Dockerfile +2 -0
  3. package/Dockerfile.database +2 -0
  4. package/locales/ar/error.json +1 -0
  5. package/locales/ar/models.json +3 -9
  6. package/locales/bg-BG/error.json +1 -0
  7. package/locales/bg-BG/models.json +3 -9
  8. package/locales/de-DE/error.json +1 -0
  9. package/locales/de-DE/models.json +3 -9
  10. package/locales/en-US/error.json +1 -0
  11. package/locales/en-US/models.json +3 -9
  12. package/locales/es-ES/error.json +1 -0
  13. package/locales/es-ES/models.json +4 -10
  14. package/locales/fa-IR/error.json +1 -0
  15. package/locales/fa-IR/models.json +3 -9
  16. package/locales/fr-FR/error.json +1 -0
  17. package/locales/fr-FR/models.json +4 -10
  18. package/locales/it-IT/error.json +1 -0
  19. package/locales/it-IT/models.json +3 -9
  20. package/locales/ja-JP/error.json +1 -0
  21. package/locales/ja-JP/models.json +3 -9
  22. package/locales/ko-KR/error.json +1 -0
  23. package/locales/ko-KR/models.json +3 -9
  24. package/locales/nl-NL/error.json +1 -0
  25. package/locales/nl-NL/models.json +3 -9
  26. package/locales/pl-PL/error.json +1 -0
  27. package/locales/pl-PL/models.json +4 -10
  28. package/locales/pt-BR/error.json +1 -0
  29. package/locales/pt-BR/models.json +3 -9
  30. package/locales/ru-RU/error.json +1 -0
  31. package/locales/ru-RU/models.json +3 -9
  32. package/locales/tr-TR/error.json +1 -0
  33. package/locales/tr-TR/models.json +3 -9
  34. package/locales/vi-VN/error.json +1 -0
  35. package/locales/vi-VN/models.json +3 -9
  36. package/locales/zh-CN/error.json +2 -1
  37. package/locales/zh-CN/models.json +3 -9
  38. package/locales/zh-TW/error.json +1 -0
  39. package/locales/zh-TW/models.json +3 -9
  40. package/package.json +3 -3
  41. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +2 -0
  42. package/src/app/(main)/settings/llm/components/Checker.tsx +9 -2
  43. package/src/config/llm.ts +6 -0
  44. package/src/config/modelProviders/index.ts +4 -0
  45. package/src/config/modelProviders/internlm.ts +42 -0
  46. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  47. package/src/libs/agent-runtime/error.ts +2 -0
  48. package/src/libs/agent-runtime/internlm/index.test.ts +255 -0
  49. package/src/libs/agent-runtime/internlm/index.ts +18 -0
  50. package/src/libs/agent-runtime/types/type.ts +1 -0
  51. package/src/locales/default/error.ts +1 -0
  52. package/src/server/modules/AgentRuntime/index.ts +7 -0
  53. package/src/types/user/settings/keyVaults.ts +1 -0
package/src/libs/agent-runtime/internlm/index.test.ts
@@ -0,0 +1,255 @@
+ // @vitest-environment node
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import {
+   ChatStreamCallbacks,
+   LobeOpenAICompatibleRuntime,
+   ModelProvider,
+ } from '@/libs/agent-runtime';
+
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeInternLMAI } from './index';
+
+ const provider = ModelProvider.InternLM;
+ const defaultBaseURL = 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1';
+
+ const bizErrorType = 'ProviderBizError';
+ const invalidErrorType = 'InvalidProviderAPIKey';
+
+ // Mock console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+
+ let instance: LobeOpenAICompatibleRuntime;
+
+ beforeEach(() => {
+   instance = new LobeInternLMAI({ apiKey: 'test' });
+
+   // Use vi.spyOn to mock the chat.completions.create method
+   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+     new ReadableStream() as any,
+   );
+ });
+
+ afterEach(() => {
+   vi.clearAllMocks();
+ });
+
+ describe('LobeInternLMAI', () => {
+   describe('init', () => {
+     it('should correctly initialize with an API key', async () => {
+       const instance = new LobeInternLMAI({ apiKey: 'test_api_key' });
+       expect(instance).toBeInstanceOf(LobeInternLMAI);
+       expect(instance.baseURL).toEqual(defaultBaseURL);
+     });
+   });
+
+   describe('chat', () => {
+     describe('Error', () => {
+       it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+         // Arrange
+         const apiError = new OpenAI.APIError(
+           400,
+           {
+             status: 400,
+             error: {
+               message: 'Bad Request',
+             },
+           },
+           'Error message',
+           {},
+         );
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               error: { message: 'Bad Request' },
+               status: 400,
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+         try {
+           new LobeInternLMAI({});
+         } catch (e) {
+           expect(e).toEqual({ errorType: invalidErrorType });
+         }
+       });
+
+       it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: {
+             message: 'api is undefined',
+           },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return OpenAIBizError with a cause response with a desensitized URL', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: { message: 'api is undefined' },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         instance = new LobeInternLMAI({
+           apiKey: 'test',
+
+           baseURL: 'https://api.abc.com/v1',
+         });
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: 'https://api.***.com/v1',
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw an InvalidInternLMAIAPIKey error type on 401 status code', async () => {
+         // Mock the API call to simulate a 401 error
+         const error = new Error('Unauthorized') as any;
+         error.status = 401;
+         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           // Expect the chat method to throw an error with InvalidInternLMAIAPIKey
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: new Error('Unauthorized'),
+             errorType: invalidErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+         // Arrange
+         const genericError = new Error('Generic Error');
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'internlm2.5-latest',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             errorType: 'AgentRuntimeError',
+             provider,
+             error: {
+               name: genericError.name,
+               cause: genericError.cause,
+               message: genericError.message,
+               stack: genericError.stack,
+             },
+           });
+         }
+       });
+     });
+
+     describe('DEBUG', () => {
+       it('should call debugStream and return StreamingTextResponse when DEBUG_INTERNLM_CHAT_COMPLETION is 1', async () => {
+         // Arrange
+         const mockProdStream = new ReadableStream() as any; // mocked prod stream
+         const mockDebugStream = new ReadableStream({
+           start(controller) {
+             controller.enqueue('Debug stream content');
+             controller.close();
+           },
+         }) as any;
+         mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+         // Mock the chat.completions.create return value, including a mocked tee method
+         (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+           tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+         });
+
+         // Save the original value of the environment variable
+         const originalDebugValue = process.env.DEBUG_INTERNLM_CHAT_COMPLETION;
+
+         // Mock the environment variable
+         process.env.DEBUG_INTERNLM_CHAT_COMPLETION = '1';
+         vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+         // Execute the test
+         // Run the function under test and make sure it calls debugStream when the condition is met
+         // This is the assumed invocation; adjust it to the actual setup if needed
+         await instance.chat({
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'internlm2.5-latest',
+           stream: true,
+           temperature: 0,
+         });
+
+         // Verify that debugStream was called
+         expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+         // Restore the original value of the environment variable
+         process.env.DEBUG_INTERNLM_CHAT_COMPLETION = originalDebugValue;
+       });
+     });
+   });
+ });
package/src/libs/agent-runtime/internlm/index.ts
@@ -0,0 +1,18 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+ export const LobeInternLMAI = LobeOpenAICompatibleFactory({
+   baseURL: 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1',
+   chatCompletion: {
+     handlePayload: (payload) => {
+       return {
+         ...payload,
+         stream: !payload.tools,
+       } as any;
+     },
+   },
+   debug: {
+     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
+   },
+   provider: ModelProvider.InternLM,
+ });
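
For orientation, a minimal sketch of how this runtime is exercised, mirroring the usage in the test file above. The model id 'internlm2.5-latest' comes from those tests, and the import path is illustrative; note the handlePayload hook above disables streaming whenever tools are passed:

  import { LobeInternLMAI } from '@/libs/agent-runtime/internlm';

  // Hypothetical direct usage; in lobe-chat the runtime is normally constructed via AgentRuntime.
  const runtime = new LobeInternLMAI({ apiKey: process.env.INTERNLM_API_KEY });

  // Streams by default; with `tools` in the payload, stream would be forced to false.
  const response = await runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'internlm2.5-latest',
    temperature: 0,
  });
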
package/src/libs/agent-runtime/types/type.ts
@@ -36,6 +36,7 @@ export enum ModelProvider {
  Groq = 'groq',
  HuggingFace = 'huggingface',
  Hunyuan = 'hunyuan',
+ InternLM = 'internlm',
  Minimax = 'minimax',
  Mistral = 'mistral',
  Moonshot = 'moonshot',
package/src/locales/default/error.ts
@@ -124,6 +124,7 @@ export default {

    // Github Token
    InvalidGithubToken: 'Github PAT 不正确或为空,请检查 Github PAT 后重试',
+   ConnectionCheckFailed: '请求返回为空,请检查 API 代理地址末尾是否未包含 `/v1`',

    /* eslint-enable */
  },

(These are the zh-CN default locale strings, left verbatim; the new ConnectionCheckFailed message reads, roughly: "The response was empty; please check whether the API proxy URL is missing the trailing `/v1`.")
package/src/server/modules/AgentRuntime/index.ts
@@ -293,6 +293,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

      const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);

+     return { apiKey };
+   }
+   case ModelProvider.InternLM: {
+     const { INTERNLM_API_KEY } = getLLMConfig();
+
+     const apiKey = apiKeyManager.pick(payload?.apiKey || INTERNLM_API_KEY);
+
      return { apiKey };
    }
  }
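
The new case follows the same key-resolution pattern as the other providers: a key carried in the JWT payload takes precedence, otherwise the server-side INTERNLM_API_KEY read via getLLMConfig() is used. A simplified sketch of that fallback, assuming apiKeyManager.pick essentially selects from whatever key material it is given (it may also handle multiple comma-separated keys, which is omitted here):

  // Hypothetical, simplified stand-in for apiKeyManager.pick:
  // prefer the user's own key from the request payload, fall back to the server env key.
  const pickInternLMKey = (payloadApiKey?: string, envApiKey?: string): string | undefined =>
    payloadApiKey?.trim() || envApiKey;

  // With no user-supplied key, the INTERNLM_API_KEY env var is used:
  pickInternLMKey(undefined, process.env.INTERNLM_API_KEY);
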
package/src/types/user/settings/keyVaults.ts
@@ -46,6 +46,7 @@ export interface UserKeyVaults {
  groq?: OpenAICompatibleKeyVault;
  huggingface?: OpenAICompatibleKeyVault;
  hunyuan?: OpenAICompatibleKeyVault;
+ internlm?: OpenAICompatibleKeyVault;
  lobehub?: any;
  minimax?: OpenAICompatibleKeyVault;
  mistral?: OpenAICompatibleKeyVault;
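
Assuming OpenAICompatibleKeyVault keeps the shape suggested by the constructor options used in the tests above (an optional apiKey plus an optional baseURL), a user's vault entry for the new provider would look roughly like:

  // Hypothetical vault entry; field names follow the options passed to LobeInternLMAI above.
  const keyVaults: UserKeyVaults = {
    internlm: {
      apiKey: 'sk-...', // user-scoped key, takes precedence over the server's INTERNLM_API_KEY
      baseURL: 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1', // optional endpoint/proxy override
    },
  };
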