@lobehub/chat 0.158.2 → 0.159.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/.env.example +4 -0
  2. package/CHANGELOG.md +25 -0
  3. package/Dockerfile +3 -0
  4. package/README.md +1 -0
  5. package/README.zh-CN.md +2 -0
  6. package/docs/self-hosting/environment-variables/model-provider.mdx +16 -7
  7. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +16 -7
  8. package/docs/usage/features/multi-ai-providers.mdx +1 -0
  9. package/docs/usage/features/multi-ai-providers.zh-CN.mdx +1 -0
  10. package/locales/ar/error.json +2 -0
  11. package/locales/ar/modelProvider.json +12 -0
  12. package/locales/bg-BG/error.json +2 -0
  13. package/locales/bg-BG/modelProvider.json +12 -0
  14. package/locales/de-DE/error.json +2 -0
  15. package/locales/de-DE/modelProvider.json +12 -0
  16. package/locales/en-US/error.json +2 -0
  17. package/locales/en-US/modelProvider.json +12 -0
  18. package/locales/es-ES/error.json +2 -0
  19. package/locales/es-ES/modelProvider.json +12 -0
  20. package/locales/fr-FR/error.json +2 -0
  21. package/locales/fr-FR/modelProvider.json +12 -0
  22. package/locales/it-IT/error.json +2 -0
  23. package/locales/it-IT/modelProvider.json +12 -0
  24. package/locales/ja-JP/error.json +2 -0
  25. package/locales/ja-JP/modelProvider.json +12 -0
  26. package/locales/ko-KR/error.json +2 -0
  27. package/locales/ko-KR/modelProvider.json +12 -0
  28. package/locales/nl-NL/error.json +2 -0
  29. package/locales/nl-NL/modelProvider.json +12 -0
  30. package/locales/pl-PL/error.json +2 -0
  31. package/locales/pl-PL/modelProvider.json +12 -0
  32. package/locales/pt-BR/error.json +2 -0
  33. package/locales/pt-BR/modelProvider.json +12 -0
  34. package/locales/ru-RU/error.json +2 -0
  35. package/locales/ru-RU/modelProvider.json +12 -0
  36. package/locales/tr-TR/error.json +2 -0
  37. package/locales/tr-TR/modelProvider.json +12 -0
  38. package/locales/vi-VN/error.json +2 -0
  39. package/locales/vi-VN/modelProvider.json +12 -0
  40. package/locales/zh-CN/error.json +2 -0
  41. package/locales/zh-CN/modelProvider.json +12 -0
  42. package/locales/zh-TW/error.json +2 -0
  43. package/locales/zh-TW/modelProvider.json +12 -0
  44. package/package.json +1 -1
  45. package/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +13 -15
  46. package/src/app/(main)/settings/llm/DeepSeek/index.tsx +21 -0
  47. package/src/app/(main)/settings/llm/index.tsx +2 -0
  48. package/src/app/api/chat/agentRuntime.test.ts +17 -0
  49. package/src/app/api/chat/agentRuntime.ts +5 -0
  50. package/src/app/api/errorResponse.test.ts +6 -0
  51. package/src/app/api/errorResponse.ts +3 -0
  52. package/src/components/ModelIcon/index.tsx +2 -0
  53. package/src/components/ModelProviderIcon/index.tsx +5 -0
  54. package/src/components/ModelTag/ModelIcon.tsx +2 -0
  55. package/src/config/modelProviders/deepseek.ts +23 -0
  56. package/src/config/modelProviders/index.ts +4 -0
  57. package/src/config/server/provider.ts +10 -0
  58. package/src/const/settings/index.ts +6 -0
  59. package/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx +5 -0
  60. package/src/features/Conversation/Error/APIKeyForm/index.tsx +4 -0
  61. package/src/features/Conversation/Error/index.tsx +1 -0
  62. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  63. package/src/libs/agent-runtime/deepseek/index.test.ts +254 -0
  64. package/src/libs/agent-runtime/deepseek/index.ts +15 -0
  65. package/src/libs/agent-runtime/error.ts +3 -0
  66. package/src/libs/agent-runtime/index.ts +1 -0
  67. package/src/libs/agent-runtime/types/type.ts +1 -0
  68. package/src/locales/default/error.ts +3 -0
  69. package/src/locales/default/modelProvider.ts +12 -0
  70. package/src/migrations/FromV3ToV4/types/v3.ts +0 -8
  71. package/src/server/globalConfig/index.ts +2 -0
  72. package/src/services/__tests__/chat.test.ts +16 -0
  73. package/src/services/chat.ts +3 -0
  74. package/src/store/user/slices/settings/actions/llm.ts +2 -0
  75. package/src/types/settings/modelProvider.ts +1 -0
@@ -6,6 +6,7 @@ import { LobeRuntimeAI } from './BaseAI';
6
6
  import { LobeAnthropicAI } from './anthropic';
7
7
  import { LobeAzureOpenAI } from './azureOpenai';
8
8
  import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
9
+ import { LobeDeepSeekAI } from './deepseek';
9
10
  import { LobeGoogleAI } from './google';
10
11
  import { LobeGroq } from './groq';
11
12
  import { LobeMinimaxAI } from './minimax';
@@ -101,6 +102,7 @@ class AgentRuntime {
101
102
  anthropic: Partial<ClientOptions>;
102
103
  azure: { apiVersion?: string; apikey?: string; endpoint?: string };
103
104
  bedrock: Partial<LobeBedrockAIParams>;
105
+ deepseek: Partial<ClientOptions>;
104
106
  google: { apiKey?: string; baseURL?: string };
105
107
  groq: Partial<ClientOptions>;
106
108
  minimax: Partial<ClientOptions>;
@@ -169,6 +171,11 @@ class AgentRuntime {
169
171
  break;
170
172
  }
171
173
 
174
+ case ModelProvider.DeepSeek: {
175
+ runtimeModel = new LobeDeepSeekAI(params.deepseek ?? {});
176
+ break;
177
+ }
178
+
172
179
  case ModelProvider.Minimax: {
173
180
  runtimeModel = new LobeMinimaxAI(params.minimax ?? {});
174
181
  break;
@@ -0,0 +1,254 @@
1
+ // @vitest-environment node
2
+ import OpenAI from 'openai';
3
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
+
5
+ import {
6
+ ChatStreamCallbacks,
7
+ LobeOpenAICompatibleRuntime,
8
+ ModelProvider,
9
+ } from '@/libs/agent-runtime';
10
+
11
+ import * as debugStreamModule from '../utils/debugStream';
12
+ import { LobeDeepSeekAI } from './index';
13
+
14
+ const provider = ModelProvider.DeepSeek;
15
+ const defaultBaseURL = 'https://api.deepseek.com/v1';
16
+ const bizErrorType = 'DeepSeekBizError';
17
+ const invalidErrorType = 'InvalidDeepSeekAPIKey';
18
+
19
+ // Mock the console.error to avoid polluting test output
20
+ vi.spyOn(console, 'error').mockImplementation(() => {});
21
+
22
+ let instance: LobeOpenAICompatibleRuntime;
23
+
24
+ beforeEach(() => {
25
+ instance = new LobeDeepSeekAI({ apiKey: 'test' });
26
+
27
+ // 使用 vi.spyOn 来模拟 chat.completions.create 方法
28
+ vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
29
+ new ReadableStream() as any,
30
+ );
31
+ });
32
+
33
+ afterEach(() => {
34
+ vi.clearAllMocks();
35
+ });
36
+
37
+ describe('LobeDeepSeekAI', () => {
38
+ describe('init', () => {
39
+ it('should correctly initialize with an API key', async () => {
40
+ const instance = new LobeDeepSeekAI({ apiKey: 'test_api_key' });
41
+ expect(instance).toBeInstanceOf(LobeDeepSeekAI);
42
+ expect(instance.baseURL).toEqual(defaultBaseURL);
43
+ });
44
+ });
45
+
46
+ describe('chat', () => {
47
+ describe('Error', () => {
48
+ it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
49
+ // Arrange
50
+ const apiError = new OpenAI.APIError(
51
+ 400,
52
+ {
53
+ status: 400,
54
+ error: {
55
+ message: 'Bad Request',
56
+ },
57
+ },
58
+ 'Error message',
59
+ {},
60
+ );
61
+
62
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
63
+
64
+ // Act
65
+ try {
66
+ await instance.chat({
67
+ messages: [{ content: 'Hello', role: 'user' }],
68
+ model: 'deepseek-chat',
69
+ temperature: 0,
70
+ });
71
+ } catch (e) {
72
+ expect(e).toEqual({
73
+ endpoint: defaultBaseURL,
74
+ error: {
75
+ error: { message: 'Bad Request' },
76
+ status: 400,
77
+ },
78
+ errorType: bizErrorType,
79
+ provider,
80
+ });
81
+ }
82
+ });
83
+
84
+ it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
85
+ try {
86
+ new LobeDeepSeekAI({});
87
+ } catch (e) {
88
+ expect(e).toEqual({ errorType: invalidErrorType });
89
+ }
90
+ });
91
+
92
+ it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
93
+ // Arrange
94
+ const errorInfo = {
95
+ stack: 'abc',
96
+ cause: {
97
+ message: 'api is undefined',
98
+ },
99
+ };
100
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
101
+
102
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
103
+
104
+ // Act
105
+ try {
106
+ await instance.chat({
107
+ messages: [{ content: 'Hello', role: 'user' }],
108
+ model: 'deepseek-chat',
109
+ temperature: 0,
110
+ });
111
+ } catch (e) {
112
+ expect(e).toEqual({
113
+ endpoint: defaultBaseURL,
114
+ error: {
115
+ cause: { message: 'api is undefined' },
116
+ stack: 'abc',
117
+ },
118
+ errorType: bizErrorType,
119
+ provider,
120
+ });
121
+ }
122
+ });
123
+
124
+ it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
125
+ // Arrange
126
+ const errorInfo = {
127
+ stack: 'abc',
128
+ cause: { message: 'api is undefined' },
129
+ };
130
+ const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
131
+
132
+ instance = new LobeDeepSeekAI({
133
+ apiKey: 'test',
134
+
135
+ baseURL: 'https://api.abc.com/v1',
136
+ });
137
+
138
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
139
+
140
+ // Act
141
+ try {
142
+ await instance.chat({
143
+ messages: [{ content: 'Hello', role: 'user' }],
144
+ model: 'deepseek-chat',
145
+ temperature: 0,
146
+ });
147
+ } catch (e) {
148
+ expect(e).toEqual({
149
+ endpoint: 'https://api.***.com/v1',
150
+ error: {
151
+ cause: { message: 'api is undefined' },
152
+ stack: 'abc',
153
+ },
154
+ errorType: bizErrorType,
155
+ provider,
156
+ });
157
+ }
158
+ });
159
+
160
+ it('should throw an InvalidDeepSeekAPIKey error type on 401 status code', async () => {
161
+ // Mock the API call to simulate a 401 error
162
+ const error = new Error('Unauthorized') as any;
163
+ error.status = 401;
164
+ vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
165
+
166
+ try {
167
+ await instance.chat({
168
+ messages: [{ content: 'Hello', role: 'user' }],
169
+ model: 'deepseek-chat',
170
+ temperature: 0,
171
+ });
172
+ } catch (e) {
173
+ // Expect the chat method to throw an error with InvalidDeepSeekAPIKey
174
+ expect(e).toEqual({
175
+ endpoint: defaultBaseURL,
176
+ error: new Error('Unauthorized'),
177
+ errorType: invalidErrorType,
178
+ provider,
179
+ });
180
+ }
181
+ });
182
+
183
+ it('should return AgentRuntimeError for non-OpenAI errors', async () => {
184
+ // Arrange
185
+ const genericError = new Error('Generic Error');
186
+
187
+ vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
188
+
189
+ // Act
190
+ try {
191
+ await instance.chat({
192
+ messages: [{ content: 'Hello', role: 'user' }],
193
+ model: 'deepseek-chat',
194
+ temperature: 0,
195
+ });
196
+ } catch (e) {
197
+ expect(e).toEqual({
198
+ endpoint: defaultBaseURL,
199
+ errorType: 'AgentRuntimeError',
200
+ provider,
201
+ error: {
202
+ name: genericError.name,
203
+ cause: genericError.cause,
204
+ message: genericError.message,
205
+ stack: genericError.stack,
206
+ },
207
+ });
208
+ }
209
+ });
210
+ });
211
+
212
+ describe('DEBUG', () => {
213
+ it('should call debugStream and return StreamingTextResponse when DEBUG_DEEPSEEK_CHAT_COMPLETION is 1', async () => {
214
+ // Arrange
215
+ const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
216
+ const mockDebugStream = new ReadableStream({
217
+ start(controller) {
218
+ controller.enqueue('Debug stream content');
219
+ controller.close();
220
+ },
221
+ }) as any;
222
+ mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
223
+
224
+ // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
225
+ (instance['client'].chat.completions.create as Mock).mockResolvedValue({
226
+ tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
227
+ });
228
+
229
+ // 保存原始环境变量值
230
+ const originalDebugValue = process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION;
231
+
232
+ // 模拟环境变量
233
+ process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION = '1';
234
+ vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
235
+
236
+ // 执行测试
237
+ // 运行你的测试函数,确保它会在条件满足时调用 debugStream
238
+ // 假设的测试函数调用,你可能需要根据实际情况调整
239
+ await instance.chat({
240
+ messages: [{ content: 'Hello', role: 'user' }],
241
+ model: 'deepseek-chat',
242
+ stream: true,
243
+ temperature: 0,
244
+ });
245
+
246
+ // 验证 debugStream 被调用
247
+ expect(debugStreamModule.debugStream).toHaveBeenCalled();
248
+
249
+ // 恢复原始环境变量值
250
+ process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION = originalDebugValue;
251
+ });
252
+ });
253
+ });
254
+ });
@@ -0,0 +1,15 @@
1
+ import { AgentRuntimeErrorType } from '../error';
2
+ import { ModelProvider } from '../types';
3
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
4
+
5
+ export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
6
+ baseURL: 'https://api.deepseek.com/v1',
7
+ debug: {
8
+ chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
9
+ },
10
+ errorType: {
11
+ bizError: AgentRuntimeErrorType.DeepSeekBizError,
12
+ invalidAPIKey: AgentRuntimeErrorType.InvalidDeepSeekAPIKey,
13
+ },
14
+ provider: ModelProvider.DeepSeek,
15
+ });
@@ -49,6 +49,9 @@ export const AgentRuntimeErrorType = {
49
49
 
50
50
  InvalidMinimaxAPIKey: 'InvalidMinimaxAPIKey',
51
51
  MinimaxBizError: 'MinimaxBizError',
52
+
53
+ InvalidDeepSeekAPIKey: 'InvalidDeepSeekAPIKey',
54
+ DeepSeekBizError: 'DeepSeekBizError',
52
55
  } as const;
53
56
 
54
57
  export type ILobeAgentRuntimeErrorType =
@@ -3,6 +3,7 @@ export { LobeAnthropicAI } from './anthropic';
3
3
  export { LobeAzureOpenAI } from './azureOpenai';
4
4
  export * from './BaseAI';
5
5
  export { LobeBedrockAI } from './bedrock';
6
+ export { LobeDeepSeekAI } from './deepseek';
6
7
  export * from './error';
7
8
  export { LobeGoogleAI } from './google';
8
9
  export { LobeGroq } from './groq';
@@ -26,6 +26,7 @@ export enum ModelProvider {
26
26
  Azure = 'azure',
27
27
  Bedrock = 'bedrock',
28
28
  ChatGLM = 'chatglm',
29
+ DeepSeek = 'deepseek',
29
30
  Google = 'google',
30
31
  Groq = 'groq',
31
32
  Minimax = 'minimax',
@@ -92,6 +92,9 @@ export default {
92
92
  InvalidBedrockCredentials: 'Bedrock 鉴权未通过,请检查 AccessKeyId/SecretAccessKey 后重试',
93
93
  BedrockBizError: '请求 Bedrock 服务出错,请根据以下信息排查或重试',
94
94
 
95
+ InvalidDeepSeekAPIKey: 'DeepSeek API Key 不正确或为空,请检查 DeepSeek API Key 后重试',
96
+ DeepSeekBizError: '请求 DeepSeek 服务出错,请根据以下信息排查或重试',
97
+
95
98
  InvalidAzureAPIKey: 'Azure API Key 不正确或为空,请检查 Azure API Key 后重试',
96
99
  AzureBizError: '请求 Azure AI 服务出错,请根据以下信息排查或重试',
97
100
 
@@ -62,6 +62,18 @@ export default {
62
62
  title: '使用自定义 Bedrock 鉴权信息',
63
63
  },
64
64
  },
65
+ deepseek: {
66
+ title: 'DeepSeek',
67
+ token: {
68
+ desc: '填入来自 DeepSeek 的 API Key',
69
+ placeholder: 'DeepSeek API Key',
70
+ title: 'API Key',
71
+ },
72
+ unlock: {
73
+ description: '输入你的 DeepSeek API Key 即可开始会话。应用不会记录你的 API Key',
74
+ title: '使用自定义 DeepSeek API Key',
75
+ },
76
+ },
65
77
  google: {
66
78
  title: 'Google',
67
79
  token: {
@@ -22,20 +22,12 @@ export interface V3LegacyConfig {
22
22
  }
23
23
 
24
24
  export interface V3LLMConfig {
25
- anthropic: V3GeneralConfig;
26
25
  bedrock: any;
27
26
  google: V3GeneralConfig;
28
- groq: V3GeneralConfig;
29
- minimax: V3GeneralConfig;
30
- mistral: V3GeneralConfig;
31
- moonshot: V3GeneralConfig;
32
27
  ollama: V3LegacyConfig;
33
28
  openAI: V3OpenAIConfig;
34
29
  openrouter: V3LegacyConfig;
35
- perplexity: V3GeneralConfig;
36
30
  togetherai: V3LegacyConfig;
37
- zeroone: V3GeneralConfig;
38
- zhipu: V3GeneralConfig;
39
31
  }
40
32
 
41
33
  /**
@@ -23,6 +23,7 @@ export const getServerGlobalConfig = () => {
23
23
  ENABLED_AWS_BEDROCK,
24
24
  ENABLED_GOOGLE,
25
25
  ENABLED_GROQ,
26
+ ENABLED_DEEPSEEK,
26
27
  ENABLED_PERPLEXITY,
27
28
  ENABLED_ANTHROPIC,
28
29
  ENABLED_MINIMAX,
@@ -63,6 +64,7 @@ export const getServerGlobalConfig = () => {
63
64
  }),
64
65
  },
65
66
  bedrock: { enabled: ENABLED_AWS_BEDROCK },
67
+ deepseek: { enabled: ENABLED_DEEPSEEK },
66
68
  google: { enabled: ENABLED_GOOGLE },
67
69
  groq: { enabled: ENABLED_GROQ },
68
70
  minimax: { enabled: ENABLED_MINIMAX },
@@ -10,6 +10,7 @@ import {
10
10
  LobeBedrockAI,
11
11
  LobeGoogleAI,
12
12
  LobeGroq,
13
+ LobeDeepSeekAI,
13
14
  LobeMistralAI,
14
15
  LobeMoonshotAI,
15
16
  LobeOllamaAI,
@@ -873,6 +874,21 @@ describe('AgentRuntimeOnClient', () => {
873
874
  expect(runtime['_runtime']).toBeInstanceOf(LobeGroq);
874
875
  });
875
876
 
877
+ it('DeepSeek provider: with apiKey', async () => {
878
+ merge(initialSettingsState, {
879
+ settings: {
880
+ languageModel: {
881
+ deepseek: {
882
+ apiKey: 'user-deepseek-key',
883
+ },
884
+ },
885
+ },
886
+ } as UserSettingsState) as unknown as UserStore;
887
+ const runtime = await initializeWithClientStore(ModelProvider.DeepSeek, {});
888
+ expect(runtime).toBeInstanceOf(AgentRuntime);
889
+ expect(runtime['_runtime']).toBeInstanceOf(LobeDeepSeekAI);
890
+ });
891
+
876
892
  /**
877
893
  * Should not have a unknown provider in client, but has
878
894
  * similar cases in server side
@@ -139,6 +139,9 @@ export function initializeWithClientStore(provider: string, payload: any) {
139
139
  case ModelProvider.Groq: {
140
140
  break;
141
141
  }
142
+ case ModelProvider.DeepSeek: {
143
+ break;
144
+ }
142
145
  case ModelProvider.OpenRouter: {
143
146
  break;
144
147
  }
@@ -5,6 +5,7 @@ import {
5
5
  AnthropicProviderCard,
6
6
  AzureProviderCard,
7
7
  BedrockProviderCard,
8
+ DeepSeekProviderCard,
8
9
  GoogleProviderCard,
9
10
  GroqProviderCard,
10
11
  MinimaxProviderCard,
@@ -109,6 +110,7 @@ export const llmSettingsSlice: StateCreator<
109
110
  chatModels: mergeModels('togetherai', TogetherAIProviderCard.chatModels),
110
111
  },
111
112
  BedrockProviderCard,
113
+ DeepSeekProviderCard,
112
114
  PerplexityProviderCard,
113
115
  MinimaxProviderCard,
114
116
  MistralProviderCard,
@@ -44,6 +44,7 @@ export interface GlobalLLMConfig {
44
44
  anthropic: GeneralModelProviderConfig;
45
45
  azure: AzureOpenAIConfig;
46
46
  bedrock: AWSBedrockConfig;
47
+ deepseek: GeneralModelProviderConfig;
47
48
  google: GeneralModelProviderConfig;
48
49
  groq: GeneralModelProviderConfig;
49
50
  minimax: GeneralModelProviderConfig;