@lobehub/chat 1.29.6 → 1.31.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.env.example +5 -0
  2. package/CHANGELOG.md +50 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/docs/usage/features/database.zh-CN.mdx +3 -3
  6. package/locales/ar/modelProvider.json +12 -0
  7. package/locales/bg-BG/modelProvider.json +12 -0
  8. package/locales/de-DE/modelProvider.json +12 -0
  9. package/locales/en-US/modelProvider.json +12 -0
  10. package/locales/es-ES/modelProvider.json +12 -0
  11. package/locales/fr-FR/modelProvider.json +12 -0
  12. package/locales/it-IT/modelProvider.json +12 -0
  13. package/locales/ja-JP/modelProvider.json +12 -0
  14. package/locales/ko-KR/modelProvider.json +12 -0
  15. package/locales/nl-NL/modelProvider.json +12 -0
  16. package/locales/pl-PL/modelProvider.json +12 -0
  17. package/locales/pt-BR/modelProvider.json +12 -0
  18. package/locales/ru-RU/modelProvider.json +12 -0
  19. package/locales/tr-TR/modelProvider.json +12 -0
  20. package/locales/vi-VN/modelProvider.json +12 -0
  21. package/locales/zh-CN/modelProvider.json +12 -0
  22. package/locales/zh-TW/modelProvider.json +12 -0
  23. package/package.json +2 -2
  24. package/src/app/(main)/settings/llm/ProviderList/Cloudflare/index.tsx +43 -0
  25. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +6 -0
  26. package/src/config/llm.ts +17 -0
  27. package/src/config/modelProviders/cloudflare.ts +89 -0
  28. package/src/config/modelProviders/index.ts +8 -0
  29. package/src/config/modelProviders/xai.ts +29 -0
  30. package/src/const/auth.ts +2 -0
  31. package/src/const/settings/llm.ts +10 -0
  32. package/src/libs/agent-runtime/AgentRuntime.ts +14 -1
  33. package/src/libs/agent-runtime/cloudflare/index.test.ts +648 -0
  34. package/src/libs/agent-runtime/cloudflare/index.ts +123 -0
  35. package/src/libs/agent-runtime/types/type.ts +2 -0
  36. package/src/libs/agent-runtime/utils/cloudflareHelpers.test.ts +339 -0
  37. package/src/libs/agent-runtime/utils/cloudflareHelpers.ts +134 -0
  38. package/src/libs/agent-runtime/xai/index.test.ts +255 -0
  39. package/src/libs/agent-runtime/xai/index.ts +10 -0
  40. package/src/locales/default/modelProvider.ts +13 -1
  41. package/src/server/globalConfig/index.ts +16 -0
  42. package/src/server/modules/AgentRuntime/index.ts +18 -0
  43. package/src/services/_auth.ts +9 -0
  44. package/src/services/chat.ts +7 -0
  45. package/src/store/user/slices/modelList/selectors/keyVaults.ts +2 -0
  46. package/src/store/user/slices/modelList/selectors/modelConfig.ts +2 -0
  47. package/src/types/user/settings/keyVaults.ts +7 -0
package/src/libs/agent-runtime/xai/index.test.ts
@@ -0,0 +1,255 @@
+ // @vitest-environment node
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import {
+   ChatStreamCallbacks,
+   LobeOpenAICompatibleRuntime,
+   ModelProvider,
+ } from '@/libs/agent-runtime';
+
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeXAI } from './index';
+
+ const provider = ModelProvider.XAI;
+ const defaultBaseURL = 'https://api.x.ai/v1';
+
+ const bizErrorType = 'ProviderBizError';
+ const invalidErrorType = 'InvalidProviderAPIKey';
+
+ // Mock console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+
+ let instance: LobeOpenAICompatibleRuntime;
+
+ beforeEach(() => {
+   instance = new LobeXAI({ apiKey: 'test' });
+
+   // Use vi.spyOn to mock the chat.completions.create method
+   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+     new ReadableStream() as any,
+   );
+ });
+
+ afterEach(() => {
+   vi.clearAllMocks();
+ });
+
+ describe('LobeXAI', () => {
+   describe('init', () => {
+     it('should correctly initialize with an API key', async () => {
+       const instance = new LobeXAI({ apiKey: 'test_api_key' });
+       expect(instance).toBeInstanceOf(LobeXAI);
+       expect(instance.baseURL).toEqual(defaultBaseURL);
+     });
+   });
+
+   describe('chat', () => {
+     describe('Error', () => {
+       it('should return OpenAIBizError with an OpenAI error response when OpenAI.APIError is thrown', async () => {
+         // Arrange
+         const apiError = new OpenAI.APIError(
+           400,
+           {
+             status: 400,
+             error: {
+               message: 'Bad Request',
+             },
+           },
+           'Error message',
+           {},
+         );
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               error: { message: 'Bad Request' },
+               status: 400,
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw AgentRuntimeError with InvalidProviderAPIKey if no apiKey is provided', async () => {
+         try {
+           new LobeXAI({});
+         } catch (e) {
+           expect(e).toEqual({ errorType: invalidErrorType });
+         }
+       });
+
+       it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: {
+             message: 'api is undefined',
+           },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return OpenAIBizError with a cause response with a desensitized URL', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: { message: 'api is undefined' },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         instance = new LobeXAI({
+           apiKey: 'test',
+
+           baseURL: 'https://api.abc.com/v1',
+         });
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: 'https://api.***.com/v1',
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw an InvalidXAIAPIKey error type on 401 status code', async () => {
+         // Mock the API call to simulate a 401 error
+         const error = new Error('Unauthorized') as any;
+         error.status = 401;
+         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           // Expect the chat method to throw an error with InvalidXAIAPIKey
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: new Error('Unauthorized'),
+             errorType: invalidErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+         // Arrange
+         const genericError = new Error('Generic Error');
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'grok-beta',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             errorType: 'AgentRuntimeError',
+             provider,
+             error: {
+               name: genericError.name,
+               cause: genericError.cause,
+               message: genericError.message,
+               stack: genericError.stack,
+             },
+           });
+         }
+       });
+     });
+
+     describe('DEBUG', () => {
+       it('should call debugStream and return StreamingTextResponse when DEBUG_XAI_CHAT_COMPLETION is 1', async () => {
+         // Arrange
+         const mockProdStream = new ReadableStream() as any; // the mocked production stream
+         const mockDebugStream = new ReadableStream({
+           start(controller) {
+             controller.enqueue('Debug stream content');
+             controller.close();
+           },
+         }) as any;
+         mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+         // Mock the return value of chat.completions.create, including a mocked tee method
+         (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+           tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+         });
+
+         // Save the original environment variable value
+         const originalDebugValue = process.env.DEBUG_XAI_CHAT_COMPLETION;
+
+         // Mock the environment variable
+         process.env.DEBUG_XAI_CHAT_COMPLETION = '1';
+         vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+         // Run the test
+         // Invoke the function under test and verify it calls debugStream when the condition is met
+         // (illustrative invocation; adjust to the actual setup as needed)
+         await instance.chat({
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'grok-beta',
+           stream: true,
+           temperature: 0,
+         });
+
+         // Verify that debugStream was called
+         expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+         // Restore the original environment variable value
+         process.env.DEBUG_XAI_CHAT_COMPLETION = originalDebugValue;
+       });
+     });
+   });
+ });
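One detail the desensitized-URL test pins down: when a custom baseURL is configured, the error payload reports a masked endpoint, so 'https://api.abc.com/v1' surfaces as 'https://api.***.com/v1'. A minimal sketch of that masking, assuming a hypothetical standalone helper (the package's actual implementation lives in the agent-runtime utils and may handle more cases):

```ts
// Hypothetical sketch, not the package's actual helper: mask the middle
// labels of the hostname so endpoints stay readable but anonymous in
// error payloads. Multi-label hosts only; edge cases omitted.
const maskEndpoint = (url: string): string => {
  const u = new URL(url);
  const labels = u.hostname.split('.');
  // api.abc.com -> api.***.com
  const masked = [labels[0], '***', labels[labels.length - 1]].join('.');
  return `${u.protocol}//${masked}${u.pathname}`;
};

console.log(maskEndpoint('https://api.abc.com/v1')); // https://api.***.com/v1
```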
package/src/libs/agent-runtime/xai/index.ts
@@ -0,0 +1,10 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+ export const LobeXAI = LobeOpenAICompatibleFactory({
+   baseURL: 'https://api.x.ai/v1',
+   debug: {
+     chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
+   },
+   provider: ModelProvider.XAI,
+ });
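That is the entire runtime: LobeOpenAICompatibleFactory supplies the OpenAI-compatible client, streaming, and error mapping, so the xAI adapter only pins the base URL, a debug switch, and the provider id. A minimal usage sketch consistent with the tests above (the import path and the env-var non-null assertion are assumptions; 'grok-beta' and the chat() call shape come straight from index.test.ts):

```ts
import { LobeXAI } from '@/libs/agent-runtime/xai';

// Assumed wiring: construction only needs a string key; real requests
// require a valid xAI key in XAI_API_KEY.
const runtime = new LobeXAI({ apiKey: process.env.XAI_API_KEY! });

// Same call shape the tests exercise, with streaming enabled.
const response = await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'grok-beta',
  stream: true,
  temperature: 0,
});
```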
package/src/locales/default/modelProvider.ts
@@ -21,7 +21,7 @@ export default {
    },
    bedrock: {
      accessKeyId: {
-       desc: '填入AWS Access Key Id',
+       desc: '填入 AWS Access Key Id',
        placeholder: 'AWS Access Key Id',
        title: 'AWS Access Key Id',
      },
@@ -52,6 +52,18 @@ export default {
        title: '使用自定义 Bedrock 鉴权信息',
      },
    },
+   cloudflare: {
+     apiKey: {
+       desc: '请填写 Cloudflare API Key',
+       placeholder: 'Cloudflare API Key',
+       title: 'Cloudflare API Key',
+     },
+     baseURLOrAccountID: {
+       desc: '填入 Cloudflare 账户 ID 或 自定义 API 地址',
+       placeholder: 'Cloudflare Account ID / custom API URL',
+       title: 'Cloudflare 账户 ID / API 地址',
+     }
+   },
    github: {
      personalAccessToken: {
        desc: '填入你的 Github PAT,点击 [这里](https://github.com/settings/tokens) 创建',
package/src/server/globalConfig/index.ts
@@ -33,6 +33,7 @@ import {
    TogetherAIProviderCard,
    UpstageProviderCard,
    WenxinProviderCard,
+   XAIProviderCard,
    ZeroOneProviderCard,
    ZhiPuProviderCard,
  } from '@/config/modelProviders';
@@ -99,6 +100,9 @@ export const getServerGlobalConfig = () => {
    BAICHUAN_MODEL_LIST,

    ENABLED_TAICHU,
+
+   ENABLED_CLOUDFLARE,
+
    TAICHU_MODEL_LIST,

    ENABLED_AI21,
@@ -143,6 +147,9 @@ export const getServerGlobalConfig = () => {

    ENABLED_HUGGINGFACE,
    HUGGINGFACE_MODEL_LIST,
+
+   ENABLED_XAI,
+   XAI_MODEL_LIST,
  } = getLLMConfig();

  const config: GlobalServerConfig = {
@@ -202,6 +209,7 @@ export const getServerGlobalConfig = () => {
        modelString: AWS_BEDROCK_MODEL_LIST,
      }),
    },
+   cloudflare: { enabled: ENABLED_CLOUDFLARE },
    deepseek: {
      enabled: ENABLED_DEEPSEEK,
      enabledModels: extractEnabledModels(DEEPSEEK_MODEL_LIST),
@@ -395,6 +403,14 @@ export const getServerGlobalConfig = () => {
        modelString: WENXIN_MODEL_LIST,
      }),
    },
+   xai: {
+     enabled: ENABLED_XAI,
+     enabledModels: extractEnabledModels(XAI_MODEL_LIST),
+     serverModelCards: transformToChatModelCards({
+       defaultChatModels: XAIProviderCard.chatModels,
+       modelString: XAI_MODEL_LIST,
+     }),
+   },
    zeroone: {
      enabled: ENABLED_ZEROONE,
      enabledModels: extractEnabledModels(ZEROONE_MODEL_LIST),
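The new xai block mirrors every sibling provider entry: an enabled flag plus an optional model-list string that overrides the defaults from XAIProviderCard.chatModels. Assuming the same environment-variable mapping the other providers use in src/config/llm.ts (that file's +17 lines are not shown in this extract), server deployments would opt in roughly like this:

```ts
// Assumed mapping, for illustration only: the provider is considered
// enabled when an API key is present, and XAI_MODEL_LIST optionally
// prunes or extends the default model cards.
const ENABLED_XAI = !!process.env.XAI_API_KEY;
const XAI_MODEL_LIST = process.env.XAI_MODEL_LIST; // e.g. '-all,+grok-beta' (syntax assumed)
```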
package/src/server/modules/AgentRuntime/index.ts
@@ -210,6 +210,17 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

      return { apiKey };
    }
+   case ModelProvider.Cloudflare: {
+     const { CLOUDFLARE_API_KEY, CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID } = getLLMConfig();
+
+     const apiKey = apiKeyManager.pick(payload?.apiKey || CLOUDFLARE_API_KEY);
+     const baseURLOrAccountID =
+       payload.apiKey && payload.cloudflareBaseURLOrAccountID
+         ? payload.cloudflareBaseURLOrAccountID
+         : CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID;
+
+     return { apiKey, baseURLOrAccountID };
+   }
    case ModelProvider.Ai360: {
      const { AI360_API_KEY } = getLLMConfig();

@@ -275,6 +286,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

      const apiKey = sensenovaAccessKeyID + ':' + sensenovaAccessKeySecret;

+     return { apiKey };
+   }
+   case ModelProvider.XAI: {
+     const { XAI_API_KEY } = getLLMConfig();
+
+     const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);
+
      return { apiKey };
    }
  }
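Cloudflare is the one provider in this release that carries a second credential, and the new case arm encodes a precedence rule: a client-supplied apiKey also unlocks the client-supplied account ID / base URL, while a missing key means both values fall back to the server-side environment. A standalone restatement of that rule (hypothetical helper, for illustration; the real code additionally routes the key through apiKeyManager.pick):

```ts
interface CloudflarePayload {
  apiKey?: string;
  cloudflareBaseURLOrAccountID?: string;
}

const resolveCloudflareOptions = (
  payload: CloudflarePayload,
  env: { CLOUDFLARE_API_KEY?: string; CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID?: string },
) => {
  // A user key wins; otherwise fall back to the server-configured key.
  const apiKey = payload.apiKey || env.CLOUDFLARE_API_KEY;
  // The user's account ID / base URL is honored only when the user also
  // supplied their own key; otherwise the server-side value is used.
  const baseURLOrAccountID =
    payload.apiKey && payload.cloudflareBaseURLOrAccountID
      ? payload.cloudflareBaseURLOrAccountID
      : env.CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID;

  return { apiKey, baseURLOrAccountID };
};
```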
package/src/services/_auth.ts
@@ -69,6 +69,15 @@ export const getProviderAuthPayload = (provider: string) => {
      return { endpoint: config?.baseURL };
    }

+   case ModelProvider.Cloudflare: {
+     const config = keyVaultsConfigSelectors.cloudflareConfig(useUserStore.getState());
+
+     return {
+       apiKey: config?.apiKey,
+       cloudflareBaseURLOrAccountID: config?.baseURLOrAccountID,
+     };
+   }
+
    default: {
      const config = keyVaultsConfigSelectors.getVaultByProvider(provider as GlobalLLMProviderKey)(
        useUserStore.getState(),
package/src/services/chat.ts
@@ -175,6 +175,13 @@ export function initializeWithClientStore(provider: string, payload: any) {
    case ModelProvider.ZeroOne: {
      break;
    }
+   case ModelProvider.Cloudflare: {
+     providerOptions = {
+       apikey: providerAuthPayload?.apiKey,
+       baseURLOrAccountID: providerAuthPayload?.cloudflareBaseURLOrAccountID,
+     };
+     break;
+   }
  }

  /**
package/src/store/user/slices/modelList/selectors/keyVaults.ts
@@ -18,6 +18,7 @@ const wenxinConfig = (s: UserStore) => keyVaultsSettings(s).wenxin || {};
  const ollamaConfig = (s: UserStore) => keyVaultsSettings(s).ollama || {};
  const sensenovaConfig = (s: UserStore) => keyVaultsSettings(s).sensenova || {};
  const azureConfig = (s: UserStore) => keyVaultsSettings(s).azure || {};
+ const cloudflareConfig = (s: UserStore) => keyVaultsSettings(s).cloudflare || {};
  const getVaultByProvider = (provider: GlobalLLMProviderKey) => (s: UserStore) =>
    (keyVaultsSettings(s)[provider] || {}) as OpenAICompatibleKeyVault &
      AzureOpenAIKeyVault &
@@ -38,6 +39,7 @@ const password = (s: UserStore) => keyVaultsSettings(s).password || '';
  export const keyVaultsConfigSelectors = {
    azureConfig,
    bedrockConfig,
+   cloudflareConfig,
    getVaultByProvider,
    isProviderApiKeyNotEmpty,
    isProviderEndpointNotEmpty,
package/src/store/user/slices/modelList/selectors/modelConfig.ts
@@ -69,6 +69,7 @@ const openAIConfig = (s: UserStore) => currentLLMSettings(s).openai;
  const bedrockConfig = (s: UserStore) => currentLLMSettings(s).bedrock;
  const ollamaConfig = (s: UserStore) => currentLLMSettings(s).ollama;
  const azureConfig = (s: UserStore) => currentLLMSettings(s).azure;
+ const cloudflareConfig = (s: UserStore) => currentLLMSettings(s).cloudflare;
  const sensenovaConfig = (s: UserStore) => currentLLMSettings(s).sensenova;

  const isAzureEnabled = (s: UserStore) => currentLLMSettings(s).azure.enabled;
@@ -76,6 +77,7 @@ const isAzureEnabled = (s: UserStore) => currentLLMSettings(s).azure.enabled;
  export const modelConfigSelectors = {
    azureConfig,
    bedrockConfig,
+   cloudflareConfig,

    currentEditingCustomModelCard,
    getCustomModelCard,
package/src/types/user/settings/keyVaults.ts
@@ -16,6 +16,11 @@ export interface AWSBedrockKeyVault {
    sessionToken?: string;
  }

+ export interface CloudflareKeyVault {
+   apiKey?: string;
+   baseURLOrAccountID?: string;
+ }
+
  export interface SenseNovaKeyVault {
    sensenovaAccessKeyID?: string;
    sensenovaAccessKeySecret?: string;
@@ -33,6 +38,7 @@ export interface UserKeyVaults {
    azure?: AzureOpenAIKeyVault;
    baichuan?: OpenAICompatibleKeyVault;
    bedrock?: AWSBedrockKeyVault;
+   cloudflare?: CloudflareKeyVault;
    deepseek?: OpenAICompatibleKeyVault;
    fireworksai?: OpenAICompatibleKeyVault;
    github?: OpenAICompatibleKeyVault;
@@ -59,6 +65,7 @@ export interface UserKeyVaults {
    togetherai?: OpenAICompatibleKeyVault;
    upstage?: OpenAICompatibleKeyVault;
    wenxin?: WenxinKeyVault;
+   xai?: OpenAICompatibleKeyVault;
    zeroone?: OpenAICompatibleKeyVault;
    zhipu?: OpenAICompatibleKeyVault;
  }
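Put together, a populated client-side vault now admits entries like the following. CloudflareKeyVault is the only new shape; xai reuses OpenAICompatibleKeyVault, which is assumed here to carry the usual apiKey/baseURL fields. Values are placeholders:

```ts
import type { CloudflareKeyVault, UserKeyVaults } from '@/types/user/settings/keyVaults';

// Placeholder credentials, for illustration only.
const cloudflare: CloudflareKeyVault = {
  apiKey: 'cf-xxxxxxxx',
  baseURLOrAccountID: 'my-account-id', // an account ID, or a full custom API URL
};

const vaults: UserKeyVaults = {
  cloudflare,
  xai: { apiKey: 'xai-xxxxxxxx' },
};
```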