@lobehub/chat 1.57.0 → 1.58.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docker-compose/local/docker-compose.yml +1 -0
  4. package/locales/ar/modelProvider.json +24 -0
  5. package/locales/ar/models.json +60 -0
  6. package/locales/ar/providers.json +12 -0
  7. package/locales/bg-BG/modelProvider.json +24 -0
  8. package/locales/bg-BG/models.json +60 -0
  9. package/locales/bg-BG/providers.json +12 -0
  10. package/locales/de-DE/modelProvider.json +24 -0
  11. package/locales/de-DE/models.json +60 -0
  12. package/locales/de-DE/providers.json +12 -0
  13. package/locales/en-US/modelProvider.json +24 -0
  14. package/locales/en-US/models.json +60 -0
  15. package/locales/en-US/providers.json +12 -0
  16. package/locales/es-ES/modelProvider.json +24 -0
  17. package/locales/es-ES/models.json +60 -0
  18. package/locales/es-ES/providers.json +12 -0
  19. package/locales/fa-IR/modelProvider.json +30 -0
  20. package/locales/fa-IR/models.json +60 -0
  21. package/locales/fa-IR/providers.json +12 -0
  22. package/locales/fr-FR/modelProvider.json +24 -0
  23. package/locales/fr-FR/models.json +60 -0
  24. package/locales/fr-FR/providers.json +12 -0
  25. package/locales/it-IT/modelProvider.json +24 -0
  26. package/locales/it-IT/models.json +60 -0
  27. package/locales/it-IT/providers.json +12 -0
  28. package/locales/ja-JP/modelProvider.json +24 -0
  29. package/locales/ja-JP/models.json +60 -0
  30. package/locales/ja-JP/providers.json +12 -0
  31. package/locales/ko-KR/modelProvider.json +24 -0
  32. package/locales/ko-KR/models.json +60 -0
  33. package/locales/ko-KR/providers.json +12 -0
  34. package/locales/nl-NL/modelProvider.json +24 -0
  35. package/locales/nl-NL/models.json +60 -0
  36. package/locales/nl-NL/providers.json +12 -0
  37. package/locales/pl-PL/modelProvider.json +24 -0
  38. package/locales/pl-PL/models.json +60 -0
  39. package/locales/pl-PL/providers.json +12 -0
  40. package/locales/pt-BR/modelProvider.json +24 -0
  41. package/locales/pt-BR/models.json +60 -0
  42. package/locales/pt-BR/providers.json +12 -0
  43. package/locales/ru-RU/modelProvider.json +24 -0
  44. package/locales/ru-RU/models.json +60 -0
  45. package/locales/ru-RU/providers.json +12 -0
  46. package/locales/tr-TR/modelProvider.json +30 -0
  47. package/locales/tr-TR/models.json +60 -0
  48. package/locales/tr-TR/providers.json +12 -0
  49. package/locales/vi-VN/modelProvider.json +24 -0
  50. package/locales/vi-VN/models.json +60 -0
  51. package/locales/vi-VN/providers.json +12 -0
  52. package/locales/zh-CN/modelProvider.json +24 -0
  53. package/locales/zh-CN/models.json +1112 -1052
  54. package/locales/zh-CN/providers.json +80 -68
  55. package/locales/zh-TW/modelProvider.json +24 -0
  56. package/locales/zh-TW/models.json +60 -0
  57. package/locales/zh-TW/providers.json +12 -0
  58. package/package.json +4 -2
  59. package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/File.tsx +1 -1
  60. package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/index.tsx +19 -9
  61. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/index.tsx +1 -2
  62. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +15 -1
  63. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Mobile/ChatHeader/index.tsx +0 -2
  64. package/src/app/[variants]/(main)/chat/(workspace)/features/AgentSettings/index.tsx +1 -1
  65. package/src/app/[variants]/(main)/chat/settings/page.tsx +95 -5
  66. package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +58 -0
  67. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +13 -2
  68. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +6 -8
  69. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/index.tsx +5 -6
  70. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx +4 -3
  71. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext.ts +2 -0
  72. package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx +6 -7
  73. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +1 -1
  74. package/src/config/aiModels/azureai.ts +18 -0
  75. package/src/config/aiModels/index.ts +3 -0
  76. package/src/config/modelProviders/azure.ts +2 -1
  77. package/src/config/modelProviders/azureai.ts +19 -0
  78. package/src/config/modelProviders/index.ts +3 -0
  79. package/src/database/server/models/aiProvider.ts +2 -0
  80. package/src/features/AgentSetting/AgentMeta/index.tsx +19 -9
  81. package/src/features/AgentSetting/AgentPrompt/index.tsx +32 -2
  82. package/src/features/AgentSetting/AgentSettings.tsx +4 -5
  83. package/src/features/AgentSetting/AgentSettingsProvider.tsx +17 -0
  84. package/src/features/AgentSetting/StoreUpdater.tsx +5 -2
  85. package/src/features/AgentSetting/index.tsx +1 -1
  86. package/src/features/AgentSetting/store/initialState.ts +2 -1
  87. package/src/hooks/useInterceptingRoutes.test.ts +1 -1
  88. package/src/hooks/useInterceptingRoutes.ts +1 -1
  89. package/src/layout/GlobalProvider/StoreInitialization.tsx +1 -1
  90. package/src/libs/agent-runtime/AgentRuntime.ts +13 -6
  91. package/src/libs/agent-runtime/azureai/index.ts +109 -0
  92. package/src/libs/agent-runtime/baichuan/index.test.ts +8 -250
  93. package/src/libs/agent-runtime/cloudflare/index.ts +22 -18
  94. package/src/libs/agent-runtime/index.ts +1 -0
  95. package/src/libs/agent-runtime/types/type.ts +1 -0
  96. package/src/libs/agent-runtime/utils/streams/__snapshots__/protocol.test.ts.snap +331 -0
  97. package/src/libs/agent-runtime/utils/streams/protocol.test.ts +137 -0
  98. package/src/libs/agent-runtime/utils/streams/protocol.ts +34 -0
  99. package/src/locales/default/modelProvider.ts +25 -0
  100. package/src/server/modules/AgentRuntime/index.ts +8 -1
  101. package/src/services/chat.ts +12 -3
  102. package/src/store/agent/slices/chat/action.test.ts +3 -3
  103. package/src/store/agent/slices/chat/action.ts +2 -5
  104. package/src/types/aiProvider.ts +1 -0
  105. package/src/types/user/settings/keyVaults.ts +1 -0
  106. package/src/app/[variants]/(main)/chat/settings/features/EditPage.tsx +0 -45
  107. package/src/features/AgentSetting/AgentSettingsStore.tsx +0 -14
  108. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/Image.tsx +0 -0
  109. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/index.tsx +0 -0
  110. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/FileItem/style.ts +0 -0
  111. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Files/index.tsx +0 -0
  112. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/InputArea/Container.tsx +0 -0
  113. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/InputArea/index.tsx +0 -0
  114. /package/src/{features → app/[variants]/(main)/chat/(workspace)/@conversation/features}/ChatInput/Mobile/Send.tsx +0 -0
@@ -7,12 +7,14 @@ import { AgentSettingsInstance, useAgentSettings } from './hooks/useAgentSetting
7
7
  import { State, useStoreApi } from './store';
8
8
 
9
9
  export interface StoreUpdaterProps
10
- extends Partial<Pick<State, 'onMetaChange' | 'onConfigChange' | 'meta' | 'config' | 'id'>> {
10
+ extends Partial<
11
+ Pick<State, 'onMetaChange' | 'onConfigChange' | 'meta' | 'config' | 'id' | 'loading'>
12
+ > {
11
13
  instanceRef?: ForwardedRef<AgentSettingsInstance> | null;
12
14
  }
13
15
 
14
16
  const StoreUpdater = memo<StoreUpdaterProps>(
15
- ({ onConfigChange, instanceRef, id, onMetaChange, meta, config }) => {
17
+ ({ onConfigChange, instanceRef, id, onMetaChange, meta, config, loading }) => {
16
18
  const storeApi = useStoreApi();
17
19
  const useStoreUpdater = createStoreUpdater(storeApi);
18
20
 
@@ -20,6 +22,7 @@ const StoreUpdater = memo<StoreUpdaterProps>(
20
22
  useStoreUpdater('config', config);
21
23
  useStoreUpdater('onConfigChange', onConfigChange);
22
24
  useStoreUpdater('onMetaChange', onMetaChange);
25
+ useStoreUpdater('loading', loading);
23
26
  useStoreUpdater('id', id);
24
27
 
25
28
  const instance = useAgentSettings();
@@ -1,3 +1,3 @@
1
1
  export { AgentSettings } from './AgentSettings';
2
- export { AgentSettingsStore } from './AgentSettingsStore';
2
+ export { AgentSettingsProvider } from './AgentSettingsProvider';
3
3
  export type { AgentSettingsInstance } from './hooks/useAgentSettings';
@@ -7,8 +7,9 @@ export interface State {
7
7
  autocompleteLoading: SessionLoadingState;
8
8
  config: LobeAgentConfig;
9
9
  id?: string;
10
- meta: MetaData;
10
+ loading?: boolean;
11
11
 
12
+ meta: MetaData;
12
13
  onConfigChange?: (config: LobeAgentConfig) => void;
13
14
  onMetaChange?: (meta: MetaData) => void;
14
15
  }
@@ -43,7 +43,7 @@ describe('useOpenChatSettings', () => {
43
43
  vi.mocked(useSessionStore).mockReturnValue('123');
44
44
  vi.mocked(useIsMobile).mockReturnValue(true);
45
45
  const { result } = renderHook(() => useOpenChatSettings(ChatSettingsTabs.Meta));
46
- expect(result.current()).toBe('/chat/settings');
46
+ expect(result.current()).toBe('/chat/settings?session=123');
47
47
  });
48
48
 
49
49
  it('should handle desktop route for chat settings with session and tab', () => {
@@ -20,7 +20,7 @@ export const useOpenChatSettings = (tab: ChatSettingsTabs = ChatSettingsTabs.Met
20
20
  return () => router.push(urlJoin('/settings', SettingsTabs.Agent));
21
21
  }
22
22
 
23
- if (isMobile) return () => router.push('/chat/settings');
23
+ if (isMobile) return () => router.push('/chat/settings', { query: { session: activeId } });
24
24
 
25
25
  return () => {
26
26
  useAgentStore.setState({ showAgentSetting: true });
@@ -32,7 +32,7 @@ const StoreInitialization = memo(() => {
32
32
 
33
33
  const useInitSystemStatus = useGlobalStore((s) => s.useInitSystemStatus);
34
34
 
35
- const useInitAgentStore = useAgentStore((s) => s.useInitAgentStore);
35
+ const useInitAgentStore = useAgentStore((s) => s.useInitInboxAgentStore);
36
36
  const useInitAiProviderKeyVaults = useAiInfraStore((s) => s.useFetchAiProviderRuntimeState);
37
37
 
38
38
  // init the system preference
@@ -7,6 +7,7 @@ import { LobeAi21AI } from './ai21';
7
7
  import { LobeAi360AI } from './ai360';
8
8
  import { LobeAnthropicAI } from './anthropic';
9
9
  import { LobeAzureOpenAI } from './azureOpenai';
10
+ import { LobeAzureAI } from './azureai';
10
11
  import { LobeBaichuanAI } from './baichuan';
11
12
  import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
12
13
  import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
@@ -141,6 +142,7 @@ class AgentRuntime {
141
142
  ai360: Partial<ClientOptions>;
142
143
  anthropic: Partial<ClientOptions>;
143
144
  azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
145
+ azureai: { apiKey?: string; apiVersion?: string; baseURL?: string };
144
146
  baichuan: Partial<ClientOptions>;
145
147
  bedrock: Partial<LobeBedrockAIParams>;
146
148
  cloudflare: Partial<LobeCloudflareParams>;
@@ -201,6 +203,11 @@ class AgentRuntime {
201
203
  break;
202
204
  }
203
205
 
206
+ case ModelProvider.AzureAI: {
207
+ runtimeModel = new LobeAzureAI(params.azureai);
208
+ break;
209
+ }
210
+
204
211
  case ModelProvider.ZhiPu: {
205
212
  runtimeModel = new LobeZhipuAI(params.zhipu);
206
213
  break;
@@ -307,7 +314,7 @@ class AgentRuntime {
307
314
  }
308
315
 
309
316
  case ModelProvider.Novita: {
310
- runtimeModel = new LobeNovitaAI(params.novita ?? {});
317
+ runtimeModel = new LobeNovitaAI(params.novita);
311
318
  break;
312
319
  }
313
320
 
@@ -317,7 +324,7 @@ class AgentRuntime {
317
324
  }
318
325
 
319
326
  case ModelProvider.Baichuan: {
320
- runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
327
+ runtimeModel = new LobeBaichuanAI(params.baichuan);
321
328
  break;
322
329
  }
323
330
 
@@ -327,12 +334,12 @@ class AgentRuntime {
327
334
  }
328
335
 
329
336
  case ModelProvider.Ai360: {
330
- runtimeModel = new LobeAi360AI(params.ai360 ?? {});
337
+ runtimeModel = new LobeAi360AI(params.ai360);
331
338
  break;
332
339
  }
333
340
 
334
341
  case ModelProvider.SiliconCloud: {
335
- runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
342
+ runtimeModel = new LobeSiliconCloudAI(params.siliconcloud);
336
343
  break;
337
344
  }
338
345
 
@@ -372,12 +379,12 @@ class AgentRuntime {
372
379
  }
373
380
 
374
381
  case ModelProvider.Jina: {
375
- runtimeModel = new LobeJinaAI(params.jina ?? {});
382
+ runtimeModel = new LobeJinaAI(params.jina);
376
383
  break;
377
384
  }
378
385
 
379
386
  case ModelProvider.Cloudflare: {
380
- runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
387
+ runtimeModel = new LobeCloudflareAI(params.cloudflare);
381
388
  break;
382
389
  }
383
390
 
@@ -0,0 +1,109 @@
1
+ import createClient, { ModelClient } from '@azure-rest/ai-inference';
2
+ import { AzureKeyCredential } from '@azure/core-auth';
3
+ import OpenAI from 'openai';
4
+
5
+ import { LobeRuntimeAI } from '../BaseAI';
6
+ import { AgentRuntimeErrorType } from '../error';
7
+ import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
8
+ import { AgentRuntimeError } from '../utils/createError';
9
+ import { debugStream } from '../utils/debugStream';
10
+ import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
11
+ import { StreamingResponse } from '../utils/response';
12
+ import { OpenAIStream, createSSEDataExtractor } from '../utils/streams';
13
+
14
+ export class LobeAzureAI implements LobeRuntimeAI {
15
+ client: ModelClient;
16
+
17
+ constructor(params?: { apiKey?: string; apiVersion?: string; baseURL?: string }) {
18
+ if (!params?.apiKey || !params?.baseURL)
19
+ throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
20
+
21
+ this.client = createClient(params?.baseURL, new AzureKeyCredential(params?.apiKey));
22
+
23
+ this.baseURL = params?.baseURL;
24
+ }
25
+
26
+ baseURL: string;
27
+
28
+ async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
29
+ const { messages, model, ...params } = payload;
30
+ // o1 series models on Azure OpenAI does not support streaming currently
31
+ const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
32
+ try {
33
+ const response = this.client.path('/chat/completions').post({
34
+ body: {
35
+ messages: messages as OpenAI.ChatCompletionMessageParam[],
36
+ model,
37
+ ...params,
38
+ stream: enableStreaming,
39
+ tool_choice: params.tools ? 'auto' : undefined,
40
+ },
41
+ });
42
+
43
+ if (enableStreaming) {
44
+ const stream = await response.asBrowserStream();
45
+
46
+ const [prod, debug] = stream.body!.tee();
47
+
48
+ if (process.env.DEBUG_AZURE_AI_CHAT_COMPLETION === '1') {
49
+ debugStream(debug).catch(console.error);
50
+ }
51
+
52
+ return StreamingResponse(
53
+ OpenAIStream(prod.pipeThrough(createSSEDataExtractor()), {
54
+ callbacks: options?.callback,
55
+ }),
56
+ {
57
+ headers: options?.headers,
58
+ },
59
+ );
60
+ } else {
61
+ const res = await response;
62
+
63
+ // the azure AI inference response is openai compatible
64
+ const stream = transformResponseToStream(res.body as OpenAI.ChatCompletion);
65
+ return StreamingResponse(OpenAIStream(stream, { callbacks: options?.callback }), {
66
+ headers: options?.headers,
67
+ });
68
+ }
69
+ } catch (e) {
70
+ let error = e as { [key: string]: any; code: string; message: string };
71
+
72
+ if (error.code) {
73
+ switch (error.code) {
74
+ case 'DeploymentNotFound': {
75
+ error = { ...error, deployId: model };
76
+ }
77
+ }
78
+ } else {
79
+ error = {
80
+ cause: error.cause,
81
+ message: error.message,
82
+ name: error.name,
83
+ } as any;
84
+ }
85
+
86
+ const errorType = error.code
87
+ ? AgentRuntimeErrorType.ProviderBizError
88
+ : AgentRuntimeErrorType.AgentRuntimeError;
89
+
90
+ throw AgentRuntimeError.chat({
91
+ endpoint: this.maskSensitiveUrl(this.baseURL),
92
+ error,
93
+ errorType,
94
+ provider: ModelProvider.Azure,
95
+ });
96
+ }
97
+ }
98
+
99
+ private maskSensitiveUrl = (url: string) => {
100
+ // 使用正则表达式匹配 'https://' 后面和 '.azure.com/' 前面的内容
101
+ const regex = /^(https:\/\/)([^.]+)(\.azure\.com\/.*)$/;
102
+
103
+ // 使用替换函数
104
+ return url.replace(regex, (match, protocol, subdomain, rest) => {
105
+ // 将子域名替换为 '***'
106
+ return `${protocol}***${rest}`;
107
+ });
108
+ };
109
+ }
@@ -1,255 +1,13 @@
1
1
  // @vitest-environment node
2
- import OpenAI from 'openai';
3
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
2
+ import { ModelProvider } from '@/libs/agent-runtime';
4
3
 
5
- import {
6
- ChatStreamCallbacks,
7
- LobeOpenAICompatibleRuntime,
8
- ModelProvider,
9
- } from '@/libs/agent-runtime';
10
-
11
- import * as debugStreamModule from '../utils/debugStream';
4
+ import { testProvider } from '../providerTestUtils';
12
5
  import { LobeBaichuanAI } from './index';
13
6
 
14
- const provider = ModelProvider.Baichuan;
15
- const defaultBaseURL = 'https://api.baichuan-ai.com/v1';
16
-
17
- const bizErrorType = 'ProviderBizError';
18
- const invalidErrorType = 'InvalidProviderAPIKey';
19
-
20
- // Mock the console.error to avoid polluting test output
21
- vi.spyOn(console, 'error').mockImplementation(() => {});
22
-
23
- let instance: LobeOpenAICompatibleRuntime;
24
-
25
- beforeEach(() => {
26
- instance = new LobeBaichuanAI({ apiKey: 'test' });
27
-
28
- // 使用 vi.spyOn 来模拟 chat.completions.create 方法
29
- vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
30
- new ReadableStream() as any,
31
- );
32
- });
33
-
34
- afterEach(() => {
35
- vi.clearAllMocks();
36
- });
37
-
38
- describe('LobeBaichuanAI', () => {
39
- describe('init', () => {
40
- it('should correctly initialize with an API key', async () => {
41
- const instance = new LobeBaichuanAI({ apiKey: 'test_api_key' });
42
- expect(instance).toBeInstanceOf(LobeBaichuanAI);
43
- expect(instance.baseURL).toEqual(defaultBaseURL);
44
- });
45
- });
46
-
47
- describe('chat', () => {
48
- describe('Error', () => {
49
- it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
50
- // Arrange
51
- const apiError = new OpenAI.APIError(
52
- 400,
53
- {
54
- status: 400,
55
- error: {
56
- message: 'Bad Request',
57
- },
58
- },
59
- 'Error message',
60
- {},
61
- );
62
-
63
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
64
-
65
- // Act
66
- try {
67
- await instance.chat({
68
- messages: [{ content: 'Hello', role: 'user' }],
69
- model: 'Baichuan4',
70
- temperature: 0,
71
- });
72
- } catch (e) {
73
- expect(e).toEqual({
74
- endpoint: defaultBaseURL,
75
- error: {
76
- error: { message: 'Bad Request' },
77
- status: 400,
78
- },
79
- errorType: bizErrorType,
80
- provider,
81
- });
82
- }
83
- });
84
-
85
- it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
86
- try {
87
- new LobeBaichuanAI({});
88
- } catch (e) {
89
- expect(e).toEqual({ errorType: invalidErrorType });
90
- }
91
- });
92
-
93
- it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
94
- // Arrange
95
- const errorInfo = {
96
- stack: 'abc',
97
- cause: {
98
- message: 'api is undefined',
99
- },
100
- };
101
- const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
102
-
103
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
104
-
105
- // Act
106
- try {
107
- await instance.chat({
108
- messages: [{ content: 'Hello', role: 'user' }],
109
- model: 'Baichuan4',
110
- temperature: 0,
111
- });
112
- } catch (e) {
113
- expect(e).toEqual({
114
- endpoint: defaultBaseURL,
115
- error: {
116
- cause: { message: 'api is undefined' },
117
- stack: 'abc',
118
- },
119
- errorType: bizErrorType,
120
- provider,
121
- });
122
- }
123
- });
124
-
125
- it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
126
- // Arrange
127
- const errorInfo = {
128
- stack: 'abc',
129
- cause: { message: 'api is undefined' },
130
- };
131
- const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
132
-
133
- instance = new LobeBaichuanAI({
134
- apiKey: 'test',
135
-
136
- baseURL: 'https://api.abc.com/v1',
137
- });
138
-
139
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
140
-
141
- // Act
142
- try {
143
- await instance.chat({
144
- messages: [{ content: 'Hello', role: 'user' }],
145
- model: 'Baichuan4',
146
- temperature: 0,
147
- });
148
- } catch (e) {
149
- expect(e).toEqual({
150
- endpoint: 'https://api.***.com/v1',
151
- error: {
152
- cause: { message: 'api is undefined' },
153
- stack: 'abc',
154
- },
155
- errorType: bizErrorType,
156
- provider,
157
- });
158
- }
159
- });
160
-
161
- it('should throw an InvalidBaichuanAPIKey error type on 401 status code', async () => {
162
- // Mock the API call to simulate a 401 error
163
- const error = new Error('Unauthorized') as any;
164
- error.status = 401;
165
- vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
166
-
167
- try {
168
- await instance.chat({
169
- messages: [{ content: 'Hello', role: 'user' }],
170
- model: 'Baichuan4',
171
- temperature: 0,
172
- });
173
- } catch (e) {
174
- // Expect the chat method to throw an error with InvalidBaichuanAPIKey
175
- expect(e).toEqual({
176
- endpoint: defaultBaseURL,
177
- error: new Error('Unauthorized'),
178
- errorType: invalidErrorType,
179
- provider,
180
- });
181
- }
182
- });
183
-
184
- it('should return AgentRuntimeError for non-OpenAI errors', async () => {
185
- // Arrange
186
- const genericError = new Error('Generic Error');
187
-
188
- vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
189
-
190
- // Act
191
- try {
192
- await instance.chat({
193
- messages: [{ content: 'Hello', role: 'user' }],
194
- model: 'Baichuan4',
195
- temperature: 0,
196
- });
197
- } catch (e) {
198
- expect(e).toEqual({
199
- endpoint: defaultBaseURL,
200
- errorType: 'AgentRuntimeError',
201
- provider,
202
- error: {
203
- name: genericError.name,
204
- cause: genericError.cause,
205
- message: genericError.message,
206
- stack: genericError.stack,
207
- },
208
- });
209
- }
210
- });
211
- });
212
-
213
- describe('DEBUG', () => {
214
- it('should call debugStream and return StreamingTextResponse when DEBUG_BAICHUAN_CHAT_COMPLETION is 1', async () => {
215
- // Arrange
216
- const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
217
- const mockDebugStream = new ReadableStream({
218
- start(controller) {
219
- controller.enqueue('Debug stream content');
220
- controller.close();
221
- },
222
- }) as any;
223
- mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
224
-
225
- // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
226
- (instance['client'].chat.completions.create as Mock).mockResolvedValue({
227
- tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
228
- });
229
-
230
- // 保存原始环境变量值
231
- const originalDebugValue = process.env.DEBUG_BAICHUAN_CHAT_COMPLETION;
232
-
233
- // 模拟环境变量
234
- process.env.DEBUG_BAICHUAN_CHAT_COMPLETION = '1';
235
- vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
236
-
237
- // 执行测试
238
- // 运行你的测试函数,确保它会在条件满足时调用 debugStream
239
- // 假设的测试函数调用,你可能需要根据实际情况调整
240
- await instance.chat({
241
- messages: [{ content: 'Hello', role: 'user' }],
242
- model: 'Baichuan4',
243
- stream: true,
244
- temperature: 0,
245
- });
246
-
247
- // 验证 debugStream 被调用
248
- expect(debugStreamModule.debugStream).toHaveBeenCalled();
249
-
250
- // 恢复原始环境变量值
251
- process.env.DEBUG_BAICHUAN_CHAT_COMPLETION = originalDebugValue;
252
- });
253
- });
254
- });
7
+ testProvider({
8
+ Runtime: LobeBaichuanAI,
9
+ provider: ModelProvider.Baichuan,
10
+ defaultBaseURL: 'https://api.baichuan-ai.com/v1',
11
+ chatDebugEnv: 'DEBUG_BAICHUAN_CHAT_COMPLETION',
12
+ chatModel: 'hunyuan-lite',
255
13
  });
@@ -1,3 +1,5 @@
1
+ import { ChatModelCard } from '@/types/llm';
2
+
1
3
  import { LobeRuntimeAI } from '../BaseAI';
2
4
  import { AgentRuntimeErrorType } from '../error';
3
5
  import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
@@ -12,8 +14,6 @@ import { debugStream } from '../utils/debugStream';
12
14
  import { StreamingResponse } from '../utils/response';
13
15
  import { createCallbacksTransformer } from '../utils/streams';
14
16
 
15
- import { ChatModelCard } from '@/types/llm';
16
-
17
17
  export interface CloudflareModelCard {
18
18
  description: string;
19
19
  name: string;
@@ -34,7 +34,7 @@ export class LobeCloudflareAI implements LobeRuntimeAI {
34
34
  accountID: string;
35
35
  apiKey?: string;
36
36
 
37
- constructor({ apiKey, baseURLOrAccountID }: LobeCloudflareParams) {
37
+ constructor({ apiKey, baseURLOrAccountID }: LobeCloudflareParams = {}) {
38
38
  if (!baseURLOrAccountID) {
39
39
  throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
40
40
  }
@@ -128,30 +128,34 @@ export class LobeCloudflareAI implements LobeRuntimeAI {
128
128
 
129
129
  return modelList
130
130
  .map((model) => {
131
- const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.toLowerCase() === m.id.toLowerCase());
131
+ const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
132
+ (m) => model.name.toLowerCase() === m.id.toLowerCase(),
133
+ );
132
134
 
133
135
  return {
134
136
  contextWindowTokens: model.properties?.max_total_tokens
135
137
  ? Number(model.properties.max_total_tokens)
136
- : knownModel?.contextWindowTokens ?? undefined,
137
- displayName: knownModel?.displayName ?? (model.properties?.["beta"] === "true" ? `${model.name} (Beta)` : undefined),
138
+ : (knownModel?.contextWindowTokens ?? undefined),
139
+ displayName:
140
+ knownModel?.displayName ??
141
+ (model.properties?.['beta'] === 'true' ? `${model.name} (Beta)` : undefined),
138
142
  enabled: knownModel?.enabled || false,
139
143
  functionCall:
140
- model.description.toLowerCase().includes('function call')
141
- || model.properties?.["function_calling"] === "true"
142
- || knownModel?.abilities?.functionCall
143
- || false,
144
+ model.description.toLowerCase().includes('function call') ||
145
+ model.properties?.['function_calling'] === 'true' ||
146
+ knownModel?.abilities?.functionCall ||
147
+ false,
144
148
  id: model.name,
145
149
  reasoning:
146
- model.name.toLowerCase().includes('deepseek-r1')
147
- || knownModel?.abilities?.reasoning
148
- || false,
150
+ model.name.toLowerCase().includes('deepseek-r1') ||
151
+ knownModel?.abilities?.reasoning ||
152
+ false,
149
153
  vision:
150
- model.name.toLowerCase().includes('vision')
151
- || model.task?.name.toLowerCase().includes('image-to-text')
152
- || model.description.toLowerCase().includes('vision')
153
- || knownModel?.abilities?.vision
154
- || false,
154
+ model.name.toLowerCase().includes('vision') ||
155
+ model.task?.name.toLowerCase().includes('image-to-text') ||
156
+ model.description.toLowerCase().includes('vision') ||
157
+ knownModel?.abilities?.vision ||
158
+ false,
155
159
  };
156
160
  })
157
161
  .filter(Boolean) as ChatModelCard[];
@@ -1,5 +1,6 @@
1
1
  export { default as AgentRuntime } from './AgentRuntime';
2
2
  export { LobeAnthropicAI } from './anthropic';
3
+ export { LobeAzureAI } from './azureai';
3
4
  export { LobeAzureOpenAI } from './azureOpenai';
4
5
  export * from './BaseAI';
5
6
  export { LobeBedrockAI } from './bedrock';
@@ -26,6 +26,7 @@ export enum ModelProvider {
26
26
  Ai360 = 'ai360',
27
27
  Anthropic = 'anthropic',
28
28
  Azure = 'azure',
29
+ AzureAI = 'azureai',
29
30
  Baichuan = 'baichuan',
30
31
  Bedrock = 'bedrock',
31
32
  Cloudflare = 'cloudflare',