@lobehub/chat 1.57.1 → 1.58.0

This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (86)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/docker-compose/local/docker-compose.yml +1 -0
  4. package/locales/ar/modelProvider.json +24 -0
  5. package/locales/ar/models.json +60 -0
  6. package/locales/ar/providers.json +12 -0
  7. package/locales/bg-BG/modelProvider.json +24 -0
  8. package/locales/bg-BG/models.json +60 -0
  9. package/locales/bg-BG/providers.json +12 -0
  10. package/locales/de-DE/modelProvider.json +24 -0
  11. package/locales/de-DE/models.json +60 -0
  12. package/locales/de-DE/providers.json +12 -0
  13. package/locales/en-US/modelProvider.json +24 -0
  14. package/locales/en-US/models.json +60 -0
  15. package/locales/en-US/providers.json +12 -0
  16. package/locales/es-ES/modelProvider.json +24 -0
  17. package/locales/es-ES/models.json +60 -0
  18. package/locales/es-ES/providers.json +12 -0
  19. package/locales/fa-IR/modelProvider.json +30 -0
  20. package/locales/fa-IR/models.json +60 -0
  21. package/locales/fa-IR/providers.json +12 -0
  22. package/locales/fr-FR/modelProvider.json +24 -0
  23. package/locales/fr-FR/models.json +60 -0
  24. package/locales/fr-FR/providers.json +12 -0
  25. package/locales/it-IT/modelProvider.json +24 -0
  26. package/locales/it-IT/models.json +60 -0
  27. package/locales/it-IT/providers.json +12 -0
  28. package/locales/ja-JP/modelProvider.json +24 -0
  29. package/locales/ja-JP/models.json +60 -0
  30. package/locales/ja-JP/providers.json +12 -0
  31. package/locales/ko-KR/modelProvider.json +24 -0
  32. package/locales/ko-KR/models.json +60 -0
  33. package/locales/ko-KR/providers.json +12 -0
  34. package/locales/nl-NL/modelProvider.json +24 -0
  35. package/locales/nl-NL/models.json +60 -0
  36. package/locales/nl-NL/providers.json +12 -0
  37. package/locales/pl-PL/modelProvider.json +24 -0
  38. package/locales/pl-PL/models.json +60 -0
  39. package/locales/pl-PL/providers.json +12 -0
  40. package/locales/pt-BR/modelProvider.json +24 -0
  41. package/locales/pt-BR/models.json +60 -0
  42. package/locales/pt-BR/providers.json +12 -0
  43. package/locales/ru-RU/modelProvider.json +24 -0
  44. package/locales/ru-RU/models.json +60 -0
  45. package/locales/ru-RU/providers.json +12 -0
  46. package/locales/tr-TR/modelProvider.json +30 -0
  47. package/locales/tr-TR/models.json +60 -0
  48. package/locales/tr-TR/providers.json +12 -0
  49. package/locales/vi-VN/modelProvider.json +24 -0
  50. package/locales/vi-VN/models.json +60 -0
  51. package/locales/vi-VN/providers.json +12 -0
  52. package/locales/zh-CN/modelProvider.json +24 -0
  53. package/locales/zh-CN/models.json +1112 -1052
  54. package/locales/zh-CN/providers.json +80 -68
  55. package/locales/zh-TW/modelProvider.json +24 -0
  56. package/locales/zh-TW/models.json +60 -0
  57. package/locales/zh-TW/providers.json +12 -0
  58. package/package.json +4 -2
  59. package/src/app/[variants]/(main)/settings/provider/(detail)/azureai/page.tsx +58 -0
  60. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +13 -2
  61. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +6 -8
  62. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/index.tsx +5 -6
  63. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx +4 -3
  64. package/src/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext.ts +2 -0
  65. package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx +6 -7
  66. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +1 -1
  67. package/src/config/aiModels/azureai.ts +18 -0
  68. package/src/config/aiModels/index.ts +3 -0
  69. package/src/config/modelProviders/azure.ts +2 -1
  70. package/src/config/modelProviders/azureai.ts +19 -0
  71. package/src/config/modelProviders/index.ts +3 -0
  72. package/src/database/server/models/aiProvider.ts +2 -0
  73. package/src/libs/agent-runtime/AgentRuntime.ts +13 -6
  74. package/src/libs/agent-runtime/azureai/index.ts +109 -0
  75. package/src/libs/agent-runtime/baichuan/index.test.ts +8 -250
  76. package/src/libs/agent-runtime/cloudflare/index.ts +22 -18
  77. package/src/libs/agent-runtime/index.ts +1 -0
  78. package/src/libs/agent-runtime/types/type.ts +1 -0
  79. package/src/libs/agent-runtime/utils/streams/__snapshots__/protocol.test.ts.snap +331 -0
  80. package/src/libs/agent-runtime/utils/streams/protocol.test.ts +137 -0
  81. package/src/libs/agent-runtime/utils/streams/protocol.ts +34 -0
  82. package/src/locales/default/modelProvider.ts +25 -0
  83. package/src/server/modules/AgentRuntime/index.ts +8 -1
  84. package/src/services/chat.ts +12 -3
  85. package/src/types/aiProvider.ts +1 -0
  86. package/src/types/user/settings/keyVaults.ts +1 -0

package/src/app/[variants]/(main)/settings/provider/features/ModelList/ModelConfigModal/index.tsx

@@ -1,13 +1,13 @@
 import { Modal } from '@lobehub/ui';
 import { Button, FormInstance } from 'antd';
 import isEqual from 'fast-deep-equal';
-import { memo, useState } from 'react';
+import { memo, use, useState } from 'react';
 import { useTranslation } from 'react-i18next';

-import { ModelProvider } from '@/libs/agent-runtime';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';

 import ModelConfigForm from '../CreateNewModelModal/Form';
+import { ProviderSettingsContext } from '../ProviderSettingsContext';

 interface ModelConfigModalProps {
   id: string;
@@ -28,6 +28,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ id, open, setOpen }) =>
   const closeModal = () => {
     setOpen(false);
   };
+  const { showDeployName } = use(ProviderSettingsContext);

   return (
     <Modal
@@ -66,7 +67,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ id, open, setOpen }) =>
           idEditable={false}
           initialValues={model}
           onFormInstanceReady={setFormInstance}
-          showAzureDeployName={editingProvider === ModelProvider.Azure || editingProvider === ModelProvider.Doubao}
+          showDeployName={showDeployName}
           type={model?.type}
         />
       </Modal>

package/src/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext.ts

@@ -2,7 +2,9 @@ import { createContext } from 'react';

 export interface ProviderSettingsContextValue {
   modelEditable?: boolean;
+  sdkType?: string;
   showAddNewModel?: boolean;
+  showDeployName?: boolean;
   showModelFetcher?: boolean;
 }


package/src/app/[variants]/(main)/settings/provider/features/ModelList/index.tsx

@@ -3,7 +3,6 @@
 import { Suspense, memo } from 'react';
 import { Flexbox } from 'react-layout-kit';

-import { ProviderSettingsContext } from '@/app/[variants]/(main)/settings/provider/features/ModelList/ProviderSettingsContext';
 import { useIsMobile } from '@/hooks/useIsMobile';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';

@@ -11,6 +10,7 @@ import DisabledModels from './DisabledModels';
 import EmptyModels from './EmptyModels';
 import EnabledModelList from './EnabledModelList';
 import ModelTitle from './ModelTitle';
+import { ProviderSettingsContext, ProviderSettingsContextValue } from './ProviderSettingsContext';
 import SearchResult from './SearchResult';
 import SkeletonList from './SkeletonList';

@@ -41,19 +41,18 @@ const Content = memo<ContentProps>(({ id }) => {
   );
 });

-interface ModelListProps {
+interface ModelListProps extends ProviderSettingsContextValue {
   id: string;
-  modelEditable?: boolean;
-  showAddNewModel?: boolean;
-  showModelFetcher?: boolean;
 }

 const ModelList = memo<ModelListProps>(
-  ({ id, showModelFetcher, showAddNewModel, modelEditable = true }) => {
+  ({ id, showModelFetcher, sdkType, showAddNewModel, showDeployName, modelEditable = true }) => {
     const mobile = useIsMobile();

     return (
-      <ProviderSettingsContext value={{ modelEditable, showAddNewModel, showModelFetcher }}>
+      <ProviderSettingsContext
+        value={{ modelEditable, sdkType, showAddNewModel, showDeployName, showModelFetcher }}
+      >
         <Flexbox gap={16} paddingInline={mobile ? 12 : 0}>
           <ModelTitle
             provider={id}

package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx

@@ -127,7 +127,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
       defaultShowBrowserRequest,
       disableBrowserRequest,
       showChecker = true,
-    } = settings;
+    } = settings || {};
     const { t } = useTranslation('modelProvider');
     const [form] = Form.useForm();
     const { cx, styles, theme } = useStyles();

package/src/config/aiModels/azureai.ts

@@ -0,0 +1,18 @@
+import { AIChatModelCard } from '@/types/aiModel';
+
+const azureChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 128_000,
+    displayName: 'DeepSeek R1',
+    id: 'DeepSeek-R1',
+    maxOutput: 4096,
+    type: 'chat',
+  },
+];
+
+export const allModels = [...azureChatModels];
+
+export default allModels;

package/src/config/aiModels/index.ts

@@ -4,6 +4,7 @@ import { default as ai21 } from './ai21';
 import { default as ai360 } from './ai360';
 import { default as anthropic } from './anthropic';
 import { default as azure } from './azure';
+import { default as azureai } from './azureai';
 import { default as baichuan } from './baichuan';
 import { default as bedrock } from './bedrock';
 import { default as cloudflare } from './cloudflare';
@@ -68,6 +69,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   ai360,
   anthropic,
   azure,
+  azureai,
   baichuan,
   bedrock,
   cloudflare,
@@ -113,6 +115,7 @@ export { default as ai21 } from './ai21';
 export { default as ai360 } from './ai360';
 export { default as anthropic } from './anthropic';
 export { default as azure } from './azure';
+export { default as azureai } from './azureai';
 export { default as baichuan } from './baichuan';
 export { default as bedrock } from './bedrock';
 export { default as cloudflare } from './cloudflare';

package/src/config/modelProviders/azure.ts

@@ -58,10 +58,11 @@ const Azure: ModelProviderCard = {
     'Azure 提供多种先进的AI模型,包括GPT-3.5和最新的GPT-4系列,支持多种数据类型和复杂任务,致力于安全、可靠和可持续的AI解决方案。',
   id: 'azure',
   modelsUrl: 'https://learn.microsoft.com/azure/ai-services/openai/concepts/models',
-  name: 'Azure',
+  name: 'Azure OpenAI',
   settings: {
     defaultShowBrowserRequest: true,
     sdkType: 'azure',
+    showDeployName: true,
   },
   url: 'https://azure.microsoft.com',
 };

package/src/config/modelProviders/azureai.ts

@@ -0,0 +1,19 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref: https://learn.microsoft.com/azure/ai-services/openai/concepts/models
+const Azure: ModelProviderCard = {
+  chatModels: [],
+  description:
+    'Azure 提供多种先进的AI模型,包括GPT-3.5和最新的GPT-4系列,支持多种数据类型和复杂任务,致力于安全、可靠和可持续的AI解决方案。',
+  id: 'azureai',
+  modelsUrl: 'https://ai.azure.com/explore/models',
+  name: 'Azure AI',
+  settings: {
+    defaultShowBrowserRequest: true,
+    sdkType: 'azureai',
+    showDeployName: true,
+  },
+  url: 'https://ai.azure.com',
+};
+
+export default Azure;

package/src/config/modelProviders/index.ts

@@ -4,6 +4,7 @@ import Ai21Provider from './ai21';
 import Ai360Provider from './ai360';
 import AnthropicProvider from './anthropic';
 import AzureProvider from './azure';
+import AzureAIProvider from './azureai';
 import BaichuanProvider from './baichuan';
 import BedrockProvider from './bedrock';
 import CloudflareProvider from './cloudflare';
@@ -92,6 +93,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
 export const DEFAULT_MODEL_PROVIDER_LIST = [
   OpenAIProvider,
   { ...AzureProvider, chatModels: [] },
+  AzureAIProvider,
   OllamaProvider,
   VLLMProvider,
   AnthropicProvider,
@@ -148,6 +150,7 @@ export { default as Ai21ProviderCard } from './ai21';
 export { default as Ai360ProviderCard } from './ai360';
 export { default as AnthropicProviderCard } from './anthropic';
 export { default as AzureProviderCard } from './azure';
+export { default as AzureAIProviderCard } from './azureai';
 export { default as BaichuanProviderCard } from './baichuan';
 export { default as BedrockProviderCard } from './bedrock';
 export { default as CloudflareProviderCard } from './cloudflare';

package/src/database/server/models/aiProvider.ts

@@ -1,4 +1,5 @@
 import { and, asc, desc, eq } from 'drizzle-orm/expressions';
+import { isEmpty } from 'lodash-es';

 import { LobeChatDatabase } from '@/database/type';
 import { ModelProvider } from '@/libs/agent-runtime';
@@ -207,6 +208,7 @@ export class AiProviderModel {
       ...result,
       fetchOnClient: typeof result.fetchOnClient === 'boolean' ? result.fetchOnClient : undefined,
       keyVaults,
+      settings: isEmpty(result.settings) ? undefined : result.settings,
     } as AiProviderDetailItem;
   };
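
A note on the new isEmpty guard: a JSONB settings column can come back as {} rather than NULL, and lodash's isEmpty returns true for {}, null, and undefined alike, so an empty row value is normalized to undefined and consumers fall back to defaults. This pairs with the `} = settings || {};` fix in ProviderConfig above. A minimal sketch of the behaviour (the helper name is hypothetical):

import { isEmpty } from 'lodash-es';

// hypothetical helper mirroring the one-liner added in aiProvider.ts
const normalizeSettings = <T extends object>(settings?: T | null): T | undefined =>
  isEmpty(settings) ? undefined : (settings as T);

normalizeSettings({}); // undefined: an empty JSONB object is dropped
normalizeSettings(null); // undefined
normalizeSettings({ showDeployName: true }); // kept as-is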
 
package/src/libs/agent-runtime/AgentRuntime.ts

@@ -7,6 +7,7 @@ import { LobeAi21AI } from './ai21';
 import { LobeAi360AI } from './ai360';
 import { LobeAnthropicAI } from './anthropic';
 import { LobeAzureOpenAI } from './azureOpenai';
+import { LobeAzureAI } from './azureai';
 import { LobeBaichuanAI } from './baichuan';
 import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
 import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
@@ -141,6 +142,7 @@ class AgentRuntime {
       ai360: Partial<ClientOptions>;
       anthropic: Partial<ClientOptions>;
       azure: { apiKey?: string; apiVersion?: string; baseURL?: string };
+      azureai: { apiKey?: string; apiVersion?: string; baseURL?: string };
       baichuan: Partial<ClientOptions>;
       bedrock: Partial<LobeBedrockAIParams>;
       cloudflare: Partial<LobeCloudflareParams>;
@@ -201,6 +203,11 @@ class AgentRuntime {
        break;
      }

+      case ModelProvider.AzureAI: {
+        runtimeModel = new LobeAzureAI(params.azureai);
+        break;
+      }
+
      case ModelProvider.ZhiPu: {
        runtimeModel = new LobeZhipuAI(params.zhipu);
        break;
@@ -307,7 +314,7 @@ class AgentRuntime {
      }

      case ModelProvider.Novita: {
-        runtimeModel = new LobeNovitaAI(params.novita ?? {});
+        runtimeModel = new LobeNovitaAI(params.novita);
        break;
      }

@@ -317,7 +324,7 @@ class AgentRuntime {
      }

      case ModelProvider.Baichuan: {
-        runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
+        runtimeModel = new LobeBaichuanAI(params.baichuan);
        break;
      }

@@ -327,12 +334,12 @@ class AgentRuntime {
      }

      case ModelProvider.Ai360: {
-        runtimeModel = new LobeAi360AI(params.ai360 ?? {});
+        runtimeModel = new LobeAi360AI(params.ai360);
        break;
      }

      case ModelProvider.SiliconCloud: {
-        runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
+        runtimeModel = new LobeSiliconCloudAI(params.siliconcloud);
        break;
      }

@@ -372,12 +379,12 @@ class AgentRuntime {
      }

      case ModelProvider.Jina: {
-        runtimeModel = new LobeJinaAI(params.jina ?? {});
+        runtimeModel = new LobeJinaAI(params.jina);
        break;
      }

      case ModelProvider.Cloudflare: {
-        runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
+        runtimeModel = new LobeCloudflareAI(params.cloudflare);
        break;
      }
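
The dropped `?? {}` fallbacks suggest these openai-compatible runtime constructors now tolerate an undefined options bag themselves. A hedged sketch of that pattern (the class and URL below are illustrative, not from this diff):

// illustrative only: a defaulted options parameter makes
// `new Runtime(params.novita)` safe even when params.novita is undefined
class ExampleRuntime {
  baseURL: string;

  constructor(options: { apiKey?: string; baseURL?: string } = {}) {
    this.baseURL = options.baseURL ?? 'https://api.example.com/v1';
  }
}

new ExampleRuntime(); // ok: options defaults to {}
new ExampleRuntime(undefined); // ok: same as above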
 
package/src/libs/agent-runtime/azureai/index.ts

@@ -0,0 +1,109 @@
+import createClient, { ModelClient } from '@azure-rest/ai-inference';
+import { AzureKeyCredential } from '@azure/core-auth';
+import OpenAI from 'openai';
+
+import { LobeRuntimeAI } from '../BaseAI';
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+import { AgentRuntimeError } from '../utils/createError';
+import { debugStream } from '../utils/debugStream';
+import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
+import { StreamingResponse } from '../utils/response';
+import { OpenAIStream, createSSEDataExtractor } from '../utils/streams';
+
+export class LobeAzureAI implements LobeRuntimeAI {
+  client: ModelClient;
+
+  constructor(params?: { apiKey?: string; apiVersion?: string; baseURL?: string }) {
+    if (!params?.apiKey || !params?.baseURL)
+      throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
+
+    this.client = createClient(params?.baseURL, new AzureKeyCredential(params?.apiKey));
+
+    this.baseURL = params?.baseURL;
+  }
+
+  baseURL: string;
+
+  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
+    const { messages, model, ...params } = payload;
+    // o1 series models on Azure OpenAI do not currently support streaming
+    const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
+    try {
+      const response = this.client.path('/chat/completions').post({
+        body: {
+          messages: messages as OpenAI.ChatCompletionMessageParam[],
+          model,
+          ...params,
+          stream: enableStreaming,
+          tool_choice: params.tools ? 'auto' : undefined,
+        },
+      });
+
+      if (enableStreaming) {
+        const stream = await response.asBrowserStream();
+
+        const [prod, debug] = stream.body!.tee();
+
+        if (process.env.DEBUG_AZURE_AI_CHAT_COMPLETION === '1') {
+          debugStream(debug).catch(console.error);
+        }
+
+        return StreamingResponse(
+          OpenAIStream(prod.pipeThrough(createSSEDataExtractor()), {
+            callbacks: options?.callback,
+          }),
+          {
+            headers: options?.headers,
+          },
+        );
+      } else {
+        const res = await response;
+
+        // the Azure AI inference response is OpenAI-compatible
+        const stream = transformResponseToStream(res.body as OpenAI.ChatCompletion);
+        return StreamingResponse(OpenAIStream(stream, { callbacks: options?.callback }), {
+          headers: options?.headers,
+        });
+      }
+    } catch (e) {
+      let error = e as { [key: string]: any; code: string; message: string };
+
+      if (error.code) {
+        switch (error.code) {
+          case 'DeploymentNotFound': {
+            error = { ...error, deployId: model };
+          }
+        }
+      } else {
+        error = {
+          cause: error.cause,
+          message: error.message,
+          name: error.name,
+        } as any;
+      }
+
+      const errorType = error.code
+        ? AgentRuntimeErrorType.ProviderBizError
+        : AgentRuntimeErrorType.AgentRuntimeError;
+
+      throw AgentRuntimeError.chat({
+        endpoint: this.maskSensitiveUrl(this.baseURL),
+        error,
+        errorType,
+        provider: ModelProvider.Azure,
+      });
+    }
+  }
+
+  private maskSensitiveUrl = (url: string) => {
+    // use a regex to match the segment after 'https://' and before '.azure.com/'
+    const regex = /^(https:\/\/)([^.]+)(\.azure\.com\/.*)$/;
+
+    // replace via a callback
+    return url.replace(regex, (match, protocol, subdomain, rest) => {
+      // swap the subdomain for '***'
+      return `${protocol}***${rest}`;
+    });
+  };
+}
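
A minimal usage sketch for the new runtime, assuming the key, endpoint, and env var below are placeholders (the barrel import path assumes the one-line export added to src/libs/agent-runtime/index.ts in the file list). The constructor throws InvalidProviderAPIKey when either value is missing; chat returns a streaming Response, except for o1-family models, which are routed through the non-streaming path. Note the masking regex only rewrites single-label hosts, e.g. https://my-endpoint.azure.com/models becomes https://***.azure.com/models.

import { LobeAzureAI } from '@/libs/agent-runtime';

const runtime = new LobeAzureAI({
  apiKey: process.env.AZUREAI_API_KEY, // hypothetical env var
  baseURL: 'https://my-endpoint.azure.com/models', // hypothetical endpoint
});

// SSE chunks are parsed by createSSEDataExtractor and adapted by OpenAIStream
const response = await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'DeepSeek-R1',
  temperature: 0.7,
});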

package/src/libs/agent-runtime/baichuan/index.test.ts

@@ -1,255 +1,13 @@
 // @vitest-environment node
-import OpenAI from 'openai';
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { ModelProvider } from '@/libs/agent-runtime';

-import {
-  ChatStreamCallbacks,
-  LobeOpenAICompatibleRuntime,
-  ModelProvider,
-} from '@/libs/agent-runtime';
-
-import * as debugStreamModule from '../utils/debugStream';
+import { testProvider } from '../providerTestUtils';
 import { LobeBaichuanAI } from './index';

-const provider = ModelProvider.Baichuan;
-const defaultBaseURL = 'https://api.baichuan-ai.com/v1';
-
-const bizErrorType = 'ProviderBizError';
-const invalidErrorType = 'InvalidProviderAPIKey';
-
-// Mock the console.error to avoid polluting test output
-vi.spyOn(console, 'error').mockImplementation(() => {});
-
-let instance: LobeOpenAICompatibleRuntime;
-
-beforeEach(() => {
-  instance = new LobeBaichuanAI({ apiKey: 'test' });
-
-  // use vi.spyOn to mock the chat.completions.create method
-  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-    new ReadableStream() as any,
-  );
-});
-
-afterEach(() => {
-  vi.clearAllMocks();
-});
-
-describe('LobeBaichuanAI', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobeBaichuanAI({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobeBaichuanAI);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    describe('Error', () => {
-      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'Baichuan4',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeBaichuanAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'Baichuan4',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeBaichuanAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'Baichuan4',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidBaichuanAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('Unauthorized') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'Baichuan4',
-            temperature: 0,
-          });
-        } catch (e) {
-          // Expect the chat method to throw an error with InvalidBaichuanAPIKey
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('Unauthorized'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'Baichuan4',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_BAICHUAN_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // add a toReadableStream method
-
-        // mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // save the original env value
-        const originalDebugValue = process.env.DEBUG_BAICHUAN_CHAT_COMPLETION;
-
-        // mock the env variable
-        process.env.DEBUG_BAICHUAN_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // run the test
-        // make sure your test function calls debugStream when the condition is met
-        // hypothetical invocation; adjust to the actual setup as needed
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'Baichuan4',
-          stream: true,
-          temperature: 0,
-        });
-
-        // verify debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // restore the original env value
-        process.env.DEBUG_BAICHUAN_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
-  });
+testProvider({
+  Runtime: LobeBaichuanAI,
+  provider: ModelProvider.Baichuan,
+  defaultBaseURL: 'https://api.baichuan-ai.com/v1',
+  chatDebugEnv: 'DEBUG_BAICHUAN_CHAT_COMPLETION',
+  chatModel: 'hunyuan-lite',
 });
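
testProvider comes from the new shared providerTestUtils module, whose source is not part of this diff; it replaces the per-provider boilerplate removed above (init, error mapping, URL desensitization, DEBUG streaming). An inferred, hypothetical option shape, based solely on this call site:

import { ModelProvider } from '@/libs/agent-runtime';

// inferred from the call above; the real providerTestUtils may differ
interface TestProviderOptions {
  Runtime: new (options?: { apiKey?: string; baseURL?: string }) => any;
  chatDebugEnv: string; // env flag gating debugStream, e.g. DEBUG_BAICHUAN_CHAT_COMPLETION
  chatModel: string; // model id used by the chat test cases
  defaultBaseURL: string; // expected baseURL when none is configured
  provider: ModelProvider;
}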