@lobehub/chat 1.1.18 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/package.json +1 -1
- package/src/app/(main)/chat/(workspace)/@portal/features/Tools/ToolUI/ToolRender.tsx +2 -2
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +7 -0
- package/src/app/api/chat/agentRuntime.ts +7 -0
- package/src/components/ModelProviderIcon/index.tsx +5 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/baichuan.ts +55 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/const/settings/llm.ts +5 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
- package/src/libs/agent-runtime/baichuan/index.test.ts +255 -0
- package/src/libs/agent-runtime/baichuan/index.ts +32 -0
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/server/globalConfig/index.ts +2 -0
- package/src/tools/portals.ts +3 -0
- package/src/types/tool/builtin.ts +2 -2
- package/src/types/user/settings/keyVaults.ts +1 -0
- package/src/tools/docks.ts +0 -3
package/CHANGELOG.md
CHANGED

@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.2.1](https://github.com/lobehub/lobe-chat/compare/v1.2.0...v1.2.1)
+
+<sup>Released on **2024-07-01**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Refactor the portal file.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Refactor the portal file, closes [#3106](https://github.com/lobehub/lobe-chat/issues/3106) ([2d9635f](https://github.com/lobehub/lobe-chat/commit/2d9635f))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 1.2.0](https://github.com/lobehub/lobe-chat/compare/v1.1.18...v1.2.0)
+
+<sup>Released on **2024-07-01**</sup>
+
+#### ✨ Features
+
+- **misc**: Add Baichuan model provider.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add Baichuan model provider, closes [#3097](https://github.com/lobehub/lobe-chat/issues/3097) ([378cf02](https://github.com/lobehub/lobe-chat/commit/378cf02))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.1.18](https://github.com/lobehub/lobe-chat/compare/v1.1.17...v1.1.18)
 
 <sup>Released on **2024-07-01**</sup>
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.1.18",
+  "version": "1.2.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/(main)/chat/(workspace)/@portal/features/Tools/ToolUI/ToolRender.tsx
CHANGED

@@ -3,7 +3,7 @@ import isEqual from 'fast-deep-equal';
 import PluginRender from '@/features/PluginsUI/Render';
 import { useChatStore } from '@/store/chat';
 import { chatPortalSelectors, chatSelectors } from '@/store/chat/selectors';
-import { BuiltinToolsDocks } from '@/tools/docks';
+import { BuiltinToolsPortals } from '@/tools/portals';
 import { safeParseJSON } from '@/utils/safeParseJSON';
 
 const ToolRender = () => {
@@ -22,7 +22,7 @@ const ToolRender = () => {
 
   if (!args) return;
 
-  const Render = BuiltinToolsDocks[plugin.identifier];
+  const Render = BuiltinToolsPortals[plugin.identifier];
 
   if (!Render)
     return (
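The hunks above only swap which registry `ToolRender` reads from; the new `src/tools/portals.ts` appears in the file summary as `+3 -0`, but its body is not shown in this diff. A minimal sketch of the registry shape the lookup implies; the component name and identifier below are illustrative placeholders, not from the package:

```ts
import { BuiltinPortal } from '@/types/tool/builtin';

// Hypothetical portal component standing in for a real builtin tool's portal view.
declare const DallePortal: BuiltinPortal;

// ToolRender indexes this map by plugin.identifier and falls back to a
// default view when no entry exists.
export const BuiltinToolsPortals: Record<string, BuiltinPortal> = {
  dalle3: DallePortal,
};
```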
package/src/app/(main)/settings/llm/ProviderList/providers.tsx
CHANGED

@@ -1,5 +1,6 @@
 import {
   Anthropic,
+  Baichuan,
   Claude,
   DeepSeek,
   Gemini,
@@ -24,6 +25,7 @@ import urlJoin from 'url-join';
 
 import {
   AnthropicProviderCard,
+  BaichuanProviderCard,
   DeepSeekProviderCard,
   GoogleProviderCard,
   GroqProviderCard,
@@ -170,6 +172,11 @@ export const useProviderList = (): ProviderItem[] => {
        docUrl: urlJoin(BASE_DOC_URL, 'stepfun'),
        title: <Stepfun.Combine size={20} type={'color'} />,
      },
+      {
+        ...BaichuanProviderCard,
+        docUrl: urlJoin(BASE_DOC_URL, 'baichuan'),
+        title: <Baichuan.Combine size={20} type={'color'} />,
+      },
    ],
    [azureProvider, ollamaProvider, openAIProvider, bedrockProvider],
  );
package/src/app/api/chat/agentRuntime.ts
CHANGED

@@ -170,6 +170,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
       const apiKey = apiKeyManager.pick(payload?.apiKey || STEPFUN_API_KEY);
 
+      return { apiKey };
+    }
+    case ModelProvider.Baichuan: {
+      const { BAICHUAN_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || BAICHUAN_API_KEY);
+
       return { apiKey };
     }
   }
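The Baichuan case mirrors the providers around it: a key supplied by the user (via the JWT payload) wins, otherwise the server's `BAICHUAN_API_KEY` is used. A rough sketch of what `apiKeyManager.pick` plausibly does with that value, given the `API_KEY_SELECT_MODE` option visible in `src/config/llm.ts`; the real implementation is outside this diff and may differ:

```ts
// Assumes the env value may hold a comma-separated pool of keys.
const pick = (apiKeys: string = ''): string => {
  const keys = apiKeys.split(',').filter(Boolean);
  if (keys.length === 0) return '';
  // 'random' selection mode: any key in the pool may be returned.
  return keys[Math.floor(Math.random() * keys.length)];
};
```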
package/src/components/ModelProviderIcon/index.tsx
CHANGED

@@ -1,6 +1,7 @@
 import {
   Anthropic,
   Azure,
+  Baichuan,
   Bedrock,
   DeepSeek,
   Google,
@@ -114,6 +115,10 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
       return <Stepfun size={20} />;
     }
 
+    case ModelProvider.Baichuan: {
+      return <Baichuan size={20} />;
+    }
+
     default: {
       return null;
     }
package/src/config/llm.ts
CHANGED

@@ -82,6 +82,9 @@ export const getLLMConfig = () => {
 
       ENABLED_STEPFUN: z.boolean(),
       STEPFUN_API_KEY: z.string().optional(),
+
+      ENABLED_BAICHUAN: z.boolean(),
+      BAICHUAN_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -155,6 +158,9 @@ export const getLLMConfig = () => {
 
       ENABLED_STEPFUN: !!process.env.STEPFUN_API_KEY,
       STEPFUN_API_KEY: process.env.STEPFUN_API_KEY,
+
+      ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
+      BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
     },
   });
 };
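Each provider contributes two halves here: a zod schema entry and a `runtimeEnv` mapping, with the `ENABLED_*` flag derived from the key's presence rather than set directly, so a deployment enables Baichuan by setting a single variable. A condensed sketch of how the halves fit together, assuming the t3-env `createEnv` helper this config style matches (the real file wires many more providers):

```ts
import { createEnv } from '@t3-oss/env-core';
import { z } from 'zod';

export const llmEnv = createEnv({
  runtimeEnv: {
    BAICHUAN_API_KEY: process.env.BAICHUAN_API_KEY,
    // Derived flag: Baichuan switches on as soon as a key is deployed,
    // e.g. BAICHUAN_API_KEY=sk-... in .env.
    ENABLED_BAICHUAN: !!process.env.BAICHUAN_API_KEY,
  },
  server: {
    BAICHUAN_API_KEY: z.string().optional(),
    ENABLED_BAICHUAN: z.boolean(),
  },
});
```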
package/src/config/modelProviders/baichuan.ts
ADDED

@@ -0,0 +1,55 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://platform.baichuan-ai.com/price
+const Baichuan: ModelProviderCard = {
+  chatModels: [
+    {
+      description: '模型能力国内第一,在知识百科、长文本、生成创作等中文任务上超越国外主流模型。还具备行业领先的多模态能力,多项权威评测基准表现优异。',
+      displayName: 'Baichuan 4',
+      enabled: true,
+      functionCall: true,
+      id: 'Baichuan4',
+      maxOutput: 4096,
+      tokens: 32_768,
+    },
+    {
+      description: '针对企业高频场景优化,效果大幅提升,高性价比。相对于Baichuan2模型,内容创作提升20%,知识问答提升17%, 角色扮演能力提升40%。整体效果比GPT3.5更优。',
+      displayName: 'Baichuan 3 Turbo',
+      enabled: true,
+      functionCall: true,
+      id: 'Baichuan3-Turbo',
+      maxOutput: 8192,
+      tokens: 32_768,
+    },
+    {
+      description: '具备 128K 超长上下文窗口,针对企业高频场景优化,效果大幅提升,高性价比。相对于Baichuan2模型,内容创作提升20%,知识问答提升17%, 角色扮演能力提升40%。整体效果比GPT3.5更优。',
+      displayName: 'Baichuan 3 Turbo 128k',
+      enabled: true,
+      id: 'Baichuan3-Turbo-128k',
+      maxOutput: 4096,
+      tokens: 128_000,
+    },
+    {
+      description: '采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。',
+      displayName: 'Baichuan 2 Turbo',
+      enabled: true,
+      id: 'Baichuan2-Turbo',
+      maxOutput: 8192,
+      tokens: 32_768,
+    },
+    {
+      description: '具备 192K 超长上下文窗口,采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。',
+      displayName: 'Baichuan 2 Turbo 192k',
+      enabled: true,
+      id: 'Baichuan2-Turbo-192k',
+      maxOutput: 2048,
+      tokens: 192_000,
+    },
+  ],
+  checkModel: 'Baichuan4',
+  id: 'baichuan',
+  modelList: { showModelFetcher: true },
+  name: 'Baichuan',
+};
+
+export default Baichuan;
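For reference, the fields the card uses, as a trimmed sketch of the `ChatModelCard` shape from `@/types/llm` (the full type carries more optional fields than shown here):

```ts
interface ChatModelCard {
  description?: string;
  displayName?: string;
  enabled?: boolean; // listed in the model picker by default
  functionCall?: boolean; // supports OpenAI-style tool calling
  id: string; // the model name sent to the Baichuan API
  maxOutput?: number; // cap on generated tokens per response
  tokens?: number; // total context window
}
```

Note how `tokens` and `maxOutput` diverge for the long-context entries: Baichuan2-Turbo-192k accepts roughly 192k tokens of context but caps generation at 2,048.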
package/src/config/modelProviders/index.ts
CHANGED

@@ -2,6 +2,7 @@ import { ChatModelCard, ModelProviderCard } from '@/types/llm';
 
 import AnthropicProvider from './anthropic';
 import AzureProvider from './azure';
+import BaichuanProvider from './baichuan';
 import BedrockProvider from './bedrock';
 import DeepSeekProvider from './deepseek';
 import GoogleProvider from './google';
@@ -37,6 +38,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   AnthropicProvider.chatModels,
   ZeroOneProvider.chatModels,
   StepfunProvider.chatModels,
+  BaichuanProvider.chatModels,
 ].flat();
 
 export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -58,6 +60,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   ZeroOneProvider,
   ZhiPuProvider,
   StepfunProvider,
+  BaichuanProvider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -66,6 +69,7 @@ export const filterEnabledModels = (provider: ModelProviderCard) => {
 
 export { default as AnthropicProviderCard } from './anthropic';
 export { default as AzureProviderCard } from './azure';
+export { default as BaichuanProviderCard } from './baichuan';
 export { default as BedrockProviderCard } from './bedrock';
 export { default as DeepSeekProviderCard } from './deepseek';
 export { default as GoogleProviderCard } from './google';
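The hunks above show `filterEnabledModels` only as a context line, so its body is not part of this diff. Given how `src/const/settings/llm.ts` consumes it (to build `enabledModels` lists), a plausible reading is:

```ts
import { ModelProviderCard } from '@/types/llm';

// Collect the ids of the models a provider card marks as enabled by default.
const filterEnabledModels = (provider: ModelProviderCard) =>
  (provider.chatModels || []).filter((m) => m.enabled).map((m) => m.id);
```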
package/src/const/settings/llm.ts
CHANGED

@@ -1,5 +1,6 @@
 import {
   AnthropicProviderCard,
+  BaichuanProviderCard,
   BedrockProviderCard,
   DeepSeekProviderCard,
   GoogleProviderCard,
@@ -29,6 +30,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
   azure: {
     enabled: false,
   },
+  baichuan: {
+    enabled: false,
+    enabledModels: filterEnabledModels(BaichuanProviderCard),
+  },
   bedrock: {
     enabled: false,
     enabledModels: filterEnabledModels(BedrockProviderCard),
package/src/libs/agent-runtime/AgentRuntime.ts
CHANGED

@@ -5,6 +5,7 @@ import type { TracePayload } from '@/const/trace';
 import { LobeRuntimeAI } from './BaseAI';
 import { LobeAnthropicAI } from './anthropic';
 import { LobeAzureOpenAI } from './azureOpenai';
+import { LobeBaichuanAI } from './baichuan';
 import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
 import { LobeDeepSeekAI } from './deepseek';
 import { LobeGoogleAI } from './google';
@@ -103,6 +104,7 @@ class AgentRuntime {
     params: Partial<{
       anthropic: Partial<ClientOptions>;
       azure: { apiVersion?: string; apikey?: string; endpoint?: string };
+      baichuan: Partial<ClientOptions>;
       bedrock: Partial<LobeBedrockAIParams>;
       deepseek: Partial<ClientOptions>;
       google: { apiKey?: string; baseURL?: string };
@@ -219,6 +221,11 @@ class AgentRuntime {
         runtimeModel = new LobeStepfunAI(params.stepfun ?? {});
         break;
       }
+
+      case ModelProvider.Baichuan: {
+        runtimeModel = new LobeBaichuanAI(params.baichuan ?? {});
+        break;
+      }
     }
 
     return new AgentRuntime(runtimeModel);
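With the new case in place, selecting Baichuan flows through the same factory path as every other provider. A hypothetical call site (the static factory's exact name sits outside this hunk's context lines, so treat the method name below as an assumption):

```ts
import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';

// Illustrative only: initialize the runtime with Baichuan-specific client options.
const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Baichuan, {
  baichuan: { apiKey: process.env.BAICHUAN_API_KEY },
});
```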
package/src/libs/agent-runtime/baichuan/index.test.ts
ADDED

@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeBaichuanAI } from './index';
+
+const provider = ModelProvider.Baichuan;
+const defaultBaseURL = 'https://api.baichuan-ai.com/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeBaichuanAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeBaichuanAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeBaichuanAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeBaichuanAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'Baichuan4',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeBaichuanAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'Baichuan4',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with a cause response with desensitized Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeBaichuanAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'Baichuan4',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidBaichuanAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'Baichuan4',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidBaichuanAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'Baichuan4',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_BAICHUAN_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // mocked prod stream
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // add a toReadableStream method
+
+        // Mock the return value of chat.completions.create, including a mocked tee method
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // Save the original environment variable value
+        const originalDebugValue = process.env.DEBUG_BAICHUAN_CHAT_COMPLETION;
+
+        // Mock the environment variable
+        process.env.DEBUG_BAICHUAN_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // Run the test
+        // Run your test function, ensuring it calls debugStream when the condition is met
+        // Hypothetical invocation; adjust it to your actual setup as needed
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'Baichuan4',
+          stream: true,
+          temperature: 0,
+        });
+
+        // Verify that debugStream was called
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // Restore the original environment variable value
+        process.env.DEBUG_BAICHUAN_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
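The desensitized-URL assertion above expects `https://api.abc.com/v1` to surface as `https://api.***.com/v1`. The masking helper itself is not part of this diff; a sketch that reproduces the asserted behavior:

```ts
// Mask the second-level domain label so error payloads don't leak custom endpoints.
const desensitizeUrl = (url: string): string => {
  const u = new URL(url);
  const labels = u.host.split('.');
  if (labels.length > 2) labels[1] = '***';
  return `${u.protocol}//${labels.join('.')}${u.pathname}`;
};

desensitizeUrl('https://api.abc.com/v1'); // => 'https://api.***.com/v1'
```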
package/src/libs/agent-runtime/baichuan/index.ts
ADDED

@@ -0,0 +1,32 @@
+import OpenAI from 'openai';
+
+import { ChatStreamPayload, ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeBaichuanAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://api.baichuan-ai.com/v1',
+  chatCompletion: {
+    handlePayload: (payload: ChatStreamPayload) => {
+      const { frequency_penalty, ...rest } = payload;
+
+      let adjustedFrequencyPenalty = frequency_penalty ?? 1;
+
+      if (frequency_penalty !== undefined) {
+        if (frequency_penalty < 1) {
+          // If less than 1 (including negative values), add 1 to bring it into the 1-2 range
+          adjustedFrequencyPenalty = Math.min(Math.max(frequency_penalty + 1, 1), 2);
+        } else if (frequency_penalty > 2) {
+          // If greater than 2, cap it at 2
+          adjustedFrequencyPenalty = 2;
+        }
+        // If between 1 and 2, keep the original value
+      }
+
+      return { ...rest, frequency_penalty: adjustedFrequencyPenalty } as OpenAI.ChatCompletionCreateParamsStreaming;
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_BAICHUAN_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.Baichuan,
+});
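The remapping in `handlePayload` exists because Baichuan accepts `frequency_penalty` only in `[1, 2]`, while OpenAI-style payloads use `[-2, 2]`. A standalone restatement of the same arithmetic, extracted purely for illustration, with sample values:

```ts
// Mirrors handlePayload's branches: shift sub-1 values up by one and clamp,
// cap values above 2, and default to 1 when unset.
const adjust = (fp?: number): number => {
  if (fp === undefined) return 1;
  if (fp < 1) return Math.min(Math.max(fp + 1, 1), 2);
  return Math.min(fp, 2);
};

[adjust(-2), adjust(0.5), adjust(1.8), adjust(3)]; // => [1, 1.5, 1.8, 2]
```

Note the mapping is lossy and not monotonic at the boundary: `0.9` becomes `1.9` while `1.0` stays `1.0`.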
package/src/server/globalConfig/index.ts
CHANGED

@@ -34,6 +34,7 @@ export const getServerGlobalConfig = () => {
     ENABLED_MISTRAL,
     ENABLED_QWEN,
     ENABLED_STEPFUN,
+    ENABLED_BAICHUAN,
 
     ENABLED_AZURE_OPENAI,
     AZURE_MODEL_LIST,
@@ -70,6 +71,7 @@ export const getServerGlobalConfig = () => {
         withDeploymentName: true,
       }),
     },
+    baichuan: { enabled: ENABLED_BAICHUAN },
     bedrock: { enabled: ENABLED_AWS_BEDROCK },
     deepseek: { enabled: ENABLED_DEEPSEEK },
     google: { enabled: ENABLED_GOOGLE },
package/src/types/tool/builtin.ts
CHANGED

@@ -37,11 +37,11 @@ export interface BuiltinRenderProps<Content = any, Arguments = any, State = any>
 
 export type BuiltinRender = <T = any>(props: BuiltinRenderProps<T>) => ReactNode;
 
-export interface BuiltinDockProps<Arguments = Record<string, any>, State = any> {
+export interface BuiltinPortalProps<Arguments = Record<string, any>, State = any> {
   arguments: Arguments;
   identifier: string;
   messageId: string;
   state: State;
 }
 
-export type BuiltinDock = <T = any>(props: BuiltinDockProps<T>) => ReactNode;
+export type BuiltinPortal = <T = any>(props: BuiltinPortalProps<T>) => ReactNode;
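For context on the renamed types, a minimal sketch of a component conforming to `BuiltinPortal`; the tool and its argument shape are illustrative, not from the package:

```ts
import type { ReactNode } from 'react';
import type { BuiltinPortalProps } from '@/types/tool/builtin';

// A portal receives the tool call's parsed arguments plus per-message state
// and renders the expanded view inside the workspace's @portal pane.
const ExamplePortal = ({
  arguments: args,
  messageId,
}: BuiltinPortalProps<{ query: string }>): ReactNode =>
  `message ${messageId} searched for ${args.query}`;
```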
package/src/types/user/settings/keyVaults.ts
CHANGED

@@ -18,6 +18,7 @@ export interface AWSBedrockKeyVault {
 export interface UserKeyVaults {
   anthropic?: OpenAICompatibleKeyVault;
   azure?: AzureOpenAIKeyVault;
+  baichuan?: OpenAICompatibleKeyVault;
   bedrock?: AWSBedrockKeyVault;
   deepseek?: OpenAICompatibleKeyVault;
   google?: OpenAICompatibleKeyVault;
package/src/tools/docks.ts
DELETED