@lobehub/chat 1.14.12 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/package.json +1 -1
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +2 -0
- package/src/app/api/chat/agentRuntime.ts +7 -0
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/config/modelProviders/upstage.ts +45 -0
- package/src/const/settings/llm.ts +5 -0
- package/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx +5 -0
- package/src/features/FileViewer/Renderer/Image/index.tsx +36 -0
- package/src/features/FileViewer/Renderer/index.ts +2 -1
- package/src/features/FileViewer/index.tsx +9 -9
- package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/libs/agent-runtime/upstage/index.test.ts +255 -0
- package/src/libs/agent-runtime/upstage/index.ts +10 -0
- package/src/server/globalConfig/index.ts +3 -0
- package/src/types/user/settings/keyVaults.ts +1 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@

 # Changelog

+## [Version 1.15.0](https://github.com/lobehub/lobe-chat/compare/v1.14.12...v1.15.0)
+
+<sup>Released on **2024-08-30**</sup>
+
+#### ✨ Features
+
+- **misc**: Add Upstage model provider support.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add Upstage model provider support, closes [#3670](https://github.com/lobehub/lobe-chat/issues/3670) ([4b8591b](https://github.com/lobehub/lobe-chat/commit/4b8591b))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.14.12](https://github.com/lobehub/lobe-chat/compare/v1.14.11...v1.14.12)

 <sup>Released on **2024-08-30**</sup>
package/Dockerfile
CHANGED
package/Dockerfile.database
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.14.12",
+  "version": "1.15.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/(main)/settings/llm/ProviderList/providers.tsx
CHANGED

@@ -18,6 +18,7 @@ import {
   StepfunProviderCard,
   TaichuProviderCard,
   TogetherAIProviderCard,
+  UpstageProviderCard,
   ZeroOneProviderCard,
   ZhiPuProviderCard,
 } from '@/config/modelProviders';
@@ -59,6 +60,7 @@ export const useProviderList = (): ProviderItem[] => {
       TaichuProviderCard,
       Ai360ProviderCard,
       SiliconCloudProviderCard,
+      UpstageProviderCard,
     ],
     [AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider],
   );
package/src/app/api/chat/agentRuntime.ts
CHANGED

@@ -208,6 +208,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {

       return { apiKey, baseURL };
     }
+    case ModelProvider.Upstage: {
+      const { UPSTAGE_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || UPSTAGE_API_KEY);
+
+      return { apiKey };
+    }
   }
 };

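The new case follows the same key-resolution pattern as the other OpenAI-compatible providers: a key supplied in the request payload takes precedence, otherwise the server-side `UPSTAGE_API_KEY` is used. A minimal standalone sketch of that precedence, assuming `apiKeyManager.pick` selects the first non-empty key it is given (the real manager may also rotate among multiple comma-separated keys); the helper name below is invented for illustration:

```ts
// Hypothetical illustration of the precedence implemented above.
const resolveUpstageKey = (userKey?: string, serverKey?: string) =>
  userKey || serverKey;

resolveUpstageKey('sk-user', 'sk-server'); // -> 'sk-user' (request payload wins)
resolveUpstageKey(undefined, 'sk-server'); // -> 'sk-server' (falls back to env)
```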
package/src/config/llm.ts
CHANGED
@@ -93,6 +93,9 @@ export const getLLMConfig = () => {
       SILICONCLOUD_API_KEY: z.string().optional(),
       SILICONCLOUD_MODEL_LIST: z.string().optional(),
       SILICONCLOUD_PROXY_URL: z.string().optional(),
+
+      ENABLED_UPSTAGE: z.boolean(),
+      UPSTAGE_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -183,6 +186,9 @@ export const getLLMConfig = () => {
       SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
       SILICONCLOUD_MODEL_LIST: process.env.SILICONCLOUD_MODEL_LIST,
       SILICONCLOUD_PROXY_URL: process.env.SILICONCLOUD_PROXY_URL,
+
+      ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
+      UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
     },
   });
 };
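The `ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY` line encodes the convention used throughout this config: a provider is switched on purely by the presence of its API key, never by a separate flag. A self-contained sketch of the same pattern with zod (the schema library this file evidently builds on):

```ts
import { z } from 'zod';

// Sketch of the enable-by-key pattern: the boolean flag is derived
// from the key, not set directly by the operator.
const upstageEnv = z.object({
  ENABLED_UPSTAGE: z.boolean(),
  UPSTAGE_API_KEY: z.string().optional(),
});

const config = upstageEnv.parse({
  ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
  UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
});
// Setting UPSTAGE_API_KEY=... in the environment yields ENABLED_UPSTAGE === true.
```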
package/src/config/modelProviders/index.ts
CHANGED

@@ -21,6 +21,7 @@ import SiliconCloudProvider from './siliconcloud';
 import StepfunProvider from './stepfun';
 import TaichuProvider from './taichu';
 import TogetherAIProvider from './togetherai';
+import UpstageProvider from './upstage';
 import ZeroOneProvider from './zeroone';
 import ZhiPuProvider from './zhipu';

@@ -47,6 +48,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   TaichuProvider.chatModels,
   Ai360Provider.chatModels,
   SiliconCloudProvider.chatModels,
+  UpstageProvider.chatModels,
 ].flat();

 export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -73,6 +75,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   TaichuProvider,
   Ai360Provider,
   SiliconCloudProvider,
+  UpstageProvider,
 ];

 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -105,5 +108,6 @@ export { default as SiliconCloudProviderCard } from './siliconcloud';
 export { default as StepfunProviderCard } from './stepfun';
 export { default as TaichuProviderCard } from './taichu';
 export { default as TogetherAIProviderCard } from './togetherai';
+export { default as UpstageProviderCard } from './upstage';
 export { default as ZeroOneProviderCard } from './zeroone';
 export { default as ZhiPuProviderCard } from './zhipu';
package/src/config/modelProviders/upstage.ts
ADDED

@@ -0,0 +1,45 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://developers.upstage.ai/docs/getting-started/models
+const Upstage: ModelProviderCard = {
+  chatModels: [
+    {
+      description: 'A compact LLM offering superior performance to GPT-3.5, with robust multilingual capabilities for both English and Korean, delivering high efficiency in a smaller package. solar-1-mini-chat is an alias for our latest solar-1-mini-chat model. (Currently solar-1-mini-chat-240612)',
+      displayName: 'Solar 1 Mini Chat',
+      enabled: true,
+      functionCall: true,
+      id: 'solar-1-mini-chat',
+      tokens: 32_768,
+    },
+    {
+      description: 'A compact LLM that extends the capabilities of solar-mini-chat with specialization in Japanese, while maintaining high efficiency and performance in English and Korean. solar-1-mini-chat-ja is an alias for our latest solar-1-mini-chat-ja model. (Currently solar-1-mini-chat-ja-240612)',
+      displayName: 'Solar 1 Mini Chat Ja',
+      enabled: true,
+      functionCall: false,
+      id: 'solar-1-mini-chat-ja',
+      tokens: 32_768,
+    },
+    {
+      description: 'English-to-Korean translation specialized model based on solar-mini. Maximum context length is 32k tokens. solar-1-mini-translate-enko is an alias for our latest solar-1-mini-translate-enko model. (Currently solar-1-mini-translate-enko-240507)',
+      displayName: 'Solar 1 Mini Translate EnKo',
+      enabled: false,
+      functionCall: false,
+      id: 'solar-1-mini-translate-enko',
+      tokens: 32_768,
+    },
+    {
+      description: 'Korean-to-English translation specialized model based on solar-mini. Maximum context length is 32k tokens. solar-1-mini-translate-koen is an alias for our latest solar-1-mini-translate-koen model. (Currently solar-1-mini-translate-koen-240507)',
+      displayName: 'Solar 1 Mini Translate KoEn',
+      enabled: false,
+      functionCall: false,
+      id: 'solar-1-mini-translate-koen',
+      tokens: 32_768,
+    },
+  ],
+  checkModel: 'solar-1-mini-chat',
+  id: 'upstage',
+  modelList: { showModelFetcher: true },
+  name: 'Upstage',
+};
+
+export default Upstage;
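Only the two chat models ship with `enabled: true`, so they are what `filterEnabledModels` (used in the default settings below) is expected to surface. A sketch under the assumption that the helper filters on the `enabled` flag and returns model ids:

```ts
import UpstageProvider from '@/config/modelProviders/upstage';

// Assumed behavior of filterEnabledModels: keep models flagged
// `enabled` and return their ids.
const enabledIds = UpstageProvider.chatModels
  .filter((model) => model.enabled)
  .map((model) => model.id);

console.log(enabledIds); // ['solar-1-mini-chat', 'solar-1-mini-chat-ja']
```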
package/src/const/settings/llm.ts
CHANGED

@@ -19,6 +19,7 @@ import {
   StepfunProviderCard,
   TaichuProviderCard,
   TogetherAIProviderCard,
+  UpstageProviderCard,
   ZeroOneProviderCard,
   ZhiPuProviderCard,
   filterEnabledModels,
@@ -111,6 +112,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
     enabled: false,
     enabledModels: filterEnabledModels(TogetherAIProviderCard),
   },
+  upstage: {
+    enabled: false,
+    enabledModels: filterEnabledModels(UpstageProviderCard),
+  },
   zeroone: {
     enabled: false,
     enabledModels: filterEnabledModels(ZeroOneProviderCard),
package/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx
CHANGED

@@ -16,6 +16,7 @@ import {
   Stepfun,
   Together,
   Tongyi,
+  Upstage,
   ZeroOne,
   Zhipu,
 } from '@lobehub/icons';
@@ -103,6 +104,10 @@ const ProviderAvatar = memo<ProviderAvatarProps>(({ provider }) => {
       return <Ai360 color={Ai360.colorPrimary} size={56} />;
     }

+    case ModelProvider.Upstage: {
+      return <Upstage color={Upstage.colorPrimary} size={56} />;
+    }
+
     default:
     case ModelProvider.OpenAI: {
       return <OpenAI color={theme.colorText} size={64} />;
package/src/features/FileViewer/Renderer/Image/index.tsx
ADDED

@@ -0,0 +1,36 @@
+import { DocRenderer } from '@cyntler/react-doc-viewer';
+import { Center } from 'react-layout-kit';
+
+const ImageRenderer: DocRenderer = ({ mainState: { currentDocument } }) => {
+  const { uri, fileName } = currentDocument || {};
+
+  return (
+    <Center height={'100%'} width={'100%'}>
+      <img
+        alt={fileName}
+        height={'100%'}
+        src={uri}
+        style={{ objectFit: 'contain', overflow: 'hidden' }}
+        width={'100%'}
+      />
+    </Center>
+  );
+};
+
+export default ImageRenderer;
+
+ImageRenderer.fileTypes = [
+  'jpg',
+  'jpeg',
+  'image/jpg',
+  'image/jpeg',
+  'png',
+  'image/png',
+  'webp',
+  'image/webp',
+  'gif',
+  'image/gif',
+  'bmp',
+  'image/bmp',
+];
+ImageRenderer.weight = 0;
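A custom `DocRenderer` only takes effect once it is handed to `DocViewer`: `@cyntler/react-doc-viewer` matches the current document's file type against each renderer's `fileTypes` list, using `weight` to rank candidates. A minimal sketch, assuming `FileViewRenderers` (the `Renderer/index.ts` barrel updated in this release) now includes `ImageRenderer`:

```tsx
import DocViewer from '@cyntler/react-doc-viewer';

import { FileViewRenderers } from './Renderer';

// Sketch: a document whose fileType is e.g. 'png' or 'image/png'
// is routed to ImageRenderer via the fileTypes declaration above.
const ImagePreview = ({ url }: { url: string }) => (
  <DocViewer documents={[{ uri: url }]} pluginRenderers={FileViewRenderers} />
);

export default ImagePreview;
```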
package/src/features/FileViewer/index.tsx
CHANGED

@@ -1,7 +1,7 @@
 'use client';

 import DocViewer from '@cyntler/react-doc-viewer';
-import {
+import { css, cx } from 'antd-style';
 import { CSSProperties, memo } from 'react';

 import { FileListItem } from '@/types/files';
@@ -10,12 +10,13 @@ import NotSupport from './NotSupport';
 import { FileViewRenderers } from './Renderer';
 import PDFRenderer from './Renderer/PDF';

-const
-
-
-
-
-}
+const container = css`
+  background: transparent !important;
+
+  #proxy-renderer {
+    height: 100%;
+  }
+`;

 interface FileViewerProps extends FileListItem {
   className?: string;
@@ -23,14 +24,13 @@ interface FileViewerProps extends FileListItem {
 }

 const FileViewer = memo<FileViewerProps>(({ id, style, fileType, url, name }) => {
-  const { styles } = useStyles();
   if (fileType === 'pdf' || name.endsWith('.pdf')) {
     return <PDFRenderer fileId={id} url={url} />;
   }

   return (
     <DocViewer
-      className={
+      className={cx(container)}
       config={{
         header: { disableHeader: true },
         noRenderer: { overrideComponent: NotSupport },
package/src/libs/agent-runtime/AgentRuntime.ts
CHANGED

@@ -24,6 +24,7 @@ import { LobeSiliconCloudAI } from './siliconcloud';
 import { LobeStepfunAI } from './stepfun';
 import { LobeTaichuAI } from './taichu';
 import { LobeTogetherAI } from './togetherai';
+import { LobeUpstageAI } from './upstage';
 import {
   ChatCompetitionOptions,
   ChatStreamPayload,
@@ -134,6 +135,7 @@ class AgentRuntime {
       stepfun: Partial<ClientOptions>;
       taichu: Partial<ClientOptions>;
       togetherai: Partial<ClientOptions>;
+      upstage: Partial<ClientOptions>;
       zeroone: Partial<ClientOptions>;
       zhipu: Partial<ClientOptions>;
     }>,
@@ -261,6 +263,11 @@ class AgentRuntime {
         runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
         break;
       }
+
+      case ModelProvider.Upstage: {
+        runtimeModel = new LobeUpstageAI(params.upstage);
+        break;
+      }
     }

     return new AgentRuntime(runtimeModel);
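End to end, the Upstage runtime is constructed through the same factory switch shown above. A hedged usage sketch; the `initializeWithProviderOptions` entry point and exact payload shape are assumptions inferred from the hunk context and the tests that follow, not confirmed by this diff:

```ts
import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';

// Assumed factory entry point; params.upstage feeds LobeUpstageAI as above.
const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.Upstage, {
  upstage: { apiKey: process.env.UPSTAGE_API_KEY },
});

const response = await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'solar-1-mini-chat',
  temperature: 0,
});
```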
package/src/libs/agent-runtime/upstage/index.test.ts
ADDED

@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeUpstageAI } from './index';
+
+const provider = ModelProvider.Upstage;
+const defaultBaseURL = 'https://api.upstage.ai/v1/solar';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeUpstageAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeUpstageAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeUpstageAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeUpstageAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'solar-1-mini-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeUpstageAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'solar-1-mini-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with a cause response with desensitized URL', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeUpstageAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'solar-1-mini-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidUpstageAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'solar-1-mini-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidUpstageAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'solar-1-mini-chat',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+  });
+
+  describe('DEBUG', () => {
+    it('should call debugStream and return StreamingTextResponse when DEBUG_UPSTAGE_CHAT_COMPLETION is 1', async () => {
+      // Arrange
+      const mockProdStream = new ReadableStream() as any; // mock prod stream
+      const mockDebugStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue('Debug stream content');
+          controller.close();
+        },
+      }) as any;
+      mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+      // Mock the chat.completions.create return value, including a mocked tee method
+      (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+        tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+      });
+
+      // Save the original environment variable value
+      const originalDebugValue = process.env.DEBUG_UPSTAGE_CHAT_COMPLETION;
+
+      // Mock the environment variable
+      process.env.DEBUG_UPSTAGE_CHAT_COMPLETION = '1';
+      vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+      // Run the test
+      // Run the function under test and make sure it calls debugStream when the condition is met
+      // Hypothetical invocation; adjust to the actual situation as needed
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'solar-1-mini-chat',
+        stream: true,
+        temperature: 0,
+      });
+
+      // Verify that debugStream was called
+      expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+      // Restore the original environment variable value
+      process.env.DEBUG_UPSTAGE_CHAT_COMPLETION = originalDebugValue;
+    });
+  });
+});
package/src/libs/agent-runtime/upstage/index.ts
ADDED

@@ -0,0 +1,10 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeUpstageAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://api.upstage.ai/v1/solar',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_UPSTAGE_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.Upstage,
+});
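Because `LobeUpstageAI` is produced by the shared OpenAI-compatible factory, it exposes the same constructor and `chat` surface the tests above exercise. A minimal direct-usage sketch:

```ts
import { LobeUpstageAI } from '@/libs/agent-runtime/upstage';

const upstage = new LobeUpstageAI({ apiKey: process.env.UPSTAGE_API_KEY });
// upstage.baseURL === 'https://api.upstage.ai/v1/solar' (the factory default)

const stream = await upstage.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'solar-1-mini-chat',
  temperature: 0,
});
```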
package/src/server/globalConfig/index.ts
CHANGED

@@ -47,6 +47,8 @@ export const getServerGlobalConfig = () => {
     ENABLED_SILICONCLOUD,
     SILICONCLOUD_MODEL_LIST,

+    ENABLED_UPSTAGE,
+
     ENABLED_AZURE_OPENAI,
     AZURE_MODEL_LIST,

@@ -138,6 +140,7 @@ export const getServerGlobalConfig = () => {
       modelString: TOGETHERAI_MODEL_LIST,
     }),
   },
+  upstage: { enabled: ENABLED_UPSTAGE },
   zeroone: { enabled: ENABLED_ZEROONE },
   zhipu: {
     enabled: ENABLED_ZHIPU,
package/src/types/user/settings/keyVaults.ts
CHANGED

@@ -39,6 +39,7 @@ export interface UserKeyVaults {
   stepfun?: OpenAICompatibleKeyVault;
   taichu?: OpenAICompatibleKeyVault;
   togetherai?: OpenAICompatibleKeyVault;
+  upstage?: OpenAICompatibleKeyVault;
   zeroone?: OpenAICompatibleKeyVault;
   zhipu?: OpenAICompatibleKeyVault;
 }
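With the new optional field, a user-scoped Upstage credential type-checks like any other OpenAI-compatible vault entry. A sketch, assuming `OpenAICompatibleKeyVault` is the usual `{ apiKey?: string; baseURL?: string }` shape; note the server route above only consumes `apiKey` for Upstage:

```ts
import type { UserKeyVaults } from '@/types/user/settings/keyVaults';

// Hypothetical key value shown; only apiKey is read for Upstage.
const keyVaults: Partial<UserKeyVaults> = {
  upstage: { apiKey: 'up-xxxxxxxx' },
};
```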