@lobehub/chat 1.15.34 → 1.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of @lobehub/chat might be problematic.
- package/CHANGELOG.md +51 -0
- package/Dockerfile +4 -0
- package/Dockerfile.database +4 -0
- package/package.json +1 -1
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +4 -0
- package/src/app/api/chat/agentRuntime.ts +14 -0
- package/src/components/Branding/CustomLogo.tsx +97 -44
- package/src/components/Branding/index.tsx +3 -10
- package/src/config/llm.ts +14 -0
- package/src/config/modelProviders/fireworksai.ts +143 -0
- package/src/config/modelProviders/index.ts +8 -0
- package/src/config/modelProviders/spark.ts +59 -0
- package/src/const/settings/llm.ts +10 -0
- package/src/features/Conversation/Error/APIKeyForm/index.tsx +2 -2
- package/src/libs/agent-runtime/AgentRuntime.ts +14 -0
- package/src/libs/agent-runtime/fireworksai/index.test.ts +255 -0
- package/src/libs/agent-runtime/fireworksai/index.ts +10 -0
- package/src/libs/agent-runtime/spark/index.test.ts +255 -0
- package/src/libs/agent-runtime/spark/index.ts +13 -0
- package/src/libs/agent-runtime/types/type.ts +2 -0
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +3 -3
- package/src/libs/agent-runtime/utils/streams/openai.ts +4 -1
- package/src/server/globalConfig/index.ts +18 -1
- package/src/types/user/settings/keyVaults.ts +2 -0
- package/src/features/Conversation/Error/APIKeyForm/ProviderAvatar.tsx +0 -118
@@ -4,6 +4,7 @@ import {
   BaichuanProviderCard,
   BedrockProviderCard,
   DeepSeekProviderCard,
+  FireworksAIProviderCard,
   GoogleProviderCard,
   GroqProviderCard,
   MinimaxProviderCard,
@@ -16,6 +17,7 @@ import {
   PerplexityProviderCard,
   QwenProviderCard,
   SiliconCloudProviderCard,
+  SparkProviderCard,
   StepfunProviderCard,
   TaichuProviderCard,
   TogetherAIProviderCard,
@@ -51,6 +53,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
     enabled: false,
     enabledModels: filterEnabledModels(DeepSeekProviderCard),
   },
+  fireworksai: {
+    enabled: false,
+    enabledModels: filterEnabledModels(FireworksAIProviderCard),
+  },
   google: {
     enabled: false,
     enabledModels: filterEnabledModels(GoogleProviderCard),
@@ -100,6 +106,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
     enabled: false,
     enabledModels: filterEnabledModels(SiliconCloudProviderCard),
   },
+  spark: {
+    enabled: false,
+    enabledModels: filterEnabledModels(SparkProviderCard),
+  },
   stepfun: {
     enabled: false,
     enabledModels: filterEnabledModels(StepfunProviderCard),
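For context on the pattern used in the two new entries above: `filterEnabledModels` presumably just collects the ids of the models a provider card ships as enabled, so `fireworksai` and `spark` follow the same default shape as every other provider. A minimal sketch of that assumption (not the package's actual implementation):

```ts
// Sketch only — assumed shape of filterEnabledModels: pick the ids of chat
// models flagged `enabled` on a provider card to build the default list.
interface ChatModelCard {
  enabled?: boolean;
  id: string;
}

interface ModelProviderCard {
  chatModels: ChatModelCard[];
}

const filterEnabledModels = (card: ModelProviderCard): string[] =>
  card.chatModels.filter((model) => model.enabled).map((model) => model.id);
```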
@@ -1,3 +1,4 @@
+import { ProviderIcon } from '@lobehub/icons';
 import { Button } from 'antd';
 import { memo, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
@@ -9,7 +10,6 @@ import { GlobalLLMProviderKey } from '@/types/user/settings';
 
 import BedrockForm from './Bedrock';
 import ProviderApiKeyForm from './ProviderApiKeyForm';
-import ProviderAvatar from './ProviderAvatar';
 
 interface APIKeyFormProps {
   id: string;
@@ -64,7 +64,7 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
       ) : (
         <ProviderApiKeyForm
           apiKeyPlaceholder={apiKeyPlaceholder}
-          avatar={<
+          avatar={<ProviderIcon provider={provider} size={80} type={'avatar'} />}
           provider={provider as GlobalLLMProviderKey}
           showEndpoint={provider === ModelProvider.OpenAI}
         />
@@ -9,6 +9,7 @@ import { LobeAzureOpenAI } from './azureOpenai';
 import { LobeBaichuanAI } from './baichuan';
 import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
 import { LobeDeepSeekAI } from './deepseek';
+import { LobeFireworksAI } from './fireworksai';
 import { LobeGoogleAI } from './google';
 import { LobeGroq } from './groq';
 import { LobeMinimaxAI } from './minimax';
@@ -21,6 +22,7 @@ import { LobeOpenRouterAI } from './openrouter';
 import { LobePerplexityAI } from './perplexity';
 import { LobeQwenAI } from './qwen';
 import { LobeSiliconCloudAI } from './siliconcloud';
+import { LobeSparkAI } from './spark';
 import { LobeStepfunAI } from './stepfun';
 import { LobeTaichuAI } from './taichu';
 import { LobeTogetherAI } from './togetherai';
@@ -120,6 +122,7 @@ class AgentRuntime {
       baichuan: Partial<ClientOptions>;
       bedrock: Partial<LobeBedrockAIParams>;
       deepseek: Partial<ClientOptions>;
+      fireworksai: Partial<ClientOptions>;
       google: { apiKey?: string; baseURL?: string };
       groq: Partial<ClientOptions>;
       minimax: Partial<ClientOptions>;
@@ -132,6 +135,7 @@ class AgentRuntime {
       perplexity: Partial<ClientOptions>;
       qwen: Partial<ClientOptions>;
       siliconcloud: Partial<ClientOptions>;
+      spark: Partial<ClientOptions>;
       stepfun: Partial<ClientOptions>;
       taichu: Partial<ClientOptions>;
       togetherai: Partial<ClientOptions>;
@@ -224,6 +228,11 @@ class AgentRuntime {
         break;
       }
 
+      case ModelProvider.FireworksAI: {
+        runtimeModel = new LobeFireworksAI(params.fireworksai);
+        break;
+      }
+
       case ModelProvider.ZeroOne: {
         runtimeModel = new LobeZeroOneAI(params.zeroone);
         break;
@@ -268,6 +277,11 @@ class AgentRuntime {
         runtimeModel = new LobeUpstageAI(params.upstage);
         break;
       }
+
+      case ModelProvider.Spark: {
+        runtimeModel = new LobeSparkAI(params.spark);
+        break;
+      }
     }
 
     return new AgentRuntime(runtimeModel);
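With the new imports, `params` fields, and the two switch cases above, the providers plug into the same construction path as every other OpenAI-compatible runtime. A minimal usage sketch based only on what this diff shows (constructor options and `chat` arguments mirror the new test files below; the API keys are placeholders):

```ts
import { LobeFireworksAI } from './fireworksai';
import { LobeSparkAI } from './spark';

// Placeholder keys — supply real credentials via your own configuration.
const fireworks = new LobeFireworksAI({ apiKey: 'fw-xxx' });
const spark = new LobeSparkAI({ apiKey: 'spark-xxx' });

// Both expose the OpenAI-compatible chat interface that AgentRuntime dispatches to.
const response = await fireworks.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'accounts/fireworks/models/firefunction-v2',
  temperature: 0,
});
```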
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeFireworksAI } from './index';
+
+const provider = ModelProvider.FireworksAI;
+const defaultBaseURL = 'https://api.fireworks.ai/inference/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeFireworksAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeFireworksAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeFireworksAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeFireworksAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeFireworksAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeFireworksAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidFireworksAIAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidFireworksAIAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_FIREWORKSAI_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // mocked production stream
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+        // Mock the chat.completions.create return value, including a mocked tee method
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // Save the original environment variable value
+        const originalDebugValue = process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION;
+
+        // Mock the environment variable
+        process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // Run the test
+        // Run your test function and make sure it calls debugStream when the condition is met
+        // Hypothetical test call; adjust to your actual setup if needed
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'accounts/fireworks/models/firefunction-v2',
+          stream: true,
+          temperature: 0,
+        });
+
+        // Verify that debugStream was called
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // Restore the original environment variable value
+        process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
@@ -0,0 +1,10 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeFireworksAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://api.fireworks.ai/inference/v1',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.FireworksAI,
+});
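The whole provider fits in one call because `LobeOpenAICompatibleFactory` presumably generates an OpenAI-SDK-backed runtime class from a base URL, a debug switch, and a provider id. A rough, simplified sketch of that assumption (a hypothetical `makeCompatibleRuntime`, not the package's actual factory), inferred from how the tests above use the result (the constructor takes `{ apiKey, baseURL? }` and instances expose `baseURL` and a wrapped client):

```ts
// Sketch only — assumed shape of the factory's output, for orientation.
import OpenAI, { type ClientOptions } from 'openai';

interface CompatibleFactoryOptions {
  baseURL: string;
  debug?: { chatCompletion: () => boolean };
  provider: string;
}

const makeCompatibleRuntime = ({ baseURL, provider }: CompatibleFactoryOptions) =>
  class {
    baseURL: string;
    client: OpenAI;
    provider = provider;

    constructor(options: ClientOptions = {}) {
      // Callers may override the pinned default baseURL per instance.
      this.baseURL = options.baseURL ?? baseURL;
      this.client = new OpenAI({ ...options, baseURL: this.baseURL });
    }
  };
```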
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeSparkAI } from './index';
+
+const provider = ModelProvider.Spark;
+const defaultBaseURL = 'https://spark-api-open.xf-yun.com/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeSparkAI({ apiKey: 'test' });
+
+  // Use vi.spyOn to mock the chat.completions.create method
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeSparkAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeSparkAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeSparkAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeSparkAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeSparkAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidSparkAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidSparkAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_SPARK_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // mocked production stream
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+        // Mock the chat.completions.create return value, including a mocked tee method
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // Save the original environment variable value
+        const originalDebugValue = process.env.DEBUG_SPARK_CHAT_COMPLETION;
+
+        // Mock the environment variable
+        process.env.DEBUG_SPARK_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // Run the test
+        // Run your test function and make sure it calls debugStream when the condition is met
+        // Hypothetical test call; adjust to your actual setup if needed
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'general',
+          stream: true,
+          temperature: 0,
+        });
+
+        // Verify that debugStream was called
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // Restore the original environment variable value
+        process.env.DEBUG_SPARK_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
@@ -0,0 +1,13 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeSparkAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://spark-api-open.xf-yun.com/v1',
+  chatCompletion: {
+    noUserId: true,
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_SPARK_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.Spark,
+});
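The only difference from the Fireworks wrapper is `chatCompletion.noUserId`, which presumably tells the factory to drop the optional OpenAI `user` field from outgoing payloads for providers whose endpoints reject it. A hedged sketch of what that option would amount to (an assumed helper, not the package's code):

```ts
// Sketch only — assumed effect of `noUserId`: strip the optional OpenAI
// `user` field from the chat completion payload before sending it.
import type OpenAI from 'openai';

const applyNoUserId = (
  payload: OpenAI.ChatCompletionCreateParams,
  noUserId: boolean,
): OpenAI.ChatCompletionCreateParams => {
  if (!noUserId) return payload;
  const { user, ...rest } = payload;
  return rest;
};
```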
@@ -28,6 +28,7 @@ export enum ModelProvider {
   Baichuan = 'baichuan',
   Bedrock = 'bedrock',
   DeepSeek = 'deepseek',
+  FireworksAI = 'fireworksai',
   Google = 'google',
   Groq = 'groq',
   Minimax = 'minimax',
@@ -40,6 +41,7 @@ export enum ModelProvider {
   Perplexity = 'perplexity',
   Qwen = 'qwen',
   SiliconCloud = 'siliconcloud',
+  Spark = 'spark',
   Stepfun = 'stepfun',
   Taichu = 'taichu',
   TogetherAI = 'togetherai',
@@ -287,7 +287,7 @@ describe('OpenAIStream', () => {
     expect(chunks).toEqual([
       'id: 2\n',
       'event: tool_calls\n',
-      `data: [{"function":{"
+      `data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
     ]);
 
     expect(onToolCallMock).toHaveBeenCalledTimes(1);
@@ -334,7 +334,7 @@ describe('OpenAIStream', () => {
     expect(chunks).toEqual([
       'id: 5\n',
       'event: tool_calls\n',
-      `data: [{"function":{"
+      `data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
     ]);
   });
 
@@ -428,7 +428,7 @@ describe('OpenAIStream', () => {
       `data: [{"function":{"arguments":"","name":"realtime-weather____fetchCurrentWeather"},"id":"toolu_01VQtK4W9kqxGGLHgsPPxiBj","index":0,"type":"function"}]\n`,
       'id: 1',
       'event: tool_calls',
-      `data: [{"function":{"arguments":"{\\"city\\": \\"杭州\\"}"},"id":"toolu_01VQtK4W9kqxGGLHgsPPxiBj","index":0,"type":"function"}]\n`,
+      `data: [{"function":{"arguments":"{\\"city\\": \\"杭州\\"}","name":null},"id":"toolu_01VQtK4W9kqxGGLHgsPPxiBj","index":0,"type":"function"}]\n`,
       'id: 1',
       'event: stop',
       `data: "tool_calls"\n`,