@lobehub/chat 1.15.35 → 1.16.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of @lobehub/chat might be problematic.
- package/CHANGELOG.md +52 -0
- package/Dockerfile +4 -0
- package/Dockerfile.database +4 -0
- package/package.json +2 -2
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +14 -10
- package/src/app/api/chat/agentRuntime.ts +14 -0
- package/src/config/llm.ts +14 -0
- package/src/config/modelProviders/baichuan.ts +0 -2
- package/src/config/modelProviders/fireworksai.ts +143 -0
- package/src/config/modelProviders/index.ts +19 -11
- package/src/config/modelProviders/minimax.ts +0 -2
- package/src/config/modelProviders/spark.ts +59 -0
- package/src/const/settings/llm.ts +10 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +14 -0
- package/src/libs/agent-runtime/fireworksai/index.test.ts +255 -0
- package/src/libs/agent-runtime/fireworksai/index.ts +10 -0
- package/src/libs/agent-runtime/spark/index.test.ts +255 -0
- package/src/libs/agent-runtime/spark/index.ts +13 -0
- package/src/libs/agent-runtime/types/type.ts +2 -0
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +3 -3
- package/src/libs/agent-runtime/utils/streams/openai.ts +4 -1
- package/src/server/globalConfig/index.ts +18 -1
- package/src/types/user/settings/keyVaults.ts +2 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,58 @@
 
 # Changelog
 
+### [Version 1.16.1](https://github.com/lobehub/lobe-chat/compare/v1.16.0...v1.16.1)
+
+<sup>Released on **2024-09-10**</sup>
+
+#### 💄 Styles
+
+- **misc**: Reorder the provider list, update spark check model to spark-lite & default disable useless model.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Reorder the provider list, closes [#3886](https://github.com/lobehub/lobe-chat/issues/3886) ([4d641f5](https://github.com/lobehub/lobe-chat/commit/4d641f5))
+- **misc**: Update spark check model to spark-lite & default disable useless model, closes [#3885](https://github.com/lobehub/lobe-chat/issues/3885) ([9d7e47c](https://github.com/lobehub/lobe-chat/commit/9d7e47c))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 1.16.0](https://github.com/lobehub/lobe-chat/compare/v1.15.35...v1.16.0)
+
+<sup>Released on **2024-09-10**</sup>
+
+#### ✨ Features
+
+- **misc**: Add Fireworks AI Model Provider, Add Spark model provider.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add Fireworks AI Model Provider, closes [#3392](https://github.com/lobehub/lobe-chat/issues/3392) [#48](https://github.com/lobehub/lobe-chat/issues/48) ([fa0d84d](https://github.com/lobehub/lobe-chat/commit/fa0d84d))
+- **misc**: Add Spark model provider, closes [#3098](https://github.com/lobehub/lobe-chat/issues/3098) [#25](https://github.com/lobehub/lobe-chat/issues/25) ([fc85c20](https://github.com/lobehub/lobe-chat/commit/fc85c20))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.15.35](https://github.com/lobehub/lobe-chat/compare/v1.15.34...v1.15.35)
 
 <sup>Released on **2024-09-10**</sup>
package/Dockerfile
CHANGED
@@ -117,6 +117,8 @@ ENV \
     BAICHUAN_API_KEY="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" \
+    # Fireworks AI
+    FIREWORKSAI_API_KEY="" FIREWORKSAI_MODEL_LIST="" \
     # Google
     GOOGLE_API_KEY="" GOOGLE_PROXY_URL="" \
     # Groq
@@ -141,6 +143,8 @@ ENV \
     QWEN_API_KEY="" QWEN_MODEL_LIST="" \
     # SiliconCloud
    SILICONCLOUD_API_KEY="" SILICONCLOUD_MODEL_LIST="" SILICONCLOUD_PROXY_URL="" \
+    # Spark
+    SPARK_API_KEY="" \
     # Stepfun
     STEPFUN_API_KEY="" \
     # Taichu
package/Dockerfile.database
CHANGED
@@ -149,6 +149,8 @@ ENV \
     BAICHUAN_API_KEY="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" \
+    # Fireworks AI
+    FIREWORKSAI_API_KEY="" FIREWORKSAI_MODEL_LIST="" \
     # Google
     GOOGLE_API_KEY="" GOOGLE_PROXY_URL="" \
     # Groq
@@ -173,6 +175,8 @@ ENV \
     QWEN_API_KEY="" QWEN_MODEL_LIST="" \
     # SiliconCloud
     SILICONCLOUD_API_KEY="" SILICONCLOUD_MODEL_LIST="" SILICONCLOUD_PROXY_URL="" \
+    # Spark
+    SPARK_API_KEY="" \
     # Stepfun
     STEPFUN_API_KEY="" \
     # Taichu
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.15.35",
+  "version": "1.16.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -212,7 +212,7 @@
     "systemjs": "^6.15.1",
     "ts-md5": "^1.3.1",
     "ua-parser-js": "^1.0.38",
-    "unstructured-client": "^0.
+    "unstructured-client": "^0.16.0",
     "url-join": "^5.0.0",
     "use-merge-value": "^1.2.0",
     "utility-types": "^3.11.0",
|
@@ -5,6 +5,7 @@ import {
|
|
5
5
|
AnthropicProviderCard,
|
6
6
|
BaichuanProviderCard,
|
7
7
|
DeepSeekProviderCard,
|
8
|
+
FireworksAIProviderCard,
|
8
9
|
GoogleProviderCard,
|
9
10
|
GroqProviderCard,
|
10
11
|
MinimaxProviderCard,
|
@@ -15,6 +16,7 @@ import {
|
|
15
16
|
PerplexityProviderCard,
|
16
17
|
QwenProviderCard,
|
17
18
|
SiliconCloudProviderCard,
|
19
|
+
SparkProviderCard,
|
18
20
|
StepfunProviderCard,
|
19
21
|
TaichuProviderCard,
|
20
22
|
TogetherAIProviderCard,
|
@@ -38,29 +40,31 @@ export const useProviderList = (): ProviderItem[] => {
|
|
38
40
|
return useMemo(
|
39
41
|
() => [
|
40
42
|
OpenAIProvider,
|
41
|
-
OllamaProvider,
|
42
43
|
AzureProvider,
|
43
|
-
|
44
|
+
OllamaProvider,
|
44
45
|
AnthropicProviderCard,
|
45
46
|
BedrockProvider,
|
46
|
-
|
47
|
+
GoogleProviderCard,
|
48
|
+
DeepSeekProviderCard,
|
47
49
|
OpenRouterProviderCard,
|
50
|
+
GroqProviderCard,
|
48
51
|
NovitaProviderCard,
|
52
|
+
PerplexityProviderCard,
|
53
|
+
MistralProviderCard,
|
49
54
|
TogetherAIProviderCard,
|
55
|
+
FireworksAIProviderCard,
|
56
|
+
UpstageProviderCard,
|
50
57
|
QwenProviderCard,
|
51
|
-
|
52
|
-
MinimaxProviderCard,
|
53
|
-
MistralProviderCard,
|
54
|
-
MoonshotProviderCard,
|
55
|
-
PerplexityProviderCard,
|
58
|
+
SparkProviderCard,
|
56
59
|
ZhiPuProviderCard,
|
57
60
|
ZeroOneProviderCard,
|
58
61
|
StepfunProviderCard,
|
62
|
+
MoonshotProviderCard,
|
59
63
|
BaichuanProviderCard,
|
60
|
-
|
64
|
+
MinimaxProviderCard,
|
61
65
|
Ai360ProviderCard,
|
62
66
|
SiliconCloudProviderCard,
|
63
|
-
|
67
|
+
TaichuProviderCard,
|
64
68
|
],
|
65
69
|
[AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider],
|
66
70
|
);
|
package/src/app/api/chat/agentRuntime.ts
CHANGED
@@ -151,6 +151,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
       return { apiKey };
     }
+    case ModelProvider.FireworksAI: {
+      const { FIREWORKSAI_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || FIREWORKSAI_API_KEY);
+
+      return { apiKey };
+    }
     case ModelProvider.ZeroOne: {
       const { ZEROONE_API_KEY } = getLLMConfig();
 
@@ -213,6 +220,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
 
       const apiKey = apiKeyManager.pick(payload?.apiKey || UPSTAGE_API_KEY);
 
+      return { apiKey };
+    }
+    case ModelProvider.Spark: {
+      const { SPARK_API_KEY } = getLLMConfig();
+
+      const apiKey = apiKeyManager.pick(payload?.apiKey || SPARK_API_KEY);
+
       return { apiKey };
     }
   }
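Both new cases resolve credentials the same way as every other provider in this switch: a key carried in the request payload wins, otherwise the server-side key from `getLLMConfig()` is used. A minimal sketch of that resolution order; `pickApiKey` below is an illustrative stand-in, not the real `apiKeyManager.pick`, which also honors the `API_KEY_SELECT_MODE` setting seen in `config/llm.ts`:

```ts
// Illustrative stand-in for apiKeyManager.pick: prefer the caller's key,
// otherwise fall back to the server-side key from the environment.
const pickApiKey = (userKey?: string, serverKeys?: string): string | undefined => {
  if (userKey) return userKey;

  // Treat the server value as possibly comma-separated and take the first
  // non-empty entry; the real helper may select differently.
  return serverKeys
    ?.split(',')
    .map((k) => k.trim())
    .find(Boolean);
};

// Mirrors `case ModelProvider.Spark` above.
const payload: { apiKey?: string } = {};
const apiKey = pickApiKey(payload.apiKey, process.env.SPARK_API_KEY);
console.log(apiKey ? 'Spark key resolved' : 'no Spark key configured');
```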
package/src/config/llm.ts
CHANGED
@@ -64,6 +64,10 @@ export const getLLMConfig = () => {
       TOGETHERAI_API_KEY: z.string().optional(),
       TOGETHERAI_MODEL_LIST: z.string().optional(),
 
+      ENABLED_FIREWORKSAI: z.boolean(),
+      FIREWORKSAI_API_KEY: z.string().optional(),
+      FIREWORKSAI_MODEL_LIST: z.string().optional(),
+
       ENABLED_AWS_BEDROCK: z.boolean(),
       AWS_BEDROCK_MODEL_LIST: z.string().optional(),
       AWS_REGION: z.string().optional(),
@@ -101,6 +105,9 @@ export const getLLMConfig = () => {
 
       ENABLED_UPSTAGE: z.boolean(),
       UPSTAGE_API_KEY: z.string().optional(),
+
+      ENABLED_SPARK: z.boolean(),
+      SPARK_API_KEY: z.string().optional(),
     },
     runtimeEnv: {
       API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -149,6 +156,10 @@ export const getLLMConfig = () => {
       TOGETHERAI_API_KEY: process.env.TOGETHERAI_API_KEY,
       TOGETHERAI_MODEL_LIST: process.env.TOGETHERAI_MODEL_LIST,
 
+      ENABLED_FIREWORKSAI: !!process.env.FIREWORKSAI_API_KEY,
+      FIREWORKSAI_API_KEY: process.env.FIREWORKSAI_API_KEY,
+      FIREWORKSAI_MODEL_LIST: process.env.FIREWORKSAI_MODEL_LIST,
+
       ENABLED_MOONSHOT: !!process.env.MOONSHOT_API_KEY,
       MOONSHOT_API_KEY: process.env.MOONSHOT_API_KEY,
       MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
@@ -199,6 +210,9 @@ export const getLLMConfig = () => {
 
       ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
       UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
+
+      ENABLED_SPARK: !!process.env.SPARK_API_KEY,
+      SPARK_API_KEY: process.env.SPARK_API_KEY,
     },
   });
 };
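The schema additions follow the file's one convention: each provider contributes an optional key (plus, where relevant, an optional model list), and an `ENABLED_*` boolean derived from the key's presence. Setting `FIREWORKSAI_API_KEY` or `SPARK_API_KEY` in the environment is therefore all it takes to switch a provider on server-side. A reduced sketch of that derivation, outside the zod schema:

```ts
// Reduced sketch of the convention above: a provider is enabled on the
// server exactly when its API key is present in the environment.
interface NewProviderEnv {
  ENABLED_FIREWORKSAI: boolean;
  FIREWORKSAI_API_KEY?: string;
  FIREWORKSAI_MODEL_LIST?: string;
  ENABLED_SPARK: boolean;
  SPARK_API_KEY?: string;
}

const readNewProviderEnv = (env: NodeJS.ProcessEnv): NewProviderEnv => ({
  ENABLED_FIREWORKSAI: !!env.FIREWORKSAI_API_KEY,
  FIREWORKSAI_API_KEY: env.FIREWORKSAI_API_KEY,
  FIREWORKSAI_MODEL_LIST: env.FIREWORKSAI_MODEL_LIST,
  ENABLED_SPARK: !!env.SPARK_API_KEY,
  SPARK_API_KEY: env.SPARK_API_KEY,
});

// The empty-string defaults in the Dockerfiles above are falsy, so an
// unset key still leaves the provider disabled.
console.log(readNewProviderEnv(process.env));
```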
package/src/config/modelProviders/baichuan.ts
CHANGED
@@ -32,7 +32,6 @@ const Baichuan: ModelProviderCard = {
     {
       description: '采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。',
       displayName: 'Baichuan 2 Turbo',
-      enabled: true,
       id: 'Baichuan2-Turbo',
       maxOutput: 8192,
       tokens: 32_768,
@@ -40,7 +39,6 @@ const Baichuan: ModelProviderCard = {
     {
       description: '具备 192K 超长上下文窗口,采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。',
       displayName: 'Baichuan 2 Turbo 192k',
-      enabled: true,
       id: 'Baichuan2-Turbo-192k',
       maxOutput: 2048,
       tokens: 192_000,
package/src/config/modelProviders/fireworksai.ts
ADDED
@@ -0,0 +1,143 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://fireworks.ai/models?show=Serverless
+// ref https://fireworks.ai/pricing
+const FireworksAI: ModelProviderCard = {
+  chatModels: [
+    {
+      description: 'Fireworks latest and most performant function-calling model. Firefunction-v2 is based on Llama-3 and trained to excel at function-calling as well as chat and instruction-following. See blog post for more details https://fireworks.ai/blog/firefunction-v2-launch-post',
+      displayName: 'Firefunction V2',
+      enabled: true,
+      functionCall: true,
+      id: 'accounts/fireworks/models/firefunction-v2',
+      tokens: 8192,
+    },
+    {
+      description: 'Fireworks open-source function calling model.',
+      displayName: 'Firefunction V1',
+      functionCall: true,
+      id: 'accounts/fireworks/models/firefunction-v1',
+      tokens: 32_768,
+    },
+    {
+      description: 'Vision-language model allowing both image and text as inputs (single image is recommended), trained on OSS model generated training data and open sourced on huggingface at fireworks-ai/FireLLaVA-13b',
+      displayName: 'FireLLaVA-13B',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/firellava-13b',
+      tokens: 4096,
+      vision: true,
+    },
+    {
+      displayName: 'Llama 3.1 8B Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
+      tokens: 131_072,
+    },
+    {
+      displayName: 'Llama 3.1 70B Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
+      tokens: 131_072,
+    },
+    {
+      displayName: 'Llama 3.1 405B Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3p1-405b-instruct',
+      tokens: 131_072,
+    },
+    {
+      displayName: 'Llama 3 8B Instruct',
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3-8b-instruct',
+      tokens: 8192,
+    },
+    {
+      displayName: 'Llama 3 70B Instruct',
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3-70b-instruct',
+      tokens: 8192,
+    },
+    {
+      displayName: 'Llama 3 8B Instruct (HF version)',
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3-8b-instruct-hf',
+      tokens: 8192,
+    },
+    {
+      displayName: 'Llama 3 70B Instruct (HF version)',
+      functionCall: false,
+      id: 'accounts/fireworks/models/llama-v3-70b-instruct-hf',
+      tokens: 8192,
+    },
+    {
+      displayName: 'Gemma 2 9B Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/gemma2-9b-it',
+      tokens: 8192,
+    },
+    {
+      displayName: 'Mixtral MoE 8x7B Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/mixtral-8x7b-instruct',
+      tokens: 32_768,
+    },
+    {
+      displayName: 'Mixtral MoE 8x22B Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/mixtral-8x22b-instruct',
+      tokens: 65_536,
+    },
+    {
+      displayName: 'Mixtral MoE 8x7B Instruct (HF version)',
+      functionCall: false,
+      id: 'accounts/fireworks/models/mixtral-8x7b-instruct-hf',
+      tokens: 32_768,
+    },
+    {
+      displayName: 'Phi 3 Vision Instruct',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/fireworks/models/phi-3-vision-128k-instruct',
+      tokens: 8192,
+      vision: true,
+    },
+    {
+      displayName: 'Yi-Large',
+      enabled: true,
+      functionCall: false,
+      id: 'accounts/yi-01-ai/models/yi-large',
+      tokens: 32_768,
+    },
+    {
+      displayName: 'StarCoder 7B',
+      functionCall: false,
+      id: 'accounts/fireworks/models/starcoder-7b',
+      tokens: 8192,
+    },
+    {
+      displayName: 'StarCoder 15.5B',
+      functionCall: false,
+      id: 'accounts/fireworks/models/starcoder-16b',
+      tokens: 8192,
+    },
+    {
+      displayName: 'MythoMax L2 13b',
+      functionCall: false,
+      id: 'accounts/fireworks/models/mythomax-l2-13b',
+      tokens: 4096,
+    },
+  ],
+  checkModel: 'accounts/fireworks/models/firefunction-v2',
+  id: 'fireworksai',
+  modelList: { showModelFetcher: true },
+  name: 'Fireworks AI',
+};
+
+export default FireworksAI;
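The card only exercises a small slice of `ModelProviderCard`. An inferred subset of the fields used above (the real type in `@/types/llm` is richer; treat this as a reading aid, not the actual definition):

```ts
// Inferred from usage above, not the actual definition in @/types/llm.
interface ChatModelCardSketch {
  description?: string;
  displayName: string;
  enabled?: boolean; // listed by default in the model picker when true
  functionCall?: boolean; // advertises tool/function-calling support
  id: string; // e.g. 'accounts/fireworks/models/firefunction-v2'
  maxOutput?: number;
  tokens?: number; // context window size
  vision?: boolean; // accepts image input
}

interface ModelProviderCardSketch {
  chatModels: ChatModelCardSketch[];
  checkModel?: string; // model used for connectivity checks
  id: string;
  modelList?: { showModelFetcher?: boolean };
  name: string;
}
```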
package/src/config/modelProviders/index.ts
CHANGED
@@ -6,6 +6,7 @@ import AzureProvider from './azure';
 import BaichuanProvider from './baichuan';
 import BedrockProvider from './bedrock';
 import DeepSeekProvider from './deepseek';
+import FireworksAIProvider from './fireworksai';
 import GoogleProvider from './google';
 import GroqProvider from './groq';
 import MinimaxProvider from './minimax';
@@ -18,6 +19,7 @@ import OpenRouterProvider from './openrouter';
 import PerplexityProvider from './perplexity';
 import QwenProvider from './qwen';
 import SiliconCloudProvider from './siliconcloud';
+import SparkProvider from './spark';
 import StepfunProvider from './stepfun';
 import TaichuProvider from './taichu';
 import TogetherAIProvider from './togetherai';
@@ -39,6 +41,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   OllamaProvider.chatModels,
   OpenRouterProvider.chatModels,
   TogetherAIProvider.chatModels,
+  FireworksAIProvider.chatModels,
   PerplexityProvider.chatModels,
   AnthropicProvider.chatModels,
   ZeroOneProvider.chatModels,
@@ -49,33 +52,36 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   Ai360Provider.chatModels,
   SiliconCloudProvider.chatModels,
   UpstageProvider.chatModels,
+  SparkProvider.chatModels,
 ].flat();
 
 export const DEFAULT_MODEL_PROVIDER_LIST = [
   OpenAIProvider,
   { ...AzureProvider, chatModels: [] },
-  QwenProvider,
   OllamaProvider,
   AnthropicProvider,
-  DeepSeekProvider,
+  BedrockProvider,
   GoogleProvider,
+  DeepSeekProvider,
   OpenRouterProvider,
-  TogetherAIProvider,
-  BedrockProvider,
+  GroqProvider,
+  NovitaProvider,
   PerplexityProvider,
-  MinimaxProvider,
   MistralProvider,
-  GroqProvider,
-  NovitaProvider,
-  MoonshotProvider,
+  TogetherAIProvider,
+  FireworksAIProvider,
+  UpstageProvider,
+  QwenProvider,
+  SparkProvider,
   ZhiPuProvider,
+  ZeroOneProvider,
   StepfunProvider,
-  ZeroOneProvider,
+  MoonshotProvider,
   BaichuanProvider,
-  TaichuProvider,
+  MinimaxProvider,
   Ai360Provider,
   SiliconCloudProvider,
-  UpstageProvider,
+  TaichuProvider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -93,6 +99,7 @@ export { default as AzureProviderCard } from './azure';
 export { default as BaichuanProviderCard } from './baichuan';
 export { default as BedrockProviderCard } from './bedrock';
 export { default as DeepSeekProviderCard } from './deepseek';
+export { default as FireworksAIProviderCard } from './fireworksai';
 export { default as GoogleProviderCard } from './google';
 export { default as GroqProviderCard } from './groq';
 export { default as MinimaxProviderCard } from './minimax';
@@ -105,6 +112,7 @@ export { default as OpenRouterProviderCard } from './openrouter';
 export { default as PerplexityProviderCard } from './perplexity';
 export { default as QwenProviderCard } from './qwen';
 export { default as SiliconCloudProviderCard } from './siliconcloud';
+export { default as SparkProviderCard } from './spark';
 export { default as StepfunProviderCard } from './stepfun';
 export { default as TaichuProviderCard } from './taichu';
 export { default as TogetherAIProviderCard } from './togetherai';
package/src/config/modelProviders/minimax.ts
CHANGED
@@ -30,14 +30,12 @@ const Minimax: ModelProviderCard = {
     {
       description: '生产力场景',
       displayName: 'abab5.5',
-      enabled: true,
       id: 'abab5.5-chat',
       tokens: 16_384,
     },
     {
       description: '中文人设对话场景',
       displayName: 'abab5.5s',
-      enabled: true,
       id: 'abab5.5s-chat',
       tokens: 8192,
     },
package/src/config/modelProviders/spark.ts
ADDED
@@ -0,0 +1,59 @@
+import { ModelProviderCard } from '@/types/llm';
+
+// ref https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.html#_3-%E8%AF%B7%E6%B1%82%E8%AF%B4%E6%98%8E
+// ref https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
+const Spark: ModelProviderCard = {
+  chatModels: [
+    {
+      description: '轻量级大语言模型,低延迟,全免费 支持在线联网搜索功能 响应快速、便捷,全面免费开放 适用于低算力推理与模型精调等定制化场景',
+      displayName: 'Spark Lite',
+      enabled: true,
+      functionCall: false,
+      id: 'general',
+      maxOutput: 4096,
+      tokens: 8192,
+    },
+    {
+      description: '专业级大语言模型,兼顾模型效果与性能 数学、代码、医疗、教育等场景专项优化 支持联网搜索、天气、日期等多个内置插件 覆盖大部分知识问答、语言理解、文本创作等多个场景',
+      displayName: 'Spark Pro',
+      enabled: true,
+      functionCall: false,
+      id: 'generalv3',
+      maxOutput: 8192,
+      tokens: 8192,
+    },
+    {
+      description: '支持最长上下文的星火大模型,长文无忧 128K星火大模型强势来袭 通读全文,旁征博引 沟通无界,逻辑连贯',
+      displayName: 'Spark Pro-128K',
+      enabled: true,
+      functionCall: false,
+      id: 'Pro-128k',
+      maxOutput: 4096,
+      tokens: 128_000,
+    },
+    {
+      description: '最全面的星火大模型版本,功能丰富 支持联网搜索、天气、日期等多个内置插件 核心能力全面升级,各场景应用效果普遍提升 支持System角色人设与FunctionCall函数调用',
+      displayName: 'Spark3.5 Max',
+      enabled: true,
+      functionCall: false,
+      id: 'generalv3.5',
+      maxOutput: 8192,
+      tokens: 8192,
+    },
+    {
+      description: '最强大的星火大模型版本,效果极佳 全方位提升效果,引领智能巅峰 优化联网搜索链路,提供精准回答 强化文本总结能力,提升办公生产力',
+      displayName: 'Spark4.0 Ultra',
+      enabled: true,
+      functionCall: false,
+      id: '4.0Ultra',
+      maxOutput: 8192,
+      tokens: 8192,
+    },
+  ],
+  checkModel: 'general',
+  id: 'spark',
+  modelList: { showModelFetcher: true },
+  name: 'Spark',
+};
+
+export default Spark;
package/src/const/settings/llm.ts
CHANGED
@@ -4,6 +4,7 @@ import {
   BaichuanProviderCard,
   BedrockProviderCard,
   DeepSeekProviderCard,
+  FireworksAIProviderCard,
   GoogleProviderCard,
   GroqProviderCard,
   MinimaxProviderCard,
@@ -16,6 +17,7 @@ import {
   PerplexityProviderCard,
   QwenProviderCard,
   SiliconCloudProviderCard,
+  SparkProviderCard,
   StepfunProviderCard,
   TaichuProviderCard,
   TogetherAIProviderCard,
@@ -51,6 +53,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
     enabled: false,
     enabledModels: filterEnabledModels(DeepSeekProviderCard),
   },
+  fireworksai: {
+    enabled: false,
+    enabledModels: filterEnabledModels(FireworksAIProviderCard),
+  },
   google: {
     enabled: false,
     enabledModels: filterEnabledModels(GoogleProviderCard),
@@ -100,6 +106,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
     enabled: false,
     enabledModels: filterEnabledModels(SiliconCloudProviderCard),
   },
+  spark: {
+    enabled: false,
+    enabledModels: filterEnabledModels(SparkProviderCard),
+  },
   stepfun: {
     enabled: false,
     enabledModels: filterEnabledModels(StepfunProviderCard),
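`filterEnabledModels` itself is outside this diff, but its call sites imply it projects a provider card onto the ids of the models flagged `enabled: true`, giving each new provider a sensible default selection. A sketch consistent with that usage:

```ts
import FireworksAIProviderCard from '@/config/modelProviders/fireworksai';

// Sketch consistent with the call sites above; the implementation is an
// assumption, since filterEnabledModels is not part of this diff.
const filterEnabledModelsSketch = (provider: {
  chatModels: { enabled?: boolean; id: string }[];
}) => provider.chatModels.filter((m) => m.enabled).map((m) => m.id);

// For the Fireworks card this yields firefunction-v2, firellava-13b, the
// three Llama 3.1 sizes, Gemma 2, the two Mixtral MoEs, Phi 3 Vision and
// Yi-Large: exactly the entries marked `enabled: true` above.
console.log(filterEnabledModelsSketch(FireworksAIProviderCard));
```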
package/src/libs/agent-runtime/AgentRuntime.ts
CHANGED
@@ -9,6 +9,7 @@ import { LobeAzureOpenAI } from './azureOpenai';
 import { LobeBaichuanAI } from './baichuan';
 import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
 import { LobeDeepSeekAI } from './deepseek';
+import { LobeFireworksAI } from './fireworksai';
 import { LobeGoogleAI } from './google';
 import { LobeGroq } from './groq';
 import { LobeMinimaxAI } from './minimax';
@@ -21,6 +22,7 @@ import { LobeOpenRouterAI } from './openrouter';
 import { LobePerplexityAI } from './perplexity';
 import { LobeQwenAI } from './qwen';
 import { LobeSiliconCloudAI } from './siliconcloud';
+import { LobeSparkAI } from './spark';
 import { LobeStepfunAI } from './stepfun';
 import { LobeTaichuAI } from './taichu';
 import { LobeTogetherAI } from './togetherai';
@@ -120,6 +122,7 @@ class AgentRuntime {
     baichuan: Partial<ClientOptions>;
     bedrock: Partial<LobeBedrockAIParams>;
     deepseek: Partial<ClientOptions>;
+    fireworksai: Partial<ClientOptions>;
     google: { apiKey?: string; baseURL?: string };
     groq: Partial<ClientOptions>;
     minimax: Partial<ClientOptions>;
@@ -132,6 +135,7 @@ class AgentRuntime {
     perplexity: Partial<ClientOptions>;
     qwen: Partial<ClientOptions>;
     siliconcloud: Partial<ClientOptions>;
+    spark: Partial<ClientOptions>;
     stepfun: Partial<ClientOptions>;
     taichu: Partial<ClientOptions>;
     togetherai: Partial<ClientOptions>;
@@ -224,6 +228,11 @@ class AgentRuntime {
         break;
       }
 
+      case ModelProvider.FireworksAI: {
+        runtimeModel = new LobeFireworksAI(params.fireworksai);
+        break
+      }
+
       case ModelProvider.ZeroOne: {
         runtimeModel = new LobeZeroOneAI(params.zeroone);
         break;
@@ -268,6 +277,11 @@ class AgentRuntime {
         runtimeModel = new LobeUpstageAI(params.upstage);
         break
       }
+
+      case ModelProvider.Spark: {
+        runtimeModel = new LobeSparkAI(params.spark);
+        break
+      }
     }
 
     return new AgentRuntime(runtimeModel);
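With the enum values, client-option fields, and switch cases in place, constructing a runtime for either provider is uniform with the rest of the file. A hedged usage sketch; the name of the static initializer containing this switch is not visible in the diff, so `initializeWithProviderOptions` below is an assumption:

```ts
import AgentRuntime, { ModelProvider } from '@/libs/agent-runtime';

// Assumed initializer name; the diff only shows the switch body and the
// final `return new AgentRuntime(runtimeModel)`.
const runtime = await AgentRuntime.initializeWithProviderOptions(ModelProvider.FireworksAI, {
  fireworksai: { apiKey: process.env.FIREWORKSAI_API_KEY },
});
```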
package/src/libs/agent-runtime/fireworksai/index.test.ts
ADDED
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeFireworksAI } from './index';
+
+const provider = ModelProvider.FireworksAI;
+const defaultBaseURL = 'https://api.fireworks.ai/inference/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeFireworksAI({ apiKey: 'test' });
+
+  // 使用 vi.spyOn 来模拟 chat.completions.create 方法
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeFireworksAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeFireworksAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeFireworksAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeFireworksAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeFireworksAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidFireworksAIAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidFireworksAIAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'accounts/fireworks/models/firefunction-v2',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_FIREWORKSAI_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
+
+        // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // 保存原始环境变量值
+        const originalDebugValue = process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION;
+
+        // 模拟环境变量
+        process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // 执行测试
+        // 运行你的测试函数,确保它会在条件满足时调用 debugStream
+        // 假设的测试函数调用,你可能需要根据实际情况调整
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'accounts/fireworks/models/firefunction-v2',
+          stream: true,
+          temperature: 0,
+        });
+
+        // 验证 debugStream 被调用
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // 恢复原始环境变量值
+        process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
package/src/libs/agent-runtime/fireworksai/index.ts
ADDED
@@ -0,0 +1,10 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeFireworksAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://api.fireworks.ai/inference/v1',
+  debug: {
+    chatCompletion: () => process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.FireworksAI,
+});
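Everything provider-specific fits in those ten lines; the factory supplies construction, error mapping, and streaming. Usage then mirrors the test file above (the import path is illustrative):

```ts
import { LobeFireworksAI } from '@/libs/agent-runtime/fireworksai';

// Throws an InvalidProviderAPIKey-style error when no apiKey is given,
// per the tests above.
const fireworks = new LobeFireworksAI({ apiKey: process.env.FIREWORKSAI_API_KEY });

// Set DEBUG_FIREWORKSAI_CHAT_COMPLETION=1 to dump the raw stream via debugStream.
const response = await fireworks.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'accounts/fireworks/models/firefunction-v2',
  stream: true,
  temperature: 0,
});
```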
package/src/libs/agent-runtime/spark/index.test.ts
ADDED
@@ -0,0 +1,255 @@
+// @vitest-environment node
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import {
+  ChatStreamCallbacks,
+  LobeOpenAICompatibleRuntime,
+  ModelProvider,
+} from '@/libs/agent-runtime';
+
+import * as debugStreamModule from '../utils/debugStream';
+import { LobeSparkAI } from './index';
+
+const provider = ModelProvider.Spark;
+const defaultBaseURL = 'https://spark-api-open.xf-yun.com/v1';
+
+const bizErrorType = 'ProviderBizError';
+const invalidErrorType = 'InvalidProviderAPIKey';
+
+// Mock the console.error to avoid polluting test output
+vi.spyOn(console, 'error').mockImplementation(() => {});
+
+let instance: LobeOpenAICompatibleRuntime;
+
+beforeEach(() => {
+  instance = new LobeSparkAI({ apiKey: 'test' });
+
+  // 使用 vi.spyOn 来模拟 chat.completions.create 方法
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+    new ReadableStream() as any,
+  );
+});
+
+afterEach(() => {
+  vi.clearAllMocks();
+});
+
+describe('LobeSparkAI', () => {
+  describe('init', () => {
+    it('should correctly initialize with an API key', async () => {
+      const instance = new LobeSparkAI({ apiKey: 'test_api_key' });
+      expect(instance).toBeInstanceOf(LobeSparkAI);
+      expect(instance.baseURL).toEqual(defaultBaseURL);
+    });
+  });
+
+  describe('chat', () => {
+    describe('Error', () => {
+      it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+        // Arrange
+        const apiError = new OpenAI.APIError(
+          400,
+          {
+            status: 400,
+            error: {
+              message: 'Bad Request',
+            },
+          },
+          'Error message',
+          {},
+        );
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              error: { message: 'Bad Request' },
+              status: 400,
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+        try {
+          new LobeSparkAI({});
+        } catch (e) {
+          expect(e).toEqual({ errorType: invalidErrorType });
+        }
+      });
+
+      it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: {
+            message: 'api is undefined',
+          },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+        // Arrange
+        const errorInfo = {
+          stack: 'abc',
+          cause: { message: 'api is undefined' },
+        };
+        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+        instance = new LobeSparkAI({
+          apiKey: 'test',
+
+          baseURL: 'https://api.abc.com/v1',
+        });
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: 'https://api.***.com/v1',
+            error: {
+              cause: { message: 'api is undefined' },
+              stack: 'abc',
+            },
+            errorType: bizErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should throw an InvalidSparkAPIKey error type on 401 status code', async () => {
+        // Mock the API call to simulate a 401 error
+        const error = new Error('Unauthorized') as any;
+        error.status = 401;
+        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          // Expect the chat method to throw an error with InvalidSparkAPIKey
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            error: new Error('Unauthorized'),
+            errorType: invalidErrorType,
+            provider,
+          });
+        }
+      });
+
+      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+        // Arrange
+        const genericError = new Error('Generic Error');
+
+        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+        // Act
+        try {
+          await instance.chat({
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'general',
+            temperature: 0,
+          });
+        } catch (e) {
+          expect(e).toEqual({
+            endpoint: defaultBaseURL,
+            errorType: 'AgentRuntimeError',
+            provider,
+            error: {
+              name: genericError.name,
+              cause: genericError.cause,
+              message: genericError.message,
+              stack: genericError.stack,
+            },
+          });
+        }
+      });
+    });
+
+    describe('DEBUG', () => {
+      it('should call debugStream and return StreamingTextResponse when DEBUG_SPARK_CHAT_COMPLETION is 1', async () => {
+        // Arrange
+        const mockProdStream = new ReadableStream() as any; // 模拟的 prod 流
+        const mockDebugStream = new ReadableStream({
+          start(controller) {
+            controller.enqueue('Debug stream content');
+            controller.close();
+          },
+        }) as any;
+        mockDebugStream.toReadableStream = () => mockDebugStream; // 添加 toReadableStream 方法
+
+        // 模拟 chat.completions.create 返回值,包括模拟的 tee 方法
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+        });
+
+        // 保存原始环境变量值
+        const originalDebugValue = process.env.DEBUG_SPARK_CHAT_COMPLETION;
+
+        // 模拟环境变量
+        process.env.DEBUG_SPARK_CHAT_COMPLETION = '1';
+        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+        // 执行测试
+        // 运行你的测试函数,确保它会在条件满足时调用 debugStream
+        // 假设的测试函数调用,你可能需要根据实际情况调整
+        await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'general',
+          stream: true,
+          temperature: 0,
+        });
+
+        // 验证 debugStream 被调用
+        expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+        // 恢复原始环境变量值
+        process.env.DEBUG_SPARK_CHAT_COMPLETION = originalDebugValue;
+      });
+    });
+  });
+});
package/src/libs/agent-runtime/spark/index.ts
ADDED
@@ -0,0 +1,13 @@
+import { ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+export const LobeSparkAI = LobeOpenAICompatibleFactory({
+  baseURL: 'https://spark-api-open.xf-yun.com/v1',
+  chatCompletion: {
+    noUserId: true,
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_SPARK_CHAT_COMPLETION === '1',
+  },
+  provider: ModelProvider.Spark,
+});
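The only Spark-specific option is `chatCompletion.noUserId`, which by its name tells the factory to omit the OpenAI `user` field from requests, presumably because Spark's OpenAI-compatible endpoint does not accept it. A sketch of what such an option implies (the factory's actual handling is not shown in this diff):

```ts
import type OpenAI from 'openai';

// Sketch only: strip the optional OpenAI `user` field before sending the
// payload to providers configured with `noUserId: true`.
const applyNoUserId = (
  params: OpenAI.ChatCompletionCreateParams,
  noUserId?: boolean,
): OpenAI.ChatCompletionCreateParams => {
  if (!noUserId) return params;
  const { user, ...rest } = params;
  return rest;
};
```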
package/src/libs/agent-runtime/types/type.ts
CHANGED
@@ -28,6 +28,7 @@ export enum ModelProvider {
   Baichuan = 'baichuan',
   Bedrock = 'bedrock',
   DeepSeek = 'deepseek',
+  FireworksAI = 'fireworksai',
   Google = 'google',
   Groq = 'groq',
   Minimax = 'minimax',
@@ -40,6 +41,7 @@ export enum ModelProvider {
   Perplexity = 'perplexity',
   Qwen = 'qwen',
   SiliconCloud = 'siliconcloud',
+  Spark = 'spark',
   Stepfun = 'stepfun',
   Taichu = 'taichu',
   TogetherAI = 'togetherai',
package/src/libs/agent-runtime/utils/streams/openai.test.ts
CHANGED
@@ -287,7 +287,7 @@ describe('OpenAIStream', () => {
       expect(chunks).toEqual([
         'id: 2\n',
         'event: tool_calls\n',
-        `data: [{"function":{"
+        `data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
       ]);
 
       expect(onToolCallMock).toHaveBeenCalledTimes(1);
@@ -334,7 +334,7 @@ describe('OpenAIStream', () => {
       expect(chunks).toEqual([
         'id: 5\n',
         'event: tool_calls\n',
-        `data: [{"function":{"
+        `data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
       ]);
     });
 
@@ -428,7 +428,7 @@ describe('OpenAIStream', () => {
       `data: [{"function":{"arguments":"","name":"realtime-weather____fetchCurrentWeather"},"id":"toolu_01VQtK4W9kqxGGLHgsPPxiBj","index":0,"type":"function"}]\n`,
       'id: 1',
       'event: tool_calls',
-      `data: [{"function":{"arguments":"{\\"city\\": \\"杭州\\"}"},"id":"toolu_01VQtK4W9kqxGGLHgsPPxiBj","index":0,"type":"function"}]\n`,
+      `data: [{"function":{"arguments":"{\\"city\\": \\"杭州\\"}","name":null},"id":"toolu_01VQtK4W9kqxGGLHgsPPxiBj","index":0,"type":"function"}]\n`,
       'id: 1',
       'event: stop',
       `data: "tool_calls"\n`,
package/src/libs/agent-runtime/utils/streams/openai.ts
CHANGED
@@ -35,7 +35,10 @@ export const transformOpenAIStream = (
         }
 
         return {
-          function:
+          function: {
+            arguments: value.function?.arguments ?? '{}',
+            name: value.function?.name ?? null,
+          },
           id: value.id || stack?.tool?.id || generateToolCallId(index, value.function?.name),
 
           // mistral's tool calling don't have index and function field, it's data like:
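The accompanying test updates above show the intent of this fix: every emitted tool-call chunk now carries a complete `function` object, with `arguments` defaulting to `'{}'` and `name` to `null` when an upstream delta omits them (as argument-only follow-up chunks do). A standalone sketch of the normalization:

```ts
interface ToolCallDelta {
  function?: { arguments?: string; name?: string | null };
  id?: string;
  index: number;
}

// Same defaulting as the patched transformOpenAIStream: follow-up deltas
// that stream only `arguments` keep a stable shape instead of losing `name`.
const normalizeToolCall = (value: ToolCallDelta) => ({
  function: {
    arguments: value.function?.arguments ?? '{}',
    name: value.function?.name ?? null,
  },
  id: value.id,
  index: value.index,
  type: 'function' as const,
});

console.log(normalizeToolCall({ function: { arguments: '{"city": "杭州"}' }, index: 0 }));
// -> { function: { arguments: '{"city": "杭州"}', name: null }, id: undefined, index: 0, type: 'function' }
```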
package/src/server/globalConfig/index.ts
CHANGED
@@ -5,6 +5,7 @@ import { langfuseEnv } from '@/config/langfuse';
 import { getLLMConfig } from '@/config/llm';
 import {
   BedrockProviderCard,
+  FireworksAIProviderCard,
   GroqProviderCard,
   NovitaProviderCard,
   OllamaProviderCard,
@@ -63,7 +64,9 @@ export const getServerGlobalConfig = () => {
     SILICONCLOUD_MODEL_LIST,
 
     ENABLED_UPSTAGE,
-
+
+    ENABLED_SPARK,
+
     ENABLED_AZURE_OPENAI,
     AZURE_MODEL_LIST,
 
@@ -79,6 +82,9 @@ export const getServerGlobalConfig = () => {
 
     ENABLED_TOGETHERAI,
     TOGETHERAI_MODEL_LIST,
+
+    ENABLED_FIREWORKSAI,
+    FIREWORKSAI_MODEL_LIST,
   } = getLLMConfig();
 
   const config: GlobalServerConfig = {
@@ -112,6 +118,16 @@ export const getServerGlobalConfig = () => {
       }),
     },
     deepseek: { enabled: ENABLED_DEEPSEEK },
+
+    fireworksai: {
+      enabled: ENABLED_FIREWORKSAI,
+      enabledModels: extractEnabledModels(FIREWORKSAI_MODEL_LIST),
+      serverModelCards: transformToChatModelCards({
+        defaultChatModels: FireworksAIProviderCard.chatModels,
+        modelString: FIREWORKSAI_MODEL_LIST,
+      }),
+    },
+
     google: { enabled: ENABLED_GOOGLE },
     groq: {
       enabled: ENABLED_GROQ,
@@ -174,6 +190,7 @@ export const getServerGlobalConfig = () => {
         modelString: SILICONCLOUD_MODEL_LIST,
       }),
     },
+    spark: { enabled: ENABLED_SPARK },
     stepfun: { enabled: ENABLED_STEPFUN },
 
     taichu: { enabled: ENABLED_TAICHU },
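`FIREWORKSAI_MODEL_LIST` feeds both `extractEnabledModels` and `transformToChatModelCards`, so the env string can override which of the default cards the server exposes. Neither helper appears in this diff, and the real parser supports a richer syntax than the plain comma-separated id list assumed in this toy sketch:

```ts
// Toy sketch under a simplifying assumption: the real extractEnabledModels
// parses a richer syntax than a bare comma-separated id list.
const extractEnabledModelsSketch = (modelList?: string): string[] | undefined => {
  if (!modelList) return undefined;
  const ids = modelList
    .split(',')
    .map((id) => id.trim())
    .filter(Boolean);
  return ids.length > 0 ? ids : undefined;
};

console.log(extractEnabledModelsSketch(process.env.FIREWORKSAI_MODEL_LIST));
```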
package/src/types/user/settings/keyVaults.ts
CHANGED
@@ -22,6 +22,7 @@ export interface UserKeyVaults {
   baichuan?: OpenAICompatibleKeyVault;
   bedrock?: AWSBedrockKeyVault;
   deepseek?: OpenAICompatibleKeyVault;
+  fireworksai?: OpenAICompatibleKeyVault;
   google?: OpenAICompatibleKeyVault;
   groq?: OpenAICompatibleKeyVault;
   lobehub?: any;
@@ -36,6 +37,7 @@ export interface UserKeyVaults {
   perplexity?: OpenAICompatibleKeyVault;
   qwen?: OpenAICompatibleKeyVault;
   siliconcloud?: OpenAICompatibleKeyVault;
+  spark?: OpenAICompatibleKeyVault;
   stepfun?: OpenAICompatibleKeyVault;
   taichu?: OpenAICompatibleKeyVault;
   togetherai?: OpenAICompatibleKeyVault;