@lobehub/chat 1.31.3 → 1.31.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/package.json +1 -1
- package/src/config/modelProviders/baichuan.ts +30 -2
- package/src/config/modelProviders/deepseek.ts +1 -1
- package/src/config/modelProviders/index.ts +1 -1
- package/src/config/modelProviders/qwen.ts +19 -19
- package/src/config/modelProviders/siliconcloud.ts +16 -4
- package/src/config/modelProviders/wenxin.ts +13 -0
- package/src/libs/agent-runtime/ai360/index.test.ts +1 -1
- package/src/libs/agent-runtime/ai360/index.ts +1 -1
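Most of the files listed above edit provider model lists typed as `ModelProviderCard`. As a rough map of what the hunks below add and remove, here is a minimal, illustrative sketch of a single `chatModels` entry, with field names taken from the diffs; the interface itself is an assumption for readability, and the real definition lives in `@/types/llm` inside lobe-chat.

```ts
// Illustrative sketch only — approximate shape of one `chatModels` entry,
// inferred from the fields that appear in the hunks below.
interface ChatModelCardSketch {
  description?: string;
  displayName?: string;
  enabled?: boolean; // surfaced by default in the model picker
  functionCall?: boolean; // supports tool / function calling
  id: string; // provider-side model identifier
  maxOutput?: number; // cap on completion tokens
  pricing?: { currency?: string; input?: number; output?: number };
  tokens?: number; // context window size
}

// Mirrors the Baichuan 4 Turbo entry added in baichuan.ts below.
const baichuan4Turbo: ChatModelCardSketch = {
  displayName: 'Baichuan 4 Turbo',
  enabled: true,
  functionCall: true,
  id: 'Baichuan4-Turbo',
  maxOutput: 4096,
  pricing: { currency: 'CNY', input: 15, output: 15 },
  tokens: 32_768,
};
```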
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.31.5](https://github.com/lobehub/lobe-chat/compare/v1.31.4...v1.31.5)
+
+<sup>Released on **2024-11-12**</sup>
+
+#### 💄 Styles
+
+- **misc**: Update some provider modellist & fix ai360 baseurl.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Update some provider modellist & fix ai360 baseurl, closes [#4679](https://github.com/lobehub/lobe-chat/issues/4679) ([1b19fae](https://github.com/lobehub/lobe-chat/commit/1b19fae))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.31.4](https://github.com/lobehub/lobe-chat/compare/v1.31.3...v1.31.4)
+
+<sup>Released on **2024-11-12**</sup>
+
+#### 💄 Styles
+
+- **misc**: Fix Cloudflare Workers AI Sort.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Fix Cloudflare Workers AI Sort, closes [#4672](https://github.com/lobehub/lobe-chat/issues/4672) ([353ba70](https://github.com/lobehub/lobe-chat/commit/353ba70))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.31.3](https://github.com/lobehub/lobe-chat/compare/v1.31.2...v1.31.3)
 
 <sup>Released on **2024-11-12**</sup>
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.31.3",
+  "version": "1.31.5",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/config/modelProviders/baichuan.ts
CHANGED
@@ -18,11 +18,40 @@ const Baichuan: ModelProviderCard = {
       },
       tokens: 32_768,
     },
+    {
+      description:
+        '模型能力国内第一,在知识百科、长文本、生成创作等中文任务上超越国外主流模型。还具备行业领先的多模态能力,多项权威评测基准表现优异。',
+      displayName: 'Baichuan 4 Turbo',
+      enabled: true,
+      functionCall: true,
+      id: 'Baichuan4-Turbo',
+      maxOutput: 4096,
+      pricing: {
+        currency: 'CNY',
+        input: 15,
+        output: 15,
+      },
+      tokens: 32_768,
+    },
+    {
+      description:
+        '模型能力国内第一,在知识百科、长文本、生成创作等中文任务上超越国外主流模型。还具备行业领先的多模态能力,多项权威评测基准表现优异。',
+      displayName: 'Baichuan 4 Air',
+      enabled: true,
+      functionCall: true,
+      id: 'Baichuan4-Air',
+      maxOutput: 4096,
+      pricing: {
+        currency: 'CNY',
+        input: 0.98,
+        output: 0.98,
+      },
+      tokens: 32_768,
+    },
     {
       description:
         '针对企业高频场景优化,效果大幅提升,高性价比。相对于Baichuan2模型,内容创作提升20%,知识问答提升17%, 角色扮演能力提升40%。整体效果比GPT3.5更优。',
       displayName: 'Baichuan 3 Turbo',
-      enabled: true,
       functionCall: true,
       id: 'Baichuan3-Turbo',
       maxOutput: 8192,
@@ -37,7 +66,6 @@ const Baichuan: ModelProviderCard = {
       description:
         '具备 128K 超长上下文窗口,针对企业高频场景优化,效果大幅提升,高性价比。相对于Baichuan2模型,内容创作提升20%,知识问答提升17%, 角色扮演能力提升40%。整体效果比GPT3.5更优。',
       displayName: 'Baichuan 3 Turbo 128k',
-      enabled: true,
       id: 'Baichuan3-Turbo-128k',
       maxOutput: 4096,
       pricing: {
package/src/config/modelProviders/index.ts
CHANGED
@@ -81,6 +81,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   DeepSeekProvider,
   HuggingFaceProvider,
   OpenRouterProvider,
+  CloudflareProvider,
   GithubProvider,
   NovitaProvider,
   TogetherAIProvider,
@@ -104,7 +105,6 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   MinimaxProvider,
   Ai360Provider,
   TaichuProvider,
-  CloudflareProvider,
   SiliconCloudProvider,
 ];
 
package/src/config/modelProviders/qwen.ts
CHANGED
@@ -118,6 +118,17 @@ const Qwen: ModelProviderCard = {
       },
       tokens: 131_072,
     },
+    {
+      description: '通义千问代码模型。',
+      displayName: 'Qwen Coder Plus',
+      id: 'qwen-coder-plus-latest',
+      pricing: {
+        currency: 'CNY',
+        input: 3.5,
+        output: 7,
+      },
+      tokens: 131_072,
+    },
     {
       description: '通义千问2.5对外开源的7B规模的模型。',
       displayName: 'Qwen2.5 7B',
@@ -166,17 +177,6 @@ const Qwen: ModelProviderCard = {
       },
       tokens: 131_072,
     },
-    {
-      description: 'Qwen-Math 模型具有强大的数学解题能力。',
-      displayName: 'Qwen2.5 Math 1.5B',
-      id: 'qwen2.5-math-1.5b-instruct',
-      pricing: {
-        currency: 'CNY',
-        input: 0,
-        output: 0,
-      },
-      tokens: 4096,
-    },
     {
       description: 'Qwen-Math 模型具有强大的数学解题能力。',
       displayName: 'Qwen2.5 Math 7B',
@@ -201,23 +201,23 @@ const Qwen: ModelProviderCard = {
     },
     {
       description: '通义千问代码模型开源版。',
-      displayName: 'Qwen2.5 Coder
-      id: 'qwen2.5-coder-
+      displayName: 'Qwen2.5 Coder 7B',
+      id: 'qwen2.5-coder-7b-instruct',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 1,
+        output: 2,
       },
       tokens: 131_072,
     },
     {
       description: '通义千问代码模型开源版。',
-      displayName: 'Qwen2.5 Coder
-      id: 'qwen2.5-coder-
+      displayName: 'Qwen2.5 Coder 32B',
+      id: 'qwen2.5-coder-32b-instruct',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 3.5,
+        output: 7,
       },
       tokens: 131_072,
     },
package/src/config/modelProviders/siliconcloud.ts
CHANGED
@@ -3,6 +3,18 @@ import { ModelProviderCard } from '@/types/llm';
 // ref :https://siliconflow.cn/zh-cn/pricing
 const SiliconCloud: ModelProviderCard = {
   chatModels: [
+    {
+      description: 'Hunyuan-Large 是业界最大的开源 Transformer 架构 MoE 模型,拥有 3890 亿总参数量和 520 亿激活参数量。',
+      displayName: 'Hunyuan Large',
+      enabled: true,
+      id: 'Tencent/Hunyuan-A52B-Instruct',
+      pricing: {
+        currency: 'CNY',
+        input: 21,
+        output: 21,
+      },
+      tokens: 32_768,
+    },
     {
       description: 'DeepSeek V2.5 集合了先前版本的优秀特征,增强了通用和编码能力。',
       displayName: 'DeepSeek V2.5',
@@ -105,12 +117,12 @@ const SiliconCloud: ModelProviderCard = {
     },
     {
       description: 'Qwen2.5-Coder 专注于代码编写。',
-      displayName: 'Qwen2.5 Coder
-      id: 'Qwen/Qwen2.5-Coder-
+      displayName: 'Qwen2.5 Coder 32B',
+      id: 'Qwen/Qwen2.5-Coder-32B-Instruct',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 1.26,
+        output: 1.26,
       },
       tokens: 32_768,
     },
package/src/config/modelProviders/wenxin.ts
CHANGED
@@ -79,6 +79,19 @@ const BaiduWenxin: ModelProviderCard = {
       },
       tokens: 8192,
     },
+    {
+      description:
+        '百度自研的旗舰级超大规模⼤语⾔模型,综合效果表现出色,广泛适用于各领域复杂任务场景;支持自动对接百度搜索插件,保障问答信息时效。相较于ERNIE 4.0在性能表现上更优秀',
+      displayName: 'ERNIE 4.0 Turbo 128K',
+      enabled: true,
+      id: 'ERNIE-4.0-Turbo-128K',
+      pricing: {
+        currency: 'CNY',
+        input: 20,
+        output: 60,
+      },
+      tokens: 128_000,
+    },
     {
       description:
         '百度自研的旗舰级超大规模⼤语⾔模型,综合效果表现出色,广泛适用于各领域复杂任务场景;支持自动对接百度搜索插件,保障问答信息时效。相较于ERNIE 4.0在性能表现上更优秀',
package/src/libs/agent-runtime/ai360/index.test.ts
CHANGED
@@ -12,7 +12,7 @@ import * as debugStreamModule from '../utils/debugStream';
 import { LobeAi360AI } from './index';
 
 const provider = ModelProvider.Ai360;
-const defaultBaseURL = 'https://
+const defaultBaseURL = 'https://api.360.cn/v1';
 
 const bizErrorType = 'ProviderBizError';
 const invalidErrorType = 'InvalidProviderAPIKey';
package/src/libs/agent-runtime/ai360/index.ts
CHANGED
@@ -2,7 +2,7 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 export const LobeAi360AI = LobeOpenAICompatibleFactory({
-  baseURL: 'https://
+  baseURL: 'https://api.360.cn/v1',
   chatCompletion: {
     handlePayload: (payload) => {
       return {
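The last two hunks point the ai360 runtime at 360's OpenAI-compatible endpoint. As a hedged illustration of what that base URL change means in practice, here is a minimal sketch that calls the corrected endpoint directly with the stock `openai` SDK; the model id is a placeholder, and lobe-chat itself goes through `LobeOpenAICompatibleFactory` rather than constructing a client by hand.

```ts
// Sketch only: exercises the corrected base URL with the `openai` npm package.
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.AI360_API_KEY, // assumes a 360 API key in the environment
  baseURL: 'https://api.360.cn/v1', // the baseURL fixed in the diff above
});

async function main() {
  const completion = await client.chat.completions.create({
    messages: [{ content: 'Hello', role: 'user' }],
    model: '360gpt-turbo', // placeholder model id — use whatever 360 exposes for your key
  });
  console.log(completion.choices[0]?.message.content);
}

main().catch(console.error);
```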