@lobehub/chat 1.49.8 → 1.49.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.49.10](https://github.com/lobehub/lobe-chat/compare/v1.49.9...v1.49.10)
+
+<sup>Released on **2025-02-02**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix `<think>` tag crash with special markdown content.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix `<think>` tag crash with special markdown content, closes [#5670](https://github.com/lobehub/lobe-chat/issues/5670) ([b719522](https://github.com/lobehub/lobe-chat/commit/b719522))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.49.9](https://github.com/lobehub/lobe-chat/compare/v1.49.8...v1.49.9)
+
+<sup>Released on **2025-02-01**</sup>
+
+#### 💄 Styles
+
+- **misc**: Update siliconcloud models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Update siliconcloud models, closes [#5647](https://github.com/lobehub/lobe-chat/issues/5647) ([4b41ad4](https://github.com/lobehub/lobe-chat/commit/4b41ad4))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.49.8](https://github.com/lobehub/lobe-chat/compare/v1.49.7...v1.49.8)
 
 <sup>Released on **2025-02-01**</sup>
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Fix <think> tag crash with special markdown content."
+      ]
+    },
+    "date": "2025-02-02",
+    "version": "1.49.10"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Update siliconcloud models."
+      ]
+    },
+    "date": "2025-02-01",
+    "version": "1.49.9"
+  },
   {
     "children": {
       "improvements": [
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.49.8",
+  "version": "1.49.10",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -1,20 +1,86 @@
 import { AIChatModelCard } from '@/types/aiModel';
 
 const siliconcloudChatModels: AIChatModelCard[] = [
+  {
+    abilities: {
+      reasoning: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek-R1 是一款强化学习(RL)驱动的推理模型,解决了模型中的重复性和可读性问题。在 RL 之前,DeepSeek-R1 引入了冷启动数据,进一步优化了推理性能。它在数学、代码和推理任务中与 OpenAI-o1 表现相当,并且通过精心设计的训练方法,提升了整体效果。',
+    displayName: 'DeepSeek R1',
+    enabled: true,
+    id: 'deepseek-ai/DeepSeek-R1',
+    pricing: {
+      currency: 'CNY',
+      input: 4,
+      output: 16,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 65_536,
+    description:
+      'DeepSeek-V3 是一款拥有 6710 亿参数的混合专家(MoE)语言模型,采用多头潜在注意力(MLA)和 DeepSeekMoE 架构,结合无辅助损失的负载平衡策略,优化推理和训练效率。通过在 14.8 万亿高质量tokens上预训练,并进行监督微调和强化学习,DeepSeek-V3 在性能上超越其他开源模型,接近领先闭源模型。',
+    displayName: 'DeepSeek V3',
+    enabled: true,
+    id: 'deepseek-ai/DeepSeek-V3',
+    pricing: { // 2.9 涨价
+      currency: 'CNY',
+      input: 1,
+      output: 2,
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
     },
     contextWindowTokens: 32_768,
     description:
-      'DeepSeek-V2.5 是 DeepSeek-V2-Chat 和 DeepSeek-Coder-V2-Instruct 的升级版本,集成了两个先前版本的通用和编码能力。该模型在多个方面进行了优化,包括写作和指令跟随能力,更好地与人类偏好保持一致。DeepSeek-V2.5 在各种评估基准上都取得了显著的提升,如 AlpacaEval 2.0、ArenaHard、AlignBench 和 MT-Bench
+      'DeepSeek-V2.5 是 DeepSeek-V2-Chat 和 DeepSeek-Coder-V2-Instruct 的升级版本,集成了两个先前版本的通用和编码能力。该模型在多个方面进行了优化,包括写作和指令跟随能力,更好地与人类偏好保持一致。DeepSeek-V2.5 在各种评估基准上都取得了显著的提升,如 AlpacaEval 2.0、ArenaHard、AlignBench 和 MT-Bench 等。',
     displayName: 'DeepSeek V2.5',
-    enabled: true,
     id: 'deepseek-ai/DeepSeek-V2.5',
     pricing: {
       currency: 'CNY',
-      input:
-      output:
+      input: 5,
+      output: 5,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 4096,
+    description:
+      'DeepSeek-VL2 是一个基于 DeepSeekMoE-27B 开发的混合专家(MoE)视觉语言模型,采用稀疏激活的 MoE 架构,在仅激活 4.5B 参数的情况下实现了卓越性能。该模型在视觉问答、光学字符识别、文档/表格/图表理解和视觉定位等多个任务中表现优异。',
+    displayName: 'DeepSeek VL2',
+    id: 'deepseek-ai/deepseek-vl2',
+    pricing: {
+      currency: 'CNY',
+      input: 0.99,
+      output: 0.99,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      vision: true,
+    },
+    contextWindowTokens: 32_768,
+    description:
+      'QVQ-72B-Preview 是由 Qwen 团队开发的专注于视觉推理能力的研究型模型,其在复杂场景理解和解决视觉相关的数学问题方面具有独特优势。',
+    displayName: 'QVQ 72B Preview',
+    enabled: true,
+    id: 'Qwen/QVQ-72B-Preview',
+    pricing: {
+      currency: 'CNY',
+      input: 9.9,
+      output: 9.9,
     },
     type: 'chat',
   },
@@ -477,7 +543,6 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     description:
       'Gemma 是由 Google 开发的轻量级、最先进的开放模型系列,采用与 Gemini 模型相同的研究和技术构建。这些模型是仅解码器的大型语言模型,支持英语,提供预训练和指令微调两种变体的开放权重。Gemma 模型适用于各种文本生成任务,包括问答、摘要和推理。其相对较小的规模使其能够部署在资源有限的环境中,如笔记本电脑、台式机或个人云基础设施,从而让所有人都能获得最先进的 AI 模型,促进创新',
     displayName: 'Gemma 2 27B',
-    enabled: true,
     id: 'google/gemma-2-27b-it',
     pricing: {
       currency: 'CNY',
@@ -524,7 +589,6 @@ const siliconcloudChatModels: AIChatModelCard[] = [
     description:
       'Meta Llama 3.1 是由 Meta 开发的多语言大型语言模型家族,包括 8B、70B 和 405B 三种参数规模的预训练和指令微调变体。该 70B 指令微调模型针对多语言对话场景进行了优化,在多项行业基准测试中表现优异。模型训练使用了超过 15 万亿个 tokens 的公开数据,并采用了监督微调和人类反馈强化学习等技术来提升模型的有用性和安全性。Llama 3.1 支持文本生成和代码生成,知识截止日期为 2023 年 12 月',
     displayName: 'Llama 3.1 70B Instruct',
-    enabled: true,
     id: 'meta-llama/Meta-Llama-3.1-70B-Instruct',
     pricing: {
       currency: 'CNY',
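
The hunks above add and adjust entries in the SiliconCloud chat model list. As a reading aid only, the sketch below shows the field shape those entries appear to follow, inferred purely from the lines visible in this diff; the authoritative `AIChatModelCard` type is defined in `@/types/aiModel` and is not included here.

```ts
// Sketch inferred from the entries added in the hunks above; not the real
// AIChatModelCard definition (which lives in '@/types/aiModel').
interface InferredChatModelCard {
  abilities?: {
    functionCall?: boolean;
    reasoning?: boolean;
    vision?: boolean;
  };
  contextWindowTokens: number;
  description: string;
  displayName: string;
  enabled?: boolean;
  id: string;
  pricing?: {
    currency: string; // 'CNY' in these entries
    input: number;    // input-token price in the listed currency (unit not stated in this diff)
    output: number;   // output-token price in the listed currency
  };
  type: 'chat';
}
```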
@@ -3,6 +3,33 @@ import { ModelProviderCard } from '@/types/llm';
 // ref :https://siliconflow.cn/zh-cn/pricing
 const SiliconCloud: ModelProviderCard = {
   chatModels: [
+    {
+      contextWindowTokens: 65_536,
+      description:
+        'DeepSeek-R1 是一款强化学习(RL)驱动的推理模型,解决了模型中的重复性和可读性问题。在 RL 之前,DeepSeek-R1 引入了冷启动数据,进一步优化了推理性能。它在数学、代码和推理任务中与 OpenAI-o1 表现相当,并且通过精心设计的训练方法,提升了整体效果。',
+      displayName: 'DeepSeek R1',
+      enabled: true,
+      id: 'deepseek-ai/DeepSeek-R1',
+      pricing: {
+        currency: 'CNY',
+        input: 4,
+        output: 16,
+      },
+    },
+    {
+      contextWindowTokens: 65_536,
+      description:
+        'DeepSeek-V3 是一款拥有 6710 亿参数的混合专家(MoE)语言模型,采用多头潜在注意力(MLA)和 DeepSeekMoE 架构,结合无辅助损失的负载平衡策略,优化推理和训练效率。通过在 14.8 万亿高质量tokens上预训练,并进行监督微调和强化学习,DeepSeek-V3 在性能上超越其他开源模型,接近领先闭源模型。',
+      displayName: 'DeepSeek V3',
+      enabled: true,
+      functionCall: true,
+      id: 'deepseek-ai/DeepSeek-V3',
+      pricing: { // 2.9 涨价
+        currency: 'CNY',
+        input: 1,
+        output: 2,
+      },
+    },
     {
       contextWindowTokens: 32_768,
       description:
@@ -13,10 +40,37 @@ const SiliconCloud: ModelProviderCard = {
       id: 'deepseek-ai/DeepSeek-V2.5',
       pricing: {
         currency: 'CNY',
-        input:
-        output:
+        input: 5,
+        output: 5,
       },
     },
+    {
+      contextWindowTokens: 4096,
+      description:
+        'DeepSeek-VL2 是一个基于 DeepSeekMoE-27B 开发的混合专家(MoE)视觉语言模型,采用稀疏激活的 MoE 架构,在仅激活 4.5B 参数的情况下实现了卓越性能。该模型在视觉问答、光学字符识别、文档/表格/图表理解和视觉定位等多个任务中表现优异。',
+      displayName: 'DeepSeek VL2',
+      id: 'deepseek-ai/deepseek-vl2',
+      pricing: {
+        currency: 'CNY',
+        input: 0.99,
+        output: 0.99,
+      },
+      vision: true,
+    },
+    {
+      contextWindowTokens: 32_768,
+      description:
+        'QVQ-72B-Preview 是由 Qwen 团队开发的专注于视觉推理能力的研究型模型,其在复杂场景理解和解决视觉相关的数学问题方面具有独特优势。',
+      displayName: 'QVQ 72B Preview',
+      enabled: true,
+      id: 'Qwen/QVQ-72B-Preview',
+      pricing: {
+        currency: 'CNY',
+        input: 9.9,
+        output: 9.9,
+      },
+      vision: true,
+    },
     {
       contextWindowTokens: 32_768,
       description:
@@ -32,7 +32,14 @@ export const createRemarkCustomTagPlugin = (tag: string) => () => {
 
   // 转换为 Markdown 字符串
   const content = contentNodes
-    .map((n: any) =>
+    .map((n: any) => {
+      // fix https://github.com/lobehub/lobe-chat/issues/5668
+      if (n.type === 'paragraph') {
+        return n.children.map((child: any) => child.value).join('');
+      }
+
+      return toMarkdown(n);
+    })
     .join('\n\n')
     .trim();
 
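
The last hunk is the fix for the `<think>` tag crash ([#5670](https://github.com/lobehub/lobe-chat/issues/5670)): paragraph nodes are flattened to their children's raw text instead of being re-serialized through `toMarkdown`. The standalone sketch below only illustrates that mapping; the helper name, the node type, and the sample input are hypothetical and not taken from the package source.

```ts
import { toMarkdown } from 'mdast-util-to-markdown';

// Hypothetical mdast-like node type, just wide enough for this sketch.
type MdastNode = { type: string; value?: string; children?: MdastNode[] };

// Hypothetical helper mirroring the .map() callback added in the hunk above.
const nodeToText = (n: MdastNode): string => {
  if (n.type === 'paragraph') {
    // Paragraphs are flattened to their children's raw values instead of
    // being re-serialized, matching the branch added in the diff.
    return (n.children ?? []).map((child) => child.value ?? '').join('');
  }
  return toMarkdown(n as any);
};

// Hypothetical input: a paragraph node like those collected by the plugin.
const paragraph: MdastNode = {
  type: 'paragraph',
  children: [{ type: 'text', value: 'special markdown content inside <think>' }],
};

console.log(nodeToText(paragraph)); // -> 'special markdown content inside <think>'
```

The in-code comment ties the new branch to issue [#5668](https://github.com/lobehub/lobe-chat/issues/5668); re-serializing such paragraphs with `toMarkdown` is exactly what the added check avoids.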