@lobehub/chat 1.16.12 → 1.16.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic; see the release notes and diff below for more details.

package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.16.13](https://github.com/lobehub/lobe-chat/compare/v1.16.12...v1.16.13)
6
+
7
+ <sup>Released on **2024-09-13**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Update siliconcloud model.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Update siliconcloud model, closes [#3935](https://github.com/lobehub/lobe-chat/issues/3935) ([882e981](https://github.com/lobehub/lobe-chat/commit/882e981))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.16.12](https://github.com/lobehub/lobe-chat/compare/v1.16.11...v1.16.12)
6
31
 
7
32
  <sup>Released on **2024-09-12**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.16.12",
3
+ "version": "1.16.13",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -1,6 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
- // ref :https://siliconflow.cn/zh-cn/models
3
+ // ref :https://siliconflow.cn/zh-cn/pricing
4
4
  const SiliconCloud: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
@@ -161,18 +161,12 @@ const SiliconCloud: ModelProviderCard = {
161
161
  tokens: 32_768,
162
162
  },
163
163
  {
164
- description: 'LLaMA 3.1 405B 是预训练和指令调整的强大机型。',
164
+ description: 'LLaMA 3.1 405B 指令微调模型针对多语言对话场景进行了优化。',
165
165
  displayName: 'Llama 3.1 405B',
166
166
  enabled: true,
167
167
  id: 'meta-llama/Meta-Llama-3.1-405B-Instruct',
168
168
  tokens: 32_768,
169
169
  },
170
- {
171
- description: 'Reflection Llama 3.1 通过Reflection-Tuning技术提升推理能力。',
172
- displayName: 'Reflection Llama 3.1 70B',
173
- id: 'mattshumer/Reflection-Llama-3.1-70B',
174
- tokens: 32_768,
175
- },
176
170
  {
177
171
  description: 'LLaMA 3 支持大容量文本生成和指令解析。',
178
172
  displayName: 'Llama 3 70B',
@@ -180,13 +174,13 @@ const SiliconCloud: ModelProviderCard = {
180
174
  tokens: 8192,
181
175
  },
182
176
  {
183
- description: 'Mistral 7B 是按需 fine-tuning的模型,为任务提供优化解答。',
177
+ description: 'Mistral 7B 指令微调模型针对对话场景进行了优化,可用于文本生成和对话任务。',
184
178
  displayName: 'Mistral 7B',
185
179
  id: 'mistralai/Mistral-7B-Instruct-v0.2',
186
180
  tokens: 32_768,
187
181
  },
188
182
  {
189
- description: 'Mixtral 8x7B 是预训练的稀疏混合专家模型,用于通用性文本任务。',
183
+ description: 'Mixtral 8x7B 模型支持多语言输入和输出,可用于文本生成和对话任务。',
190
184
  displayName: 'Mistral 8x7B',
191
185
  id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
192
186
  tokens: 32_768,
@@ -194,7 +188,7 @@ const SiliconCloud: ModelProviderCard = {
194
188
  ],
195
189
  checkModel: 'Qwen/Qwen2-1.5B-Instruct',
196
190
  description:
197
- 'SiliconFlow 致力于加速 AGI,以惠及人类,通过易用与成本低的 GenAI 堆栈提升大规模 AI 效率。',
191
+ 'SiliconCloud,基于优秀开源基础模型的高性价比 GenAI 云服务',
198
192
  id: 'siliconcloud',
199
193
  modelList: { showModelFetcher: true },
200
194
  modelsUrl: 'https://siliconflow.cn/zh-cn/models',
@@ -202,7 +196,7 @@ const SiliconCloud: ModelProviderCard = {
202
196
  proxyUrl: {
203
197
  placeholder: 'https://api.siliconflow.cn/v1',
204
198
  },
205
- url: 'https://siliconflow.cn',
199
+ url: 'https://siliconflow.cn/zh-cn/siliconcloud',
206
200
  };
207
201
 
208
202
  export default SiliconCloud;