@lobehub/chat 1.19.0 → 1.19.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic; more details are available on the package registry's advisory page.

package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.19.1](https://github.com/lobehub/lobe-chat/compare/v1.19.0...v1.19.1)
6
+
7
+ <sup>Released on **2024-09-19**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Add mistral provider new models.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Add mistral provider new models, closes [#4014](https://github.com/lobehub/lobe-chat/issues/4014) ([0b70d57](https://github.com/lobehub/lobe-chat/commit/0b70d57))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ## [Version 1.19.0](https://github.com/lobehub/lobe-chat/compare/v1.18.2...v1.19.0)
6
31
 
7
32
  <sup>Released on **2024-09-18**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.19.0",
3
+ "version": "1.19.1",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -109,7 +109,7 @@
109
109
  "@azure/core-rest-pipeline": "1.16.0",
110
110
  "@azure/openai": "1.0.0-beta.12",
111
111
  "@cfworker/json-schema": "^2.0.0",
112
- "@clerk/localizations": "2.0.0",
112
+ "@clerk/localizations": "^3.0.4",
113
113
  "@clerk/nextjs": "^5.3.3",
114
114
  "@clerk/themes": "^2.1.27",
115
115
  "@codesandbox/sandpack-react": "^2.19.8",
@@ -6,33 +6,20 @@ const Mistral: ModelProviderCard = {
6
6
  chatModels: [
7
7
  {
8
8
  description:
9
- 'Mistral 7B是一款紧凑但高性能的模型,擅长批量处理和简单任务,如分类和文本生成,具有良好的推理能力。',
10
- displayName: 'Mistral 7B',
11
- id: 'open-mistral-7b',
12
- tokens: 32_768,
13
- },
14
- {
15
- description:
16
- 'Mixtral 8x7B是一个稀疏专家模型,利用多个参数提高推理速度,适合处理多语言和代码生成任务。',
17
- displayName: 'Mixtral 8x7B',
18
- id: 'open-mixtral-8x7b',
19
- tokens: 32_768,
20
- },
21
- {
22
- description:
23
- 'Mixtral 8x22B是一个更大的专家模型,专注于复杂任务,提供出色的推理能力和更高的吞吐量。',
24
- displayName: 'Mixtral 8x22B',
9
+ 'Mistral Nemo是一个与Nvidia合作开发的12B模型,提供出色的推理和编码性能,易于集成和替换。',
10
+ displayName: 'Mistral Nemo',
11
+ enabled: true,
25
12
  functionCall: true,
26
- id: 'open-mixtral-8x22b',
27
- tokens: 65_536,
13
+ id: 'open-mistral-nemo',
14
+ tokens: 128_000,
28
15
  },
29
16
  {
30
17
  description:
31
- 'Mistral Nemo是一个与Nvidia合作开发的12B模型,提供出色的推理和编码性能,易于集成和替换。',
32
- displayName: 'Mistral Nemo',
18
+ 'Mistral Small是成本效益高、快速且可靠的选项,适用于翻译、摘要和情感分析等用例。',
19
+ displayName: 'Mistral Small',
33
20
  enabled: true,
34
21
  functionCall: true,
35
- id: 'open-mistral-nemo',
22
+ id: 'mistral-small-latest',
36
23
  tokens: 128_000,
37
24
  },
38
25
  {
@@ -51,6 +38,36 @@ const Mistral: ModelProviderCard = {
51
38
  id: 'codestral-latest',
52
39
  tokens: 32_768,
53
40
  },
41
+ {
42
+ description: 'Pixtral 模型在图表和图理解、文档问答、多模态推理和指令遵循等任务上表现出强大的能力,能够以自然分辨率和宽高比摄入图像,还能够在长达 128K 令牌的长上下文窗口中处理任意数量的图像。',
43
+ displayName: 'Pixtral 12B',
44
+ enabled: true,
45
+ id: 'pixtral-12b-2409',
46
+ tokens: 128_000,
47
+ vision: true,
48
+ },
49
+ {
50
+ description:
51
+ 'Mistral 7B是一款紧凑但高性能的模型,擅长批量处理和简单任务,如分类和文本生成,具有良好的推理能力。',
52
+ displayName: 'Mistral 7B',
53
+ id: 'open-mistral-7b',
54
+ tokens: 32_768,
55
+ },
56
+ {
57
+ description:
58
+ 'Mixtral 8x7B是一个稀疏专家模型,利用多个参数提高推理速度,适合处理多语言和代码生成任务。',
59
+ displayName: 'Mixtral 8x7B',
60
+ id: 'open-mixtral-8x7b',
61
+ tokens: 32_768,
62
+ },
63
+ {
64
+ description:
65
+ 'Mixtral 8x22B是一个更大的专家模型,专注于复杂任务,提供出色的推理能力和更高的吞吐量。',
66
+ displayName: 'Mixtral 8x22B',
67
+ functionCall: true,
68
+ id: 'open-mixtral-8x22b',
69
+ tokens: 65_536,
70
+ },
54
71
  {
55
72
  description:
56
73
  'Codestral Mamba是专注于代码生成的Mamba 2语言模型,为先进的代码和推理任务提供强力支持。',
@@ -3,15 +3,6 @@ import { ModelProviderCard } from '@/types/llm';
3
3
  // ref :https://developers.upstage.ai/docs/getting-started/models
4
4
  const Upstage: ModelProviderCard = {
5
5
  chatModels: [
6
- {
7
- description:
8
- 'Solar Pro 是 Upstage 推出的一款高智能LLM,专注于单GPU的指令跟随能力,IFEval得分80以上。目前支持英语,正式版本计划于2024年11月推出,将扩展语言支持和上下文长度。',
9
- displayName: 'Solar Pro',
10
- enabled: true,
11
- functionCall: false,
12
- id: 'solar-pro',
13
- tokens: 4096,
14
- },
15
6
  {
16
7
  description:
17
8
  'Solar Mini 是一种紧凑型 LLM,性能优于 GPT-3.5,具备强大的多语言能力,支持英语和韩语,提供高效小巧的解决方案。',
@@ -29,6 +20,15 @@ const Upstage: ModelProviderCard = {
29
20
  id: 'solar-1-mini-chat-ja',
30
21
  tokens: 32_768,
31
22
  },
23
+ {
24
+ description:
25
+ 'Solar Pro 是 Upstage 推出的一款高智能LLM,专注于单GPU的指令跟随能力,IFEval得分80以上。目前支持英语,正式版本计划于2024年11月推出,将扩展语言支持和上下文长度。',
26
+ displayName: 'Solar Pro',
27
+ enabled: true,
28
+ functionCall: false,
29
+ id: 'solar-pro',
30
+ tokens: 4096,
31
+ },
32
32
  ],
33
33
  checkModel: 'solar-1-mini-chat',
34
34
  description: