@lobehub/chat 0.162.22 → 0.162.23

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,32 @@
 
  # Changelog
 
+ ### [Version 0.162.23](https://github.com/lobehub/lobe-chat/compare/v0.162.22...v0.162.23)
+
+ <sup>Released on **2024-06-12**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Add Qwen2 models, Add Zhipu new models.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Add Qwen2 models, closes [#2832](https://github.com/lobehub/lobe-chat/issues/2832) ([fb97be9](https://github.com/lobehub/lobe-chat/commit/fb97be9))
+ - **misc**: Add Zhipu new models, closes [#2830](https://github.com/lobehub/lobe-chat/issues/2830) ([5be43f0](https://github.com/lobehub/lobe-chat/commit/5be43f0))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 0.162.22](https://github.com/lobehub/lobe-chat/compare/v0.162.21...v0.162.22)
 
  <sup>Released on **2024-06-11**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "0.162.22",
+ "version": "0.162.23",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -13,6 +13,39 @@ const Ollama: ModelProviderCard = {
  id: 'llama3:70b',
  tokens: 8000,
  },
+ {
+ displayName: 'Phi-3 3.8B',
+ enabled: true,
+ id: 'phi3',
+ tokens: 128_000,
+ },
+ {
+ displayName: 'Phi-3 14B',
+ id: 'phi3:14b',
+ tokens: 128_000,
+ },
+ {
+ displayName: 'Aya 23 8B',
+ enabled: true,
+ id: 'aya',
+ tokens: 8192, // https://cohere.com/research/papers/aya-command-23-8b-and-35b-technical-report-2024-05-23
+ },
+ {
+ displayName: 'Aya 23 35B',
+ id: 'aya:35b',
+ tokens: 8192,
+ },
+ {
+ displayName: 'Qwen2 7B',
+ enabled: true,
+ id: 'qwen2',
+ tokens: 128_000,
+ },
+ {
+ displayName: 'Qwen2 72B',
+ id: 'qwen2:72b',
+ tokens: 128_000,
+ },
  {
  displayName: 'Command R 35B',
  enabled: true,
@@ -91,12 +124,6 @@ const Ollama: ModelProviderCard = {
  id: 'codellama:python',
  tokens: 16_384,
  },
- {
- displayName: 'Phi3-Instruct 3.8B',
- enabled: true,
- id: 'phi3:instruct',
- tokens: 131_072, // https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/config.json
- },
  {
  displayName: 'Mistral',
  enabled: true,
@@ -121,7 +148,6 @@ const Ollama: ModelProviderCard = {
  },
  {
  displayName: 'Qwen Chat 7B',
- enabled: true,
  id: 'qwen:7b',
  tokens: 32_768,
  },
@@ -4,26 +4,49 @@ import { ModelProviderCard } from '@/types/llm';
  const Qwen: ModelProviderCard = {
  chatModels: [
  {
- description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入。',
+ description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入',
  displayName: 'Qwen Turbo',
  enabled: true,
  id: 'qwen-turbo',
- tokens: 8192,
+ tokens: 8000,
  },
  {
- description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入。',
+ description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入',
  displayName: 'Qwen Plus',
  enabled: true,
  id: 'qwen-plus',
- tokens: 30_720,
+ tokens: 32_000,
  },
  {
- description:
-   '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,当前通义千问2.5产品版本背后的API模型。',
+ description: '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,当前通义千问2.5产品版本背后的API模型',
  displayName: 'Qwen Max',
  enabled: true,
  id: 'qwen-max',
- tokens: 8192,
+ tokens: 8000,
+ },
+ {
+ description: '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,扩展了上下文窗口',
+ displayName: 'Qwen Max LongContext',
+ id: 'qwen-max-longcontext',
+ tokens: 30_000,
+ },
+ {
+ description: '通义千问2对外开源的7B规模的模型',
+ displayName: 'Qwen2 7B',
+ id: 'qwen2-7b-instruct',
+ tokens: 131_072,
+ },
+ {
+ description: '通义千问2对外开源的57B规模14B激活参数的MOE模型',
+ displayName: 'Qwen2 57B-A14B MoE',
+ id: 'qwen2-57b-a14b-instruct',
+ tokens: 32_768,
+ },
+ {
+ description: '通义千问2对外开源的72B规模的模型',
+ displayName: 'Qwen2 72B',
+ id: 'qwen2-72b-instruct',
+ tokens: 131_072,
  },
  ],
  checkModel: 'qwen-turbo',
@@ -1,35 +1,62 @@
  import { ModelProviderCard } from '@/types/llm';
 
  // ref https://open.bigmodel.cn/dev/howuse/model
+ // api https://open.bigmodel.cn/dev/api#language
  const ZhiPu: ModelProviderCard = {
  chatModels: [
  {
- description: '最新的 GLM-4 、最大支持 128k 上下文、支持 Function Call 、Retreival',
- displayName: 'GLM-4',
+ description: '智谱当前最先进最智能的模型,指令遵从能力大幅提升18.6%,发布于20240605',
+ displayName: 'GLM-4-0520',
  enabled: true,
  functionCall: true,
+ id: 'glm-4-0520',
+ tokens: 128_000,
+ },
+ {
+ description: '发布于20240116的最智能版本模型,目前已被 GLM-4-0520 版本超越',
+ displayName: 'GLM-4',
+ functionCall: true,
  id: 'glm-4',
  tokens: 128_000,
  },
  {
- description:
-   '实现了视觉语言特征的深度融合,支持视觉问答、图像字幕、视觉定位、复杂目标检测等各类多模态理解任务',
- displayName: 'GLM-4 Vision',
+ description: '性价比最高的版本,综合性能接近GLM-4,速度快,价格实惠',
+ displayName: 'GLM-4-Air',
+ enabled: true,
+ functionCall: true,
+ id: 'glm-4-air',
+ tokens: 128_000,
+ },
+ {
+ description: 'GLM-4-Air 的高性能版本,效果不变,推理速度达到其2.6倍',
+ displayName: 'GLM-4-Airx',
+ functionCall: true,
+ id: 'glm-4-airx',
+ tokens: 128_000,
+ },
+ {
+ description: '适用简单任务,速度最快,价格最实惠的版本',
+ displayName: 'GLM-4-Flash',
+ id: 'glm-4-flash',
+ tokens: 128_000,
+ },
+ {
+ description: '实现了视觉语言特征的深度融合,支持视觉问答、图像字幕、视觉定位、复杂目标检测等各类图像理解任务',
+ displayName: 'GLM-4V',
  enabled: true,
  id: 'glm-4v',
  tokens: 2000,
  vision: true,
  },
  {
- description: '最新的glm-3-turbo、最大支持 128k上下文、支持Function Call、Retreival',
- displayName: 'GLM-3 Turbo',
- enabled: true,
+ description: '适用于对知识量、推理能力、创造力要求较高的场景,比如广告文案、小说写作、知识类写作、代码生成等',
+ displayName: 'GLM-3-Turbo',
  functionCall: true,
  id: 'glm-3-turbo',
  tokens: 128_000,
  },
  ],
- checkModel: 'glm-3-turbo',
+ checkModel: 'glm-4-flash',
  id: 'zhipu',
  name: 'ZhiPu',
  };
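
For orientation, the hunks above only exercise `ModelProviderCard` entries; they do not show the type itself. The following is a minimal TypeScript sketch inferred solely from the fields used in this diff (`description`, `displayName`, `enabled`, `functionCall`, `id`, `tokens`, `vision`, `chatModels`, `checkModel`, `name`); the actual definition lives in `@/types/llm` and may contain additional members.

```ts
// Sketch only: shape inferred from the fields used in the hunks above,
// not the authoritative definition from '@/types/llm'.
interface ChatModelCard {
  description?: string;   // human-readable blurb for the model picker
  displayName?: string;   // UI label, e.g. 'Qwen2 72B'
  enabled?: boolean;      // listed/enabled by default when true
  functionCall?: boolean; // supports Function Call / tool use
  id: string;             // provider-side model id, e.g. 'qwen2-72b-instruct'
  tokens?: number;        // context window size in tokens
  vision?: boolean;       // accepts image input, e.g. 'glm-4v'
}

interface ModelProviderCard {
  chatModels: ChatModelCard[];
  checkModel?: string; // model used for the provider connectivity check, e.g. 'glm-4-flash'
  id: string;          // provider id, e.g. 'zhipu'
  name: string;        // provider display name, e.g. 'ZhiPu'
}
```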