@lobehub/chat 1.15.21 → 1.15.22

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.

Potentially problematic release.


This version of @lobehub/chat might be problematic. Click here for more details.

package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.15.22](https://github.com/lobehub/lobe-chat/compare/v1.15.21...v1.15.22)
6
+
7
+ <sup>Released on **2024-09-08**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Update model display name & Remove Qwen preview model.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Update model display name & Remove Qwen preview model, closes [#3757](https://github.com/lobehub/lobe-chat/issues/3757) ([dd439ba](https://github.com/lobehub/lobe-chat/commit/dd439ba))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.15.21](https://github.com/lobehub/lobe-chat/compare/v1.15.20...v1.15.21)
6
31
 
7
32
  <sup>Released on **2024-09-08**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.15.21",
3
+ "version": "1.15.22",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -5,20 +5,12 @@ const DeepSeek: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
7
  description: '擅长通用对话任务',
8
- displayName: 'DeepSeek-V2',
8
+ displayName: 'DeepSeek V2.5',
9
9
  enabled: true,
10
10
  functionCall: true,
11
11
  id: 'deepseek-chat',
12
12
  tokens: 128_000,
13
13
  },
14
- {
15
- description: '擅长处理编程和数学任务',
16
- displayName: 'DeepSeek-Coder-V2',
17
- enabled: true,
18
- functionCall: true,
19
- id: 'deepseek-coder',
20
- tokens: 128_000,
21
- },
22
14
  ],
23
15
  checkModel: 'deepseek-chat',
24
16
  id: 'deepseek',
@@ -4,30 +4,30 @@ import { ModelProviderCard } from '@/types/llm';
4
4
  const Novita: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
- displayName: 'Llama3.1 8B Instruct',
7
+ displayName: 'Llama 3.1 8B Instruct',
8
8
  enabled: true,
9
9
  id: 'meta-llama/llama-3.1-8b-instruct',
10
10
  tokens: 8192,
11
11
  },
12
12
  {
13
- displayName: 'Llama3.1 70B Instruct',
13
+ displayName: 'Llama 3.1 70B Instruct',
14
14
  enabled: true,
15
15
  id: 'meta-llama/llama-3.1-70b-instruct',
16
16
  tokens: 131_072,
17
17
  },
18
18
  {
19
- displayName: 'Llama3.1 405B Instruct',
19
+ displayName: 'Llama 3.1 405B Instruct',
20
20
  enabled: true,
21
21
  id: 'meta-llama/llama-3.1-405b-instruct',
22
22
  tokens: 32_768,
23
23
  },
24
24
  {
25
- displayName: 'Llama3 8B Instruct',
25
+ displayName: 'Llama 3 8B Instruct',
26
26
  id: 'meta-llama/llama-3-8b-instruct',
27
27
  tokens: 8192,
28
28
  },
29
29
  {
30
- displayName: 'Llama3 70B Instruct',
30
+ displayName: 'Llama 3 70B Instruct',
31
31
  id: 'meta-llama/llama-3-70b-instruct',
32
32
  tokens: 8192,
33
33
  },
@@ -4,18 +4,18 @@ import { ModelProviderCard } from '@/types/llm';
4
4
  const Ollama: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
- displayName: 'Llama3.1 8B',
7
+ displayName: 'Llama 3.1 8B',
8
8
  enabled: true,
9
9
  id: 'llama3.1',
10
10
  tokens: 128_000,
11
11
  },
12
12
  {
13
- displayName: 'Llama3.1 70B',
13
+ displayName: 'Llama 3.1 70B',
14
14
  id: 'llama3.1:70b',
15
15
  tokens: 128_000,
16
16
  },
17
17
  {
18
- displayName: 'Llama3.1 405B',
18
+ displayName: 'Llama 3.1 405B',
19
19
  id: 'llama3.1:405b',
20
20
  tokens: 128_000,
21
21
  },
@@ -41,18 +41,18 @@ const Ollama: ModelProviderCard = {
41
41
  tokens: 16_384,
42
42
  },
43
43
  {
44
- displayName: 'Gemma2 2B',
44
+ displayName: 'Gemma 2 2B',
45
45
  id: 'gemma2:2b',
46
46
  tokens: 8192,
47
47
  },
48
48
  {
49
- displayName: 'Gemma2 9B',
49
+ displayName: 'Gemma 2 9B',
50
50
  enabled: true,
51
51
  id: 'gemma2',
52
52
  tokens: 8192,
53
53
  },
54
54
  {
55
- displayName: 'Gemma2 27B',
55
+ displayName: 'Gemma 2 27B',
56
56
  id: 'gemma2:27b',
57
57
  tokens: 8192,
58
58
  },
@@ -4,36 +4,36 @@ import { ModelProviderCard } from '@/types/llm';
4
4
  const Perplexity: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
- displayName: 'Llama3.1 Sonar Small Chat',
7
+ displayName: 'Llama 3.1 Sonar Small Chat',
8
8
  enabled: true,
9
9
  id: 'llama-3.1-sonar-small-128k-chat',
10
10
  tokens: 128_000,
11
11
  },
12
12
  {
13
- displayName: 'Llama3.1 Sonar Large Chat',
13
+ displayName: 'Llama 3.1 Sonar Large Chat',
14
14
  enabled: true,
15
15
  id: 'llama-3.1-sonar-large-128k-chat',
16
16
  tokens: 128_000,
17
17
  },
18
18
  {
19
- displayName: 'Llama3.1 Sonar Small Online',
19
+ displayName: 'Llama 3.1 Sonar Small Online',
20
20
  enabled: true,
21
21
  id: 'llama-3.1-sonar-small-128k-online',
22
22
  tokens: 128_000,
23
23
  },
24
24
  {
25
- displayName: 'Llama3.1 Sonar Large Online',
25
+ displayName: 'Llama 3.1 Sonar Large Online',
26
26
  enabled: true,
27
27
  id: 'llama-3.1-sonar-large-128k-online',
28
28
  tokens: 128_000,
29
29
  },
30
30
  {
31
- displayName: 'Llama3.1 8B Instruct',
31
+ displayName: 'Llama 3.1 8B Instruct',
32
32
  id: 'llama-3.1-8b-instruct',
33
33
  tokens: 128_000,
34
34
  },
35
35
  {
36
- displayName: 'Llama3.1 70B Instruct',
36
+ displayName: 'Llama 3.1 70B Instruct',
37
37
  id: 'llama-3.1-70b-instruct',
38
38
  tokens: 128_000,
39
39
  },
@@ -58,15 +58,6 @@ const Qwen: ModelProviderCard = {
58
58
  displayName: 'Qwen VL Max',
59
59
  enabled: true,
60
60
  id: 'qwen-vl-max',
61
- tokens: 8192,
62
- vision: true,
63
- },
64
- {
65
- description:
66
- '抢先体验即将升级的 qwen-vl-max 大模型。',
67
- displayName: 'Qwen VL Max 0809',
68
- enabled: true,
69
- id: 'qwen-vl-max-0809',
70
61
  tokens: 32_768,
71
62
  vision: true,
72
63
  },
@@ -79,7 +70,7 @@ const Qwen: ModelProviderCard = {
79
70
  },
80
71
  {
81
72
  description: '通义千问2对外开源的57B规模14B激活参数的MOE模型',
82
- displayName: 'Qwen2 57B-A14B MoE',
73
+ displayName: 'Qwen2 57B A14B MoE',
83
74
  id: 'qwen2-57b-a14b-instruct',
84
75
  tokens: 65_536, // https://huggingface.co/Qwen/Qwen2-57B-A14B-Instruct
85
76
  },
@@ -4,116 +4,150 @@ import { ModelProviderCard } from '@/types/llm';
4
4
  const SiliconCloud: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
- enabled: true,
8
- id: 'Qwen/Qwen2-72B-Instruct',
7
+ displayName: 'Qwen2 1.5B',
8
+ id: 'Qwen/Qwen2-1.5B-Instruct',
9
9
  tokens: 32_768,
10
10
  },
11
11
  {
12
+ displayName: 'Qwen2 7B',
12
13
  enabled: true,
13
- id: 'Qwen/Qwen2-Math-72B-Instruct',
14
+ id: 'Qwen/Qwen2-7B-Instruct',
14
15
  tokens: 32_768,
15
16
  },
16
17
  {
17
- enabled: true,
18
+ displayName: 'Qwen2 57B A14B',
18
19
  id: 'Qwen/Qwen2-57B-A14B-Instruct',
19
20
  tokens: 32_768,
20
21
  },
21
22
  {
22
- id: 'Qwen/Qwen2-7B-Instruct',
23
+ displayName: 'Qwen2 72B',
24
+ enabled: true,
25
+ id: 'Qwen/Qwen2-72B-Instruct',
23
26
  tokens: 32_768,
24
27
  },
25
28
  {
26
- id: 'Qwen/Qwen2-1.5B-Instruct',
29
+ displayName: 'Qwen2 Math 72B',
30
+ enabled: true,
31
+ id: 'Qwen/Qwen2-Math-72B-Instruct',
27
32
  tokens: 32_768,
28
33
  },
29
34
  {
30
- id: 'Qwen/Qwen1.5-110B-Chat',
35
+ displayName: 'Qwen1.5 7B',
36
+ id: 'Qwen/Qwen1.5-7B-Chat',
31
37
  tokens: 32_768,
32
38
  },
33
39
  {
34
- id: 'Qwen/Qwen1.5-32B-Chat',
40
+ displayName: 'Qwen1.5 14B',
41
+ id: 'Qwen/Qwen1.5-14B-Chat',
35
42
  tokens: 32_768,
36
43
  },
37
44
  {
38
- id: 'Qwen/Qwen1.5-14B-Chat',
45
+ displayName: 'Qwen1.5 32B',
46
+ id: 'Qwen/Qwen1.5-32B-Chat',
39
47
  tokens: 32_768,
40
48
  },
41
49
  {
42
- id: 'Qwen/Qwen1.5-7B-Chat',
50
+ displayName: 'Qwen1.5 110B',
51
+ id: 'Qwen/Qwen1.5-110B-Chat',
43
52
  tokens: 32_768,
44
53
  },
45
54
  {
46
- id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
47
- tokens: 32_768,
55
+ displayName: 'Yi-1.5 6B',
56
+ id: '01-ai/Yi-1.5-6B-Chat',
57
+ tokens: 4096,
48
58
  },
49
59
  {
60
+ displayName: 'Yi-1.5 9B',
50
61
  enabled: true,
51
- id: 'deepseek-ai/DeepSeek-V2-Chat',
52
- tokens: 32_768,
62
+ id: '01-ai/Yi-1.5-9B-Chat-16K',
63
+ tokens: 16_384,
53
64
  },
54
65
  {
55
- id: 'deepseek-ai/deepseek-llm-67b-chat',
66
+ displayName: 'Yi-1.5 34B',
67
+ enabled: true,
68
+ id: '01-ai/Yi-1.5-34B-Chat-16K',
69
+ tokens: 16_384,
70
+ },
71
+ {
72
+ displayName: 'GLM-3 6B',
73
+ id: 'THUDM/chatglm3-6b',
56
74
  tokens: 32_768,
57
75
  },
58
76
  {
77
+ displayName: 'GLM-4 9B',
78
+ enabled: true,
59
79
  id: 'THUDM/glm-4-9b-chat',
60
80
  tokens: 32_768,
61
81
  },
62
82
  {
63
- id: 'THUDM/chatglm3-6b',
83
+ displayName: 'Internlm 2.5 7B',
84
+ enabled: true,
85
+ id: 'internlm/internlm2_5-7b-chat',
64
86
  tokens: 32_768,
65
87
  },
66
88
  {
89
+ displayName: 'Internlm 2.5 20B',
67
90
  enabled: true,
68
- id: '01-ai/Yi-1.5-34B-Chat-16K',
69
- tokens: 16_384,
91
+ id: 'internlm/internlm2_5-20b-chat',
92
+ tokens: 32_768,
70
93
  },
71
94
  {
72
- id: '01-ai/Yi-1.5-9B-Chat-16K',
73
- tokens: 16_384,
95
+ displayName: 'DeepSeek V2 Chat',
96
+ id: 'deepseek-ai/DeepSeek-V2-Chat',
97
+ tokens: 32_768,
74
98
  },
75
99
  {
76
- id: '01-ai/Yi-1.5-6B-Chat',
77
- tokens: 4096,
100
+ displayName: 'DeepSeek V2 Coder',
101
+ id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
102
+ tokens: 32_768,
78
103
  },
79
104
  {
80
- id: 'internlm/internlm2_5-7b-chat',
105
+ displayName: 'DeepSeek LLM 67B',
106
+ id: 'deepseek-ai/deepseek-llm-67b-chat',
81
107
  tokens: 32_768,
82
108
  },
83
109
  {
110
+ displayName: 'Gemma 2 9B',
111
+ enabled: true,
84
112
  id: 'google/gemma-2-9b-it',
85
113
  tokens: 8192,
86
114
  },
87
115
  {
116
+ displayName: 'Gemma 2 27B',
117
+ enabled: true,
88
118
  id: 'google/gemma-2-27b-it',
89
119
  tokens: 8192,
90
120
  },
91
121
  {
92
- id: 'internlm/internlm2_5-20b-chat',
93
- tokens: 32_768,
94
- },
95
- {
122
+ displayName: 'Llama 3.1 8B',
123
+ enabled: true,
96
124
  id: 'meta-llama/Meta-Llama-3.1-8B-Instruct',
97
125
  tokens: 32_768,
98
126
  },
99
127
  {
128
+ displayName: 'Llama 3.1 70B',
100
129
  enabled: true,
101
130
  id: 'meta-llama/Meta-Llama-3.1-70B-Instruct',
102
131
  tokens: 32_768,
103
132
  },
104
133
  {
134
+ displayName: 'Llama 3.1 405B',
135
+ enabled: true,
105
136
  id: 'meta-llama/Meta-Llama-3.1-405B-Instruct',
106
137
  tokens: 32_768,
107
138
  },
108
139
  {
140
+ displayName: 'Llama 3 70B',
109
141
  id: 'meta-llama/Meta-Llama-3-70B-Instruct',
110
142
  tokens: 8192,
111
143
  },
112
144
  {
145
+ displayName: 'Mistral 7B',
113
146
  id: 'mistralai/Mistral-7B-Instruct-v0.2',
114
147
  tokens: 32_768,
115
148
  },
116
149
  {
150
+ displayName: 'Mistral 8x7B',
117
151
  id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
118
152
  tokens: 32_768,
119
153
  },
@@ -5,41 +5,49 @@ import { ModelProviderCard } from '@/types/llm';
5
5
  const Stepfun: ModelProviderCard = {
6
6
  chatModels: [
7
7
  {
8
+ displayName: 'Step 2 16K',
8
9
  enabled: true,
9
10
  id: 'step-2-16k',
10
11
  tokens: 16_000,
11
12
  },
12
13
  {
14
+ displayName: 'Step 1 256K',
13
15
  id: 'step-1-256k',
14
16
  tokens: 256_000,
15
17
  },
16
18
  {
19
+ displayName: 'Step 1 128K',
17
20
  enabled: true,
18
21
  id: 'step-1-128k',
19
22
  tokens: 128_000,
20
23
  },
21
24
  {
25
+ displayName: 'Step 1 32K',
22
26
  enabled: true,
23
27
  id: 'step-1-32k',
24
28
  tokens: 32_000,
25
29
  },
26
30
  {
31
+ displayName: 'Step 1 8K',
27
32
  enabled: true,
28
33
  id: 'step-1-8k',
29
34
  tokens: 8000,
30
35
  },
31
36
  {
37
+ displayName: 'Step 1 Flash',
32
38
  enabled: true,
33
39
  id: 'step-1-flash',
34
40
  tokens: 8000,
35
41
  },
36
42
  {
43
+ displayName: 'Step 1V 32K',
37
44
  enabled: true,
38
45
  id: 'step-1v-32k',
39
46
  tokens: 32_000,
40
47
  vision: true,
41
48
  },
42
49
  {
50
+ displayName: 'Step 1V 8K',
43
51
  enabled: true,
44
52
  id: 'step-1v-8k',
45
53
  tokens: 8000,
@@ -64,8 +64,8 @@ const VirtualizedList = memo<VirtualizedListProps>(({ mobile }) => {
64
64
  }, [data.length]);
65
65
 
66
66
  const theme = useTheme();
67
- // overscan should be 1.5 times the height of the window
68
- const overscan = typeof window !== 'undefined' ? window.innerHeight * 1.5 : 0;
67
+ // overscan should be 3 times the height of the window
68
+ const overscan = typeof window !== 'undefined' ? window.innerHeight * 3 : 0;
69
69
 
70
70
  const itemContent = useCallback(
71
71
  (index: number, id: string) => {