@lobehub/chat 0.159.3 → 0.159.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,41 @@
 
   # Changelog
 
+ ### [Version 0.159.4](https://github.com/lobehub/lobe-chat/compare/v0.159.3...v0.159.4)
+
+ <sup>Released on **2024-05-14**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Refresh model config form & mobile footer button lost.
+
+ #### 💄 Styles
+
+ - **misc**: Add GPT-4o model, update perplexity models, updates 01.AI model list.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Refresh model config form & mobile footer button lost, closes [#2318](https://github.com/lobehub/lobe-chat/issues/2318) [#2319](https://github.com/lobehub/lobe-chat/issues/2319) [#1811](https://github.com/lobehub/lobe-chat/issues/1811) ([eadcefc](https://github.com/lobehub/lobe-chat/commit/eadcefc))
+
+ #### Styles
+
+ - **misc**: Add GPT-4o model, closes [#2481](https://github.com/lobehub/lobe-chat/issues/2481) ([ae6a03f](https://github.com/lobehub/lobe-chat/commit/ae6a03f))
+ - **misc**: Update perplexity models, closes [#2469](https://github.com/lobehub/lobe-chat/issues/2469) ([488cde7](https://github.com/lobehub/lobe-chat/commit/488cde7))
+ - **misc**: Updates 01.AI model list, closes [#2471](https://github.com/lobehub/lobe-chat/issues/2471) ([f28711a](https://github.com/lobehub/lobe-chat/commit/f28711a))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 0.159.3](https://github.com/lobehub/lobe-chat/compare/v0.159.2...v0.159.3)
 
  <sup>Released on **2024-05-14**</sup>
@@ -1,5 +1,5 @@
  ---
- title: Get Start with LobeChat
+ title: Get started with LobeChat
  description: >-
    Explore the exciting features in LobeChat, including Vision Model, TTS & STT,
    Local LLMs, and Multi AI Providers. Discover more about Agent Market, Plugin
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "0.159.3",
+   "version": "0.159.4",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -1,5 +1,5 @@
  import { Modal } from '@lobehub/ui';
- import { Checkbox, Form, Input } from 'antd';
+ import { Button, Checkbox, Form, Input } from 'antd';
  import isEqual from 'fast-deep-equal';
  import { memo } from 'react';
  import { useTranslation } from 'react-i18next';
@@ -16,6 +16,7 @@ interface ModelConfigModalProps {
  const ModelConfigModal = memo<ModelConfigModalProps>(({ showAzureDeployName, provider }) => {
    const [formInstance] = Form.useForm();
    const { t } = useTranslation('setting');
+   const { t: tc } = useTranslation('common');

    const [open, id, editingProvider, dispatchCustomModelCards, toggleEditingCustomModelCard] =
      useUserStore((s) => [
@@ -38,20 +39,32 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ showAzureDeployName, pro
    return (
      <Modal
        destroyOnClose
-       maskClosable
-       onCancel={() => {
-         closeModal();
-       }}
-       onOk={() => {
-         if (!editingProvider || !id) return;
-         const data = formInstance.getFieldsValue();
+       footer={[
+         <Button key="cancel" onClick={closeModal}>
+           {tc('cancel')}
+         </Button>,
+
+         <Button
+           key="ok"
+           onClick={() => {
+             if (!editingProvider || !id) return;
+             const data = formInstance.getFieldsValue();

-         dispatchCustomModelCards(editingProvider as any, { id, type: 'update', value: data });
+             dispatchCustomModelCards(editingProvider as any, { id, type: 'update', value: data });

-         closeModal();
-       }}
+             closeModal();
+           }}
+           style={{ marginInlineStart: '16px' }}
+           type="primary"
+         >
+           {tc('ok')}
+         </Button>,
+       ]}
+       maskClosable
+       onCancel={closeModal}
        open={open}
        title={t('llm.customModelCards.modelConfig.modalTitle')}
+       zIndex={1051} // Select is 1050
      >
        <div
          onClick={(e) => {
@@ -66,6 +79,7 @@ const ModelConfigModal = memo<ModelConfigModalProps>(({ showAzureDeployName, pro
            form={formInstance}
            initialValues={modelCard}
            labelCol={{ span: 4 }}
+           preserve={false}
            style={{ marginTop: 16 }}
            wrapperCol={{ offset: 1, span: 18 }}
          >
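The two hunks above replace the Modal's implicit `onOk`/`onCancel` buttons with an explicit `footer` array (fixing the confirm button that went missing in the mobile footer) and set `preserve={false}` so stale field values are dropped when the form unmounts. As a standalone illustration of the same antd `footer` pattern, here is a minimal sketch; the component, props, and labels below are made up for the example and are not part of the package:

```tsx
import { Button, Modal } from 'antd';
import { memo } from 'react';

// Illustrative component, not from the lobe-chat codebase.
interface ConfirmModalProps {
  onClose: () => void;
  onConfirm: () => void;
  open: boolean;
}

// Passing `footer` as an array of nodes overrides the Modal's built-in
// OK/Cancel buttons, so their rendering no longer depends on the default
// footer layout.
const ConfirmModal = memo<ConfirmModalProps>(({ open, onClose, onConfirm }) => (
  <Modal
    footer={[
      <Button key="cancel" onClick={onClose}>
        Cancel
      </Button>,
      <Button key="ok" onClick={onConfirm} style={{ marginInlineStart: 16 }} type="primary">
        OK
      </Button>,
    ]}
    maskClosable
    onCancel={onClose}
    open={open}
    title="Confirm"
  >
    Are you sure?
  </Modal>
));

export default ConfirmModal;
```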
@@ -29,8 +29,10 @@ const OpenAI: ModelProviderCard = {
        tokens: 4096,
      },
      {
+       description: 'Currently points to gpt-3.5-turbo-16k-0613',
        displayName: 'GPT-3.5 Turbo 16K',
        id: 'gpt-3.5-turbo-16k',
+       legacy: true,
        tokens: 16_385,
      },
      {
@@ -43,9 +45,10 @@ const OpenAI: ModelProviderCard = {
        displayName: 'GPT-3.5 Turbo 16K (0613)',
        id: 'gpt-3.5-turbo-16k-0613',
        legacy: true,
-       tokens: 4096,
+       tokens: 16_385,
      },
      {
+       description: 'Currently points to gpt-4-0125-preview',
        displayName: 'GPT-4 Turbo Preview',
        functionCall: true,
        id: 'gpt-4-turbo-preview',
@@ -58,7 +61,7 @@ const OpenAI: ModelProviderCard = {
        tokens: 128_000,
      },
      {
-       description: 'GPT-4 视觉预览版,支持视觉任务',
+       description: 'Currently points to gpt-4-1106-vision-preview',
        displayName: 'GPT-4 Turbo Vision Preview',
        id: 'gpt-4-vision-preview',
        tokens: 128_000,
@@ -77,6 +80,7 @@ const OpenAI: ModelProviderCard = {
        tokens: 128_000,
      },
      {
+       description: 'Currently points to gpt-4-0613',
        displayName: 'GPT-4',
        functionCall: true,
        id: 'gpt-4',
@@ -89,6 +93,7 @@ const OpenAI: ModelProviderCard = {
        tokens: 8192,
      },
      {
+       description: 'Currently points to gpt-4-32k-0613',
        displayName: 'GPT-4 32K',
        functionCall: true,
        id: 'gpt-4-32k',
@@ -101,7 +106,7 @@ const OpenAI: ModelProviderCard = {
        tokens: 32_768,
      },
      {
-       description: 'GPT-4 Turbo 视觉版',
+       description: 'GPT-4 Turbo with Vision',
        displayName: 'GPT-4 Turbo',
        enabled: true,
        functionCall: true,
@@ -117,6 +122,15 @@ const OpenAI: ModelProviderCard = {
        tokens: 128_000,
        vision: true,
      },
+     {
+       description: 'Currently points to gpt-4o-2024-05-13',
+       displayName: 'GPT-4o',
+       enabled: true,
+       functionCall: true,
+       id: 'gpt-4o',
+       tokens: 128_000,
+       vision: true,
+     },
    ],
    enabled: true,
    id: 'openai',
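Across these hunks the OpenAI card gains English "Currently points to …" descriptions, corrected token counts, and a new `gpt-4o` entry flagged `enabled: true` and `vision: true`, so it joins the provider's default model list. The entries follow the `ChatModelCard` type from `@/types/llm`, which is not part of this diff; the sketch below is inferred from the fields used here and may omit properties the real type defines:

```ts
// Shape inferred from the fields used in this diff; the real type lives in
// '@/types/llm' inside the package and may differ.
interface ChatModelCard {
  description?: string;
  displayName: string;
  enabled?: boolean; // enabled cards become the provider's default models
  functionCall?: boolean; // supports OpenAI-style tool/function calling
  id: string;
  legacy?: boolean; // deprecated alias kept for backwards compatibility
  tokens: number; // context window size
  vision?: boolean; // accepts image input
}

// The new GPT-4o entry added above, expressed against that inferred shape.
const gpt4o: ChatModelCard = {
  description: 'Currently points to gpt-4o-2024-05-13',
  displayName: 'GPT-4o',
  enabled: true,
  functionCall: true,
  id: 'gpt-4o',
  tokens: 128_000,
  vision: true,
};
```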
@@ -5,35 +5,35 @@ const Perplexity: ModelProviderCard = {
    chatModels: [
      {
        displayName: 'Perplexity 7B Chat',
-       id: 'sonar-small-chat',
-       tokens: 16_384,
+       id: 'llama-3-sonar-small-32k-chat',
+       tokens: 32_768,
      },
      {
-       displayName: 'Perplexity 8x7B Chat',
+       displayName: 'Perplexity 70B Chat',
        enabled: true,
-       id: 'sonar-medium-chat',
-       tokens: 16_384,
+       id: 'llama-3-sonar-large-32k-chat',
+       tokens: 32_768,
      },
      {
        displayName: 'Perplexity 7B Online',
-       id: 'sonar-small-online',
-       tokens: 12_000,
+       id: 'llama-3-sonar-small-32k-online',
+       tokens: 28_000,
      },
      {
-       displayName: 'Perplexity 8x7B Online',
+       displayName: 'Perplexity 70B Online',
        enabled: true,
-       id: 'sonar-medium-online',
-       tokens: 12_000,
+       id: 'llama-3-sonar-large-32k-online',
+       tokens: 28_000,
      },
      {
-       displayName: 'Codellama 70B Instruct',
-       id: 'codellama-70b-instruct',
-       tokens: 16_384,
+       displayName: 'Llama3 8B Instruct',
+       id: 'llama-3-8b-instruct',
+       tokens: 8192,
      },
      {
-       displayName: 'Mistral 7B Instruct',
-       id: 'mistral-7b-instruc',
-       tokens: 16_384,
+       displayName: 'Llama3 70B Instruct',
+       id: 'llama-3-70b-instruct',
+       tokens: 8192,
      },
      {
        displayName: 'Mixtral 8x7B Instruct',
@@ -1,31 +1,70 @@
  import { ModelProviderCard } from '@/types/llm';

- // ref https://platform.lingyiwanwu.com/
+ // ref https://platform.lingyiwanwu.com/docs#%E6%A8%A1%E5%9E%8B
  const ZeroOne: ModelProviderCard = {
    chatModels: [
      {
-       description: '支持聊天、问答、对话、写作、翻译等功能。',
-       displayName: 'YI 34B Chat',
+       description: '全新千亿参数模型,提供超强问答及文本生成能力。',
+       displayName: 'Yi Large',
        enabled: true,
-       id: 'yi-34b-chat-0205',
-       tokens: 4096, // https://huggingface.co/01-ai/Yi-34B-Chat/blob/main/config.json
+       id: 'yi-large',
+       tokens: 16_384,
      },
      {
-       description:
-         '支持通用图片问答、图表理解、OCR、视觉推理,能处理高分辨率(1024*1024)的图像,能在复杂视觉任务上提供优秀性能,同时支持多种语言。',
-       displayName: 'YI Vision Plus',
+       description: '中型尺寸模型升级微调,能力均衡,性价比高。深度优化指令遵循能力。',
+       displayName: 'Yi Medium',
        enabled: true,
-       id: 'yi-vl-plus',
+       id: 'yi-medium',
+       tokens: 16_384,
+     },
+     {
+       description: '复杂视觉任务模型,提供高性能图片理解、分析能力。',
+       displayName: 'Yi Vision',
+       enabled: true,
+       id: 'yi-vision',
        tokens: 4096,
-       vision: true,
      },
      {
-       description: '增强了问答对话交互和深度内容创作能力。文档问答和构建知识库小能手。',
-       displayName: 'YI 34B Chat 200k',
+       description: '200K 超长上下文窗口,提供长文本深度理解和生成能力。',
+       displayName: 'Yi 200K',
        enabled: true,
-       id: 'yi-34b-chat-200k',
-       tokens: 200_000, // https://huggingface.co/01-ai/Yi-34B-200K/blob/main/config.json
+       id: 'yi-medium-200k',
+       tokens: 200_000,
      },
+     {
+       description: '小而精悍,轻量极速模型。提供强化数学运算和代码编写能力。',
+       displayName: 'Yi Spark',
+       enabled: true,
+       id: 'yi-spark',
+       tokens: 16_384,
+     },
+     {
+       description: '基于Yi-Large超强模型的高阶服务,结合检索与生成技术提供精准答案,支持客⼾私有知识库(请联系客服申请)。',
+       displayName: 'Yi Large RAG',
+       id: 'yi-large-rag',
+       tokens: 16_384,
+     },
+     {
+       description: '超高性价比、卓越性能。根据性能和推理速度、成本,进行平衡性高精度调优。',
+       displayName: 'Yi Large Turbo',
+       enabled: true,
+       id: 'yi-large-turbo',
+       tokens: 16_384,
+     },
+     {
+       description: '「兼容版本模型」文本推理能力增强。',
+       displayName: 'Yi Large Preview',
+       enabled: true,
+       id: 'yi-large-preview',
+       tokens: 16_384,
+     },
+     {
+       description: '「兼容版本模型」实时信息获取,以及文本推理能力增强。',
+       displayName: 'Yi Large RAG Preview',
+       id: 'yi-large-rag-preview',
+       tokens: 16_384,
+     },
+
    ],
    id: 'zeroone',
  };
@@ -11,15 +11,17 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "tokens": 16385,
    },
    {
+     "description": "Currently points to gpt-3.5-turbo-16k-0613",
      "displayName": "GPT-3.5 Turbo 16K",
      "id": "gpt-3.5-turbo-16k",
+     "legacy": true,
      "tokens": 16385,
    },
    {
      "displayName": "GPT-3.5 Turbo 16K (0613)",
      "id": "gpt-3.5-turbo-16k-0613",
      "legacy": true,
-     "tokens": 4096,
+     "tokens": 16385,
    },
    {
      "displayName": "GPT-4 Turbo Vision Preview (1106)",
@@ -37,6 +39,7 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "tokens": 128000,
    },
    {
+     "description": "Currently points to gpt-4-0125-preview",
      "displayName": "GPT-4 Turbo Preview",
      "functionCall": true,
      "id": "gpt-4-turbo-preview",
@@ -69,13 +72,14 @@ exports[`LobeOpenAI > models > should get models 1`] = `
      "tokens": 128000,
    },
    {
-     "description": "GPT-4 视觉预览版,支持视觉任务",
+     "description": "Currently points to gpt-4-1106-vision-preview",
      "displayName": "GPT-4 Turbo Vision Preview",
      "id": "gpt-4-vision-preview",
      "tokens": 128000,
      "vision": true,
    },
    {
+     "description": "Currently points to gpt-4-0613",
      "displayName": "GPT-4",
      "functionCall": true,
      "id": "gpt-4",
@@ -69,12 +69,15 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
      "tokens": 16385,
    },
    {
+     "description": "Currently points to gpt-3.5-turbo-16k-0613",
      "displayName": "GPT-3.5 Turbo 16K",
      "enabled": true,
      "id": "gpt-3.5-turbo-16k",
+     "legacy": true,
      "tokens": 16385,
    },
    {
+     "description": "Currently points to gpt-4-0613",
      "displayName": "GPT-4",
      "enabled": true,
      "functionCall": true,
@@ -82,6 +85,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
      "tokens": 8192,
    },
    {
+     "description": "Currently points to gpt-4-32k-0613",
      "displayName": "GPT-4 32K",
      "enabled": true,
      "functionCall": true,
@@ -96,7 +100,7 @@ exports[`configRouter > getGlobalConfig > Model Provider env > OPENAI_MODEL_LIST
      "tokens": 128000,
    },
    {
-     "description": "GPT-4 视觉预览版,支持视觉任务",
+     "description": "Currently points to gpt-4-1106-vision-preview",
      "displayName": "GPT-4 Turbo Vision Preview",
      "enabled": true,
      "id": "gpt-4-vision-preview",
@@ -49,7 +49,7 @@ describe('modelProviderSelectors', () => {
        const s = merge(initialSettingsState, {}) as unknown as UserStore;

        const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
-       expect(result).toEqual(['gpt-3.5-turbo', 'gpt-4-turbo']);
+       expect(result).toEqual(['gpt-3.5-turbo', 'gpt-4-turbo', 'gpt-4o']);
      });

      it('should return undefined for a non-existing provider', () => {
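The updated expectation lines up with the provider change above: `gpt-4o` is added to the OpenAI card with `enabled: true`, so it appears among the default enabled models. A rough sketch of the relationship this test encodes; the real `getDefaultEnabledModelsById` selector lives in the package's user store and is not shown in this diff, so treat the function below as an illustration, not the actual implementation:

```ts
// Illustrative only: derive default-enabled model IDs from a provider card by
// keeping the entries flagged `enabled: true`, which is the behaviour the
// updated test asserts for the OpenAI card after adding GPT-4o.
interface ChatModelCard {
  enabled?: boolean;
  id: string;
}

interface ModelProviderCard {
  chatModels: ChatModelCard[];
  id: string;
}

const defaultEnabledModelIds = (provider: ModelProviderCard): string[] =>
  provider.chatModels.filter((model) => model.enabled).map((model) => model.id);

// Per the updated test, the 0.159.4 OpenAI card yields:
// ['gpt-3.5-turbo', 'gpt-4-turbo', 'gpt-4o']
```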