@lobehub/chat 0.151.1 → 0.151.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/next.config.mjs +1 -0
- package/package.json +2 -2
- package/src/app/chat/(desktop)/features/ChatInput/Footer/SendMore.tsx +2 -2
- package/src/config/modelProviders/anthropic.ts +1 -1
- package/src/config/modelProviders/azure.ts +1 -0
- package/src/config/modelProviders/bedrock.ts +2 -2
- package/src/config/modelProviders/google.ts +9 -7
- package/src/config/modelProviders/groq.ts +0 -1
- package/src/config/modelProviders/minimax.ts +15 -0
- package/src/config/modelProviders/mistral.ts +1 -1
- package/src/config/modelProviders/moonshot.ts +1 -0
- package/src/config/modelProviders/ollama.ts +21 -21
- package/src/config/modelProviders/openai.ts +1 -1
- package/src/config/modelProviders/togetherai.ts +1 -1
- package/src/config/modelProviders/zeroone.ts +4 -3
- package/src/config/modelProviders/zhipu.ts +4 -2
- package/src/features/ChatInput/useSend.ts +8 -2
- package/src/features/Conversation/components/InboxWelcome/QuestionSuggest.tsx +8 -8
- package/src/features/Conversation/components/VirtualizedList/index.tsx +7 -1
- package/src/features/Conversation/index.tsx +6 -9
- package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +1 -1
- package/src/services/chat.ts +8 -2
- package/src/store/chat/slices/message/action.test.ts +1 -5
- package/src/store/chat/slices/message/action.ts +23 -10
- package/src/store/chat/slices/plugin/action.test.ts +1 -1
- package/src/store/chat/slices/plugin/action.ts +1 -1
- package/src/store/user/slices/settings/actions/llm.test.ts +1 -1
- package/src/types/llm.ts +1 -1
package/CHANGELOG.md
CHANGED
```diff
@@ -2,6 +2,56 @@

 # Changelog

+### [Version 0.151.3](https://github.com/lobehub/lobe-chat/compare/v0.151.2...v0.151.3)
+
+<sup>Released on **2024-04-29**</sup>
+
+#### 💄 Styles
+
+- **misc**: Patching models info.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Patching models info, closes [#2269](https://github.com/lobehub/lobe-chat/issues/2269) [#2280](https://github.com/lobehub/lobe-chat/issues/2280) ([03bcb06](https://github.com/lobehub/lobe-chat/commit/03bcb06))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 0.151.2](https://github.com/lobehub/lobe-chat/compare/v0.151.1...v0.151.2)
+
+<sup>Released on **2024-04-29**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix only inject welcome question in inbox.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix only inject welcome question in inbox, closes [#2289](https://github.com/lobehub/lobe-chat/issues/2289) ([cc8edd3](https://github.com/lobehub/lobe-chat/commit/cc8edd3))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 0.151.1](https://github.com/lobehub/lobe-chat/compare/v0.151.0...v0.151.1)

 <sup>Released on **2024-04-29**</sup>
```
package/next.config.mjs
CHANGED
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "0.151.1",
+  "version": "0.151.3",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -126,7 +126,7 @@
     "lucide-react": "latest",
     "modern-screenshot": "^4.4.39",
     "nanoid": "^5.0.7",
-    "next": "14.1
+    "next": "^14.2.1",
     "next-auth": "5.0.0-beta.15",
     "next-sitemap": "^4.2.3",
     "numeral": "^2.0.6",
```
package/src/app/chat/(desktop)/features/ChatInput/Footer/SendMore.tsx
CHANGED

```diff
@@ -45,7 +45,7 @@ const SendMore = memo(() => {
     hotKey,
     (keyboardEvent, hotkeysEvent) => {
       console.log(keyboardEvent, hotkeysEvent);
-      sendMessage(true);
+      sendMessage({ onlyAddUserMessage: true });
     },
     {
       enableOnFormTags: true,
@@ -94,7 +94,7 @@ const SendMore = memo(() => {
           </Flexbox>
         ),
         onClick: () => {
-          sendMessage(true);
+          sendMessage({ onlyAddUserMessage: true });
         },
       },
     ],
```
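Both call sites migrate `sendMessage` from a positional boolean to an options object. A rough sketch of why the object shape scales better, using a simplified stand-in for the store type:

```ts
// Simplified stand-in; the real SendMessageParams is exported from
// src/store/chat/slices/message/action.ts in this release.
interface SendMessageOptions {
  onlyAddUserMessage?: boolean; // queue the user message without fetching an AI reply
  isWelcomeQuestion?: boolean; // new flag introduced alongside this change
}

declare function sendMessage(options?: SendMessageOptions): void;

// Before: sendMessage(true) — the boolean's meaning is invisible at the call site.
// After: the flag is named, and new flags extend the object without
// reordering or padding positional arguments.
sendMessage({ onlyAddUserMessage: true });
```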
package/src/config/modelProviders/bedrock.ts
CHANGED

```diff
@@ -56,14 +56,14 @@ const Bedrock: ModelProviderCard = {
     displayName: 'Llama 2 Chat 13B',
     enabled: true,
     id: 'meta.llama2-13b-chat-v1',
-    tokens:
+    tokens: 4096,
   },
   {
     description: 'Llama 2 Chat 70B v1,上下文大小为 4k,Llama 2 模型的对话用例优化变体。',
     displayName: 'Llama 2 Chat 70B',
     enabled: true,
     id: 'meta.llama2-70b-chat-v1',
-    tokens:
+    tokens: 4096,
   },
 ],
 id: 'bedrock',
```
package/src/config/modelProviders/google.ts
CHANGED

```diff
@@ -1,6 +1,7 @@
 import { ModelProviderCard } from '@/types/llm';

 // ref https://ai.google.dev/models/gemini
+// api https://ai.google.dev/api/rest/v1beta/models/list
 const Google: ModelProviderCard = {
   chatModels: [
     {
@@ -8,13 +9,14 @@ const Google: ModelProviderCard = {
       displayName: 'PaLM 2 Chat (Legacy)',
       id: 'chat-bison-001',
       maxOutput: 1024,
+      // tokens: 4096 + 1024, // none tokens test
     },
     {
       description: 'A legacy model that understands text and generates text as an output',
       displayName: 'PaLM 2 (Legacy)',
       id: 'text-bison-001',
       maxOutput: 1024,
-      tokens:
+      tokens: 8196 + 1024,
     },
     {
       description: 'The best model for scaling across a wide range of tasks',
@@ -22,14 +24,14 @@ const Google: ModelProviderCard = {
       enabled: true,
       id: 'gemini-pro',
       maxOutput: 2048,
-      tokens:
+      tokens: 30_720 + 2048,
     },
     {
       description: 'The best image understanding model to handle a broad range of applications',
       displayName: 'Gemini 1.0 Pro Vision',
       id: 'gemini-1.0-pro-vision-latest',
       maxOutput: 4096,
-      tokens:
+      tokens: 12_288 + 4096,
       vision: true,
     },
     {
@@ -38,7 +40,7 @@ const Google: ModelProviderCard = {
       enabled: true,
       id: 'gemini-pro-vision',
       maxOutput: 4096,
-      tokens:
+      tokens: 12_288 + 4096,
       vision: true,
     },
     {
@@ -47,7 +49,7 @@ const Google: ModelProviderCard = {
       displayName: 'Gemini 1.0 Pro 001 (Tuning)',
       id: 'gemini-1.0-pro-001',
       maxOutput: 2048,
-      tokens:
+      tokens: 30_720 + 2048,
     },
     {
       description:
@@ -55,7 +57,7 @@ const Google: ModelProviderCard = {
       displayName: 'Gemini 1.0 Pro Latest',
       id: 'gemini-1.0-pro-latest',
       maxOutput: 2048,
-      tokens:
+      tokens: 30_720 + 2048,
     },
     {
       description: 'Mid-size multimodal model that supports up to 1 million tokens',
@@ -63,7 +65,7 @@ const Google: ModelProviderCard = {
       enabled: true,
       id: 'gemini-1.5-pro-latest',
       maxOutput: 8192,
-      tokens:
+      tokens: 1_048_576 + 8192,
       vision: true,
     },
     {
```
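The patched `tokens` values spell out context window plus `maxOutput` as an explicit sum (e.g. `30_720 + 2048`), so the arithmetic documents itself. A minimal sketch of that convention, with a simplified stand-in for the `ModelProviderCard` chat-model type:

```ts
// Simplified sketch; the real ModelProviderCard lives in src/types/llm.ts.
interface ChatModelCard {
  id: string;
  maxOutput?: number;
  /** total window: input context plus maximum output tokens */
  tokens?: number;
  vision?: boolean;
}

// Gemini 1.5 Pro: 1M-token input context plus an 8192-token output budget.
const gemini15Pro: ChatModelCard = {
  id: 'gemini-1.5-pro-latest',
  maxOutput: 8192,
  tokens: 1_048_576 + 8192,
  vision: true,
};
```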
package/src/config/modelProviders/minimax.ts
CHANGED

```diff
@@ -1,7 +1,22 @@
 import { ModelProviderCard } from '@/types/llm';

+// ref https://www.minimaxi.com/document/guides/chat-model/pro/api
 const Minimax: ModelProviderCard = {
   chatModels: [
+    {
+      description: '复杂场景,例如应用题计算、科学计算等场景',
+      displayName: 'abab6.5',
+      enabled: true,
+      id: 'abab6.5-chat',
+      tokens: 8192,
+    },
+    {
+      description: '通用场景',
+      displayName: 'abab6.5s',
+      enabled: true,
+      id: 'abab6.5s-chat',
+      tokens: 245_760,
+    },
     {
       description: '更复杂的格式化文本生成',
       displayName: 'abab6',
```
package/src/config/modelProviders/ollama.ts
CHANGED

```diff
@@ -6,7 +6,7 @@ const Ollama: ModelProviderCard = {
     displayName: 'Llama3 8B',
     enabled: true,
     id: 'llama3',
-    tokens: 8000,
+    tokens: 8000, // https://huggingface.co/blog/zh/llama3#llama-3-的新进展
   },
   {
     displayName: 'Llama3 70B',
@@ -17,48 +17,48 @@ const Ollama: ModelProviderCard = {
     displayName: 'Command R 35B',
     enabled: true,
     id: 'command-r',
-    tokens:
+    tokens: 131_072, // https://huggingface.co/CohereForAI/c4ai-command-r-v01/blob/main/config.json
   },
   {
     displayName: 'Command R+ 104B (Q2_K)',
     id: 'command-r-plus:104b-q2_K',
-    tokens:
+    tokens: 131_072, // https://huggingface.co/CohereForAI/c4ai-command-r-plus/blob/main/config.json
   },
   {
     displayName: 'Gemma 7B',
     enabled: true,
     id: 'gemma',
-    tokens:
+    tokens: 8192, // https://huggingface.co/google/gemma-7b-it/discussions/73#65e9678c0cda621164a95bad
   },
   {
     displayName: 'Gemma 2B',
     id: 'gemma:2b',
-    tokens:
+    tokens: 8192,
   },
   {
     displayName: 'Llama2 Chat 13B',
     id: 'llama2:13b',
-    tokens:
+    tokens: 4096, // https://llama.meta.com/llama2/
   },
   {
     displayName: 'Llama2 Chat 7B',
     id: 'llama2',
-    tokens:
+    tokens: 4096,
   },
   {
     displayName: 'Llama2 Chat 70B',
     id: 'llama2:70b',
-    tokens:
+    tokens: 4096,
   },
   {
     displayName: 'Llama2 CN 13B',
     id: 'llama2-chinese:13b',
-    tokens:
+    tokens: 4096,
   },
   {
     displayName: 'Llama2 CN 7B',
     id: 'llama2-chinese',
-    tokens:
+    tokens: 4096,
   },
   {
     displayName: 'WizardLM 2 7B',
@@ -74,45 +74,45 @@ const Ollama: ModelProviderCard = {
   {
     displayName: 'Code Llama 7B',
     id: 'codellama',
-    tokens:
+    tokens: 16_384, // https://huggingface.co/codellama/CodeLlama-7b-hf/blob/main/config.json
   },
   {
     displayName: 'Code Llama 34B',
     id: 'codellama:34b',
-    tokens:
+    tokens: 16_384,
   },
   {
     displayName: 'Code Llama 70B',
     id: 'codellama:70b',
-    tokens:
+    tokens: 16_384,
   },
   {
     displayName: 'Code Llama 7B (Python)',
     id: 'codellama:python',
-    tokens:
+    tokens: 16_384,
   },
   {
     displayName: 'Phi3-Instruct 3.8B',
     enabled: true,
     id: 'phi3:instruct',
-    tokens:
+    tokens: 131_072, // https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/config.json
   },
   {
     displayName: 'Mistral',
     enabled: true,
     id: 'mistral',
-    tokens:
+    tokens: 32_768, // https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/config.json
   },
   {
     displayName: 'Mixtral 8x7B',
     enabled: true,
     id: 'mixtral',
-    tokens:
+    tokens: 32_768,
   },
   {
     displayName: 'Mixtral 8x22B',
     id: 'mixtral:8x22b',
-    tokens:
+    tokens: 65_536, // https://huggingface.co/mistralai/Mixtral-8x22B-v0.1/blob/main/config.json
   },
   {
     displayName: 'Qwen Chat 4B',
@@ -138,19 +138,19 @@ const Ollama: ModelProviderCard = {
   {
     displayName: 'LLaVA 7B',
     id: 'llava',
-    tokens:
+    tokens: 4096, // https://huggingface.co/llava-hf/llava-1.5-7b-hf/blob/main/config.json
     vision: true,
   },
   {
     displayName: 'LLaVA 13B',
     id: 'llava:13b',
-    tokens:
+    tokens: 4096,
     vision: true,
   },
   {
     displayName: 'LLaVA 34B',
     id: 'llava:34b',
-    tokens:
+    tokens: 4096,
     vision: true,
   },
 ],
```
package/src/config/modelProviders/zeroone.ts
CHANGED

```diff
@@ -1,5 +1,6 @@
 import { ModelProviderCard } from '@/types/llm';

+// ref https://platform.lingyiwanwu.com/
 const ZeroOne: ModelProviderCard = {
   chatModels: [
     {
@@ -7,7 +8,7 @@ const ZeroOne: ModelProviderCard = {
       displayName: 'YI 34B Chat',
       enabled: true,
       id: 'yi-34b-chat-0205',
-      tokens:
+      tokens: 4096, // https://huggingface.co/01-ai/Yi-34B-Chat/blob/main/config.json
     },
     {
       description:
@@ -15,7 +16,7 @@ const ZeroOne: ModelProviderCard = {
       displayName: 'YI Vision Plus',
       enabled: true,
       id: 'yi-vl-plus',
-      tokens:
+      tokens: 4096,
       vision: true,
     },
     {
@@ -23,7 +24,7 @@ const ZeroOne: ModelProviderCard = {
       displayName: 'YI 34B Chat 200k',
       enabled: true,
       id: 'yi-34b-chat-200k',
-      tokens: 200_000,
+      tokens: 200_000, // https://huggingface.co/01-ai/Yi-34B-200K/blob/main/config.json
     },
   ],
   id: 'zeroone',
```
package/src/config/modelProviders/zhipu.ts
CHANGED

```diff
@@ -1,8 +1,10 @@
 import { ModelProviderCard } from '@/types/llm';

 // TODO: 等待 ZhiPu 修复 API 问题后开启 functionCall
-// refs: https://github.com/lobehub/lobe-chat/discussions/737#discussioncomment-8315815
 // 暂时不透出 GLM 系列的 function_call 功能
+// refs https://github.com/lobehub/lobe-chat/discussions/737#discussioncomment-8315815
+
+// ref https://open.bigmodel.cn/dev/howuse/model
 const ZhiPu: ModelProviderCard = {
   chatModels: [
     {
@@ -19,7 +21,7 @@ const ZhiPu: ModelProviderCard = {
       displayName: 'GLM-4 Vision',
       enabled: true,
       id: 'glm-4v',
-      tokens:
+      tokens: 2000,
       vision: true,
     },
     {
```
package/src/features/ChatInput/useSend.ts
CHANGED

```diff
@@ -1,15 +1,21 @@
 import { useCallback } from 'react';

 import { useChatStore } from '@/store/chat';
+import { SendMessageParams } from '@/store/chat/slices/message/action';
 import { filesSelectors, useFileStore } from '@/store/file';

+export type UseSendMessageParams = Pick<
+  SendMessageParams,
+  'onlyAddUserMessage' | 'isWelcomeQuestion'
+>;
+
 export const useSendMessage = () => {
   const [sendMessage, updateInputMessage] = useChatStore((s) => [
     s.sendMessage,
     s.updateInputMessage,
   ]);

-  return useCallback((
+  return useCallback((params: UseSendMessageParams = {}) => {
     const store = useChatStore.getState();
     if (!!store.chatLoadingId) return;
     if (!store.inputMessage) return;
@@ -19,7 +25,7 @@ export const useSendMessage = () => {
     sendMessage({
       files: imageList,
       message: store.inputMessage,
-
+      ...params,
     });

     updateInputMessage('');
```
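A hedged usage sketch of the widened hook (the button component below is illustrative, not from the repo): the hook reads the draft message from the chat store, so callers only pass behavioral flags — or nothing at all.

```tsx
import { memo } from 'react';

import { useSendMessage } from '@/features/ChatInput/useSend';

// Illustrative consumer of the hook after this change.
const SendLaterButton = memo(() => {
  const sendMessage = useSendMessage();

  return (
    <button
      onClick={() => {
        // Queue the current draft as a user message without requesting a completion.
        sendMessage({ onlyAddUserMessage: true });
      }}
    >
      Add to chat without replying
    </button>
  );
});

export default SendLaterButton;
```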
package/src/features/Conversation/components/InboxWelcome/QuestionSuggest.tsx
CHANGED

```diff
@@ -10,7 +10,8 @@ import { useTranslation } from 'react-i18next';
 import { Flexbox } from 'react-layout-kit';

 import { USAGE_DOCUMENTS } from '@/const/url';
-import {
+import { useSendMessage } from '@/features/ChatInput/useSend';
+import { useChatStore } from '@/store/chat';

 const useStyles = createStyles(({ css, token }) => ({
   card: css`
@@ -54,14 +55,10 @@ const qa = shuffle([
 ]).slice(0, 5);

 const QuestionSuggest = memo(() => {
-  const
+  const [updateInputMessage] = useChatStore((s) => [s.updateInputMessage]);
   const { t } = useTranslation('welcome');
   const { styles } = useStyles();
-
-  const handoleSend = (qa: string) => {
-    onInput(qa);
-    onSend();
-  };
+  const sendMessage = useSendMessage();

   return (
     <Flexbox gap={8} width={'100%'}>
@@ -85,7 +82,10 @@ const QuestionSuggest = memo(() => {
       gap={8}
       horizontal
       key={item}
-      onClick={() =>
+      onClick={() => {
+        updateInputMessage(text);
+        sendMessage({ isWelcomeQuestion: true });
+      }}
     >
       {t(text)}
     </Flexbox>
```
package/src/features/Conversation/components/VirtualizedList/index.tsx
CHANGED

```diff
@@ -7,9 +7,11 @@ import { useChatStore } from '@/store/chat';
 import { chatSelectors } from '@/store/chat/selectors';
 import { isMobileScreen } from '@/utils/screen';

+import { useInitConversation } from '../../hooks/useInitConversation';
 import AutoScroll from '../AutoScroll';
 import Item from '../ChatItem';
 import InboxWelcome from '../InboxWelcome';
+import SkeletonList from '../SkeletonList';

 const WELCOME_ID = 'welcome';

@@ -29,6 +31,8 @@ interface VirtualizedListProps {
   mobile?: boolean;
 }
 const VirtualizedList = memo<VirtualizedListProps>(({ mobile }) => {
+  useInitConversation();
+
   const virtuosoRef = useRef<VirtuosoHandle>(null);
   const [atBottom, setAtBottom] = useState(true);

@@ -52,7 +56,9 @@ const VirtualizedList = memo<VirtualizedListProps>(({ mobile }) => {
   // overscan should be 1.5 times the height of the window
   const overscan = typeof window !== 'undefined' ? window.innerHeight * 1.5 : 0;

-  return chatLoading
+  return chatLoading ? (
+    <SkeletonList mobile={mobile} />
+  ) : (
     <Flexbox height={'100%'}>
       <Virtuoso
         atBottomStateChange={setAtBottom}
```
package/src/features/Conversation/index.tsx
CHANGED

```diff
@@ -1,13 +1,12 @@
 import { createStyles } from 'antd-style';
-import { ReactNode, memo } from 'react';
+import { ReactNode, Suspense, lazy, memo } from 'react';
 import { Flexbox } from 'react-layout-kit';

 import ChatHydration from '@/components/StoreHydration/ChatHydration';
-import { useChatStore } from '@/store/chat';

 import SkeletonList from './components/SkeletonList';
-
-
+
+const ChatList = lazy(() => import('./components/VirtualizedList'));

 const useStyles = createStyles(
   ({ css, responsive, stylish }) => css`
@@ -30,10 +29,6 @@ interface ConversationProps {
 const Conversation = memo<ConversationProps>(({ chatInput, mobile }) => {
   const { styles } = useStyles();

-  useInitConversation();
-
-  const [messagesInit] = useChatStore((s) => [s.messagesInit]);
-
   return (
     <Flexbox
       flex={1}
@@ -41,7 +36,9 @@ const Conversation = memo<ConversationProps>(({ chatInput, mobile }) => {
       style={{ position: 'relative' }}
     >
       <div className={styles}>
-
+        <Suspense fallback={<SkeletonList mobile={mobile} />}>
+          <ChatList mobile={mobile} />
+        </Suspense>
       </div>
       {chatInput}
       <ChatHydration />
```
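The chat list is now code-split behind `React.lazy`, with the skeleton reused as the Suspense fallback, and `useInitConversation` moves into the lazily loaded list so data fetching starts when the chunk mounts. A minimal standalone sketch of the pattern (the fallback markup here is illustrative):

```tsx
import { Suspense, lazy, memo } from 'react';

// The heavy list becomes its own chunk, fetched on first render.
const ChatList = lazy(() => import('./components/VirtualizedList'));

const Conversation = memo(() => (
  // Until the chunk (and, with SWR suspense enabled, the initial data)
  // resolves, React renders the fallback in place of the list.
  <Suspense fallback={<div>loading…</div>}>
    <ChatList />
  </Suspense>
));

export default Conversation;
```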
package/src/services/chat.ts
CHANGED
```diff
@@ -31,6 +31,7 @@ import { createHeaderWithAuth, getProviderAuthPayload } from './_auth';
 import { API_ENDPOINTS } from './_url';

 interface FetchOptions {
+  isWelcomeQuestion?: boolean;
   signal?: AbortSignal | undefined;
   trace?: TracePayload;
 }
@@ -65,6 +66,7 @@ interface FetchAITaskResultParams {

 interface CreateAssistantMessageStream extends FetchSSEOptions {
   abortController?: AbortController;
+  isWelcomeQuestion?: boolean;
   params: GetChatCompletionPayload;
   trace?: TracePayload;
 }
@@ -220,10 +222,12 @@ class ChatService {
     onErrorHandle,
     onFinish,
     trace,
+    isWelcomeQuestion,
   }: CreateAssistantMessageStream) => {
     await fetchSSE(
       () =>
         this.createAssistantMessage(params, {
+          isWelcomeQuestion,
           signal: abortController?.signal,
           trace: this.mapTrace(trace, TraceTagMap.Chat),
         }),
@@ -432,9 +436,11 @@ class ChatService {
     });

     return produce(postMessages, (draft) => {
-      //
+      // if it's a welcome question, inject InboxGuide SystemRole
       const inboxGuideSystemRole =
-        options?.
+        options?.isWelcomeQuestion &&
+        options?.trace?.sessionId === INBOX_SESSION_ID &&
+        INBOX_GUIDE_SYSTEMROLE;

       // Inject Tool SystemRole
       const hasTools = tools && tools?.length > 0;
```
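The `&&`-chain makes `inboxGuideSystemRole` either the guide prompt or a falsy value, which is this fix for #2289: the system role is injected only for welcome questions *and* only in the inbox session. A minimal sketch of the short-circuit under those assumptions (constants are stand-ins for the real ones in the repo):

```ts
// Illustrative stand-ins; the real constants live elsewhere in the package.
const INBOX_SESSION_ID = 'inbox';
const INBOX_GUIDE_SYSTEMROLE = 'You are the LobeChat onboarding guide…';

interface Options {
  isWelcomeQuestion?: boolean;
  trace?: { sessionId?: string };
}

const resolveInboxGuideRole = (options?: Options): string | false | undefined =>
  // Evaluates to the system-role string only when BOTH guards hold;
  // otherwise the chain yields a falsy value and nothing is injected.
  options?.isWelcomeQuestion &&
  options?.trace?.sessionId === INBOX_SESSION_ID &&
  INBOX_GUIDE_SYSTEMROLE;
```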
package/src/store/chat/slices/message/action.test.ts
CHANGED

```diff
@@ -385,11 +385,7 @@ describe('chatMessage actions', () => {
     });

     expect(messageService.removeMessage).not.toHaveBeenCalledWith(messageId);
-    expect(mockState.coreProcessMessage).toHaveBeenCalledWith(
-      expect.any(Array),
-      messageId,
-      undefined,
-    );
+    expect(mockState.coreProcessMessage).toHaveBeenCalledWith(expect.any(Array), messageId, {});
   });

   it('should not perform any action if the message id does not exist', async () => {
```
package/src/store/chat/slices/message/action.ts
CHANGED

```diff
@@ -29,10 +29,20 @@ const n = setNamespace('message');

 const SWR_USE_FETCH_MESSAGES = 'SWR_USE_FETCH_MESSAGES';

-interface SendMessageParams {
+export interface SendMessageParams {
   message: string;
   files?: { id: string; url: string }[];
   onlyAddUserMessage?: boolean;
+  /**
+   *
+   * https://github.com/lobehub/lobe-chat/pull/2086
+   */
+  isWelcomeQuestion?: boolean;
+}
+
+interface ProcessMessageParams {
+  traceId?: string;
+  isWelcomeQuestion?: boolean;
 }

 export interface ChatMessageAction {
@@ -77,7 +87,7 @@ export interface ChatMessageAction {
   coreProcessMessage: (
     messages: ChatMessage[],
     parentId: string,
-
+    params?: ProcessMessageParams,
   ) => Promise<void>;
   /**
    * 实际获取 AI 响应
@@ -87,7 +97,7 @@ export interface ChatMessageAction {
   fetchAIChatMessage: (
     messages: ChatMessage[],
     assistantMessageId: string,
-
+    params?: ProcessMessageParams,
   ) => Promise<{
     content: string;
     functionCallAtEnd: boolean;
@@ -173,7 +183,7 @@ export const chatMessage: StateCreator<
     await messageService.removeAllMessages();
     await refreshMessages();
   },
-  sendMessage: async ({ message, files, onlyAddUserMessage }) => {
+  sendMessage: async ({ message, files, onlyAddUserMessage, isWelcomeQuestion }) => {
     const { coreProcessMessage, activeTopicId, activeId } = get();
     if (!activeId) return;

@@ -200,7 +210,7 @@ export const chatMessage: StateCreator<
     // Get the current messages to generate AI response
     const messages = chatSelectors.currentChats(get());

-    await coreProcessMessage(messages, id);
+    await coreProcessMessage(messages, id, { isWelcomeQuestion });

     // check activeTopic and then auto create topic
     const chats = chatSelectors.currentChats(get());
@@ -263,6 +273,8 @@ export const chatMessage: StateCreator<
     async ([, sessionId, topicId]: [string, string, string | undefined]) =>
       messageService.getMessages(sessionId, topicId),
     {
+      suspense: true,
+      fallbackData: [],
       onSuccess: (messages, key) => {
         set(
           { activeId: sessionId, messages, messagesInit: true },
@@ -280,7 +292,7 @@ export const chatMessage: StateCreator<
   },

   // the internal process method of the AI message
-  coreProcessMessage: async (messages, userMessageId,
+  coreProcessMessage: async (messages, userMessageId, params) => {
     const { fetchAIChatMessage, triggerFunctionCall, refreshMessages, activeTopicId } = get();

     const { model, provider } = getAgentConfig();
@@ -301,7 +313,7 @@ export const chatMessage: StateCreator<

     // 2. fetch the AI response
     const { isFunctionCall, content, functionCallAtEnd, functionCallContent, traceId } =
-      await fetchAIChatMessage(messages, mid,
+      await fetchAIChatMessage(messages, mid, params);

     // 3. if it's the function call message, trigger the function method
     if (isFunctionCall) {
@@ -341,7 +353,7 @@ export const chatMessage: StateCreator<

     set({ messages }, false, n(`dispatchMessage/${payload.type}`, payload));
   },
-  fetchAIChatMessage: async (messages, assistantId,
+  fetchAIChatMessage: async (messages, assistantId, params) => {
     const {
       toggleChatLoading,
       refreshMessages,
@@ -421,11 +433,12 @@ export const chatMessage: StateCreator<
       plugins: config.plugins,
     },
     trace: {
-      traceId,
+      traceId: params?.traceId,
       sessionId: get().activeId,
       topicId: get().activeTopicId,
       traceName: TraceNameMap.Conversation,
     },
+    isWelcomeQuestion: params?.isWelcomeQuestion,
     onErrorHandle: async (error) => {
       await messageService.updateMessageError(assistantId, error);
       await refreshMessages();
@@ -567,7 +580,7 @@ export const chatMessage: StateCreator<

     if (!latestMsg) return;

-    await coreProcessMessage(contextMessages, latestMsg.id, traceId);
+    await coreProcessMessage(contextMessages, latestMsg.id, { traceId });
   },

   internalUpdateMessageContent: async (id, content) => {
```
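Both internal actions now take a single optional params object rather than a growing tail of positional arguments, and `sendMessage` simply forwards the flags it receives. A rough sketch of the threading, with simplified types standing in for the store's own:

```ts
// Simplified stand-ins; the real signatures live in this slice.
interface ProcessMessageParams {
  traceId?: string;
  isWelcomeQuestion?: boolean;
}

const fetchAIChatMessage = async (
  messages: string[],
  assistantId: string,
  params?: ProcessMessageParams,
) => {
  // The fetch layer is the only place that unpacks the flags.
  console.log(assistantId, params?.traceId, params?.isWelcomeQuestion);
};

const coreProcessMessage = async (
  messages: string[],
  userMessageId: string,
  params?: ProcessMessageParams,
) => {
  // Forward the whole object untouched; adding a flag later means editing
  // one interface instead of every positional call site.
  await fetchAIChatMessage(messages, userMessageId, params);
};

void coreProcessMessage([], 'msg-1', { isWelcomeQuestion: true });
```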
package/src/store/chat/slices/plugin/action.ts
CHANGED

```diff
@@ -174,7 +174,7 @@ export const chatPlugin: StateCreator<
   triggerAIMessage: async (id, traceId) => {
     const { coreProcessMessage } = get();
     const chats = chatSelectors.currentChats(get());
-    await coreProcessMessage(chats, id, traceId);
+    await coreProcessMessage(chats, id, { traceId });
   },

   triggerFunctionCall: async (id) => {
```