@lobehub/chat 1.26.10 → 1.26.11

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,23 @@
 
  # Changelog
 
+ ### [Version 1.26.11](https://github.com/lobehub/lobe-chat/compare/v1.26.10...v1.26.11)
+
+ <sup>Released on **2024-10-29**</sup>
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.26.10](https://github.com/lobehub/lobe-chat/compare/v1.26.9...v1.26.10)
 
  <sup>Released on **2024-10-29**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.26.10",
+ "version": "1.26.11",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -3,7 +3,7 @@ import { and, desc, isNull } from 'drizzle-orm/expressions';
  import { chunk } from 'lodash-es';
 
  import { serverDB } from '@/database/server';
- import { ChunkMetadata, FileChunk, SemanticSearchChunk } from '@/types/chunk';
+ import { ChunkMetadata, FileChunk } from '@/types/chunk';
 
  import {
  NewChunkItem,
@@ -148,6 +148,8 @@ export class ChunkModel {
 
  const data = await serverDB
  .select({
+ fileId: fileChunks.fileId,
+ fileName: files.name,
  id: chunks.id,
  index: chunks.index,
  metadata: chunks.metadata,
@@ -158,16 +160,15 @@ export class ChunkModel {
  .from(chunks)
  .leftJoin(embeddings, eq(chunks.id, embeddings.chunkId))
  .leftJoin(fileChunks, eq(chunks.id, fileChunks.chunkId))
+ .leftJoin(files, eq(fileChunks.fileId, files.id))
  .where(fileIds ? inArray(fileChunks.fileId, fileIds) : undefined)
  .orderBy((t) => desc(t.similarity))
  .limit(30);
 
- return data.map(
- (item): SemanticSearchChunk => ({
- ...item,
- metadata: item.metadata as ChunkMetadata,
- }),
- );
+ return data.map((item) => ({
+ ...item,
+ metadata: item.metadata as ChunkMetadata,
+ }));
  }
 
  async semanticSearchForChat({
@@ -187,7 +188,7 @@ export class ChunkModel {
  const result = await serverDB
  .select({
  fileId: files.id,
- filename: files.name,
+ fileName: files.name,
  id: chunks.id,
  index: chunks.index,
  metadata: chunks.metadata,
@@ -205,6 +206,8 @@ export class ChunkModel {
 
  return result.map((item) => {
  return {
+ fileId: item.fileId,
+ fileName: item.fileName,
  id: item.id,
  index: item.index,
  similarity: item.similarity,
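Taken together, the ChunkModel changes above thread file identity through both semantic-search queries: each now left-joins `files` and selects `fileId`/`fileName` alongside the chunk. A minimal, illustrative sketch of the resulting row shape (field list abridged; the `ChunkHit` name and `label` helper are not from the package):

```ts
// Approximate shape of a semantic-search row after the `files` left join;
// fileId/fileName are nullable because the join may not match a file.
interface ChunkHit {
  fileId: string | null;
  fileName: string | null;
  id: string;
  similarity: number;
}

// e.g. labelling a hit for display or for the prompt layer:
const label = (hit: ChunkHit) => `${hit.fileName ?? 'unknown file'} (similarity ${hit.similarity})`;
```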
@@ -0,0 +1,26 @@
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+ exports[`knowledgeBaseQAPrompts > should generate prompt with all parameters 1`] = `
+ "<knowledge_base_qa_info>
+ You are also a helpful assistant good answering questions related to Test Knowledge. And you'll be provided with a question and several passages that might be relevant. And currently your task is to provide answer based on the question and passages.
+ <knowledge_base_anwser_instruction>
+ - Note that passages might not be relevant to the question, please only use the passages that are relevant.
+ - if there is no relevant passage, please answer using your knowledge.
+ - Answer should use the same original language as the question and follow markdown syntax.
+ </knowledge_base_anwser_instruction>
+ <knowledge_bases>
+ <knowledge_bases_docstring>here are the knowledge base scope we retrieve chunks from:</knowledge_bases_docstring>
+ <knowledge id="kb1" name="Test Knowledge" type="file" fileType="txt" >Test description</knowledge>
+ </knowledge_bases>
+ <retrieved_chunks>
+ <retrieved_chunks_docstring>here are retrived chunks you can refer to:</retrieved_chunks_docstring>
+ <chunk fileId="file1" fileName="test.txt" similarity="0.8" pageNumber="1" >This is a test chunk</chunk>
+ </retrieved_chunks>
+ <user_query>
+ <user_query_docstring>to make result better, we may rewrite user's question.If there is a rewrite query, it will be wrapper with \`rewrite_query\` tag.</user_query_docstring>
+
+ <raw_query>What is the test about?</raw_query>
+ <rewrite_query>Could you explain the content of the test?</rewrite_query>
+ <user_query>
+ </knowledge_base_qa_info>"
+ `;
@@ -0,0 +1,15 @@
+ import { ChatSemanticSearchChunk } from '@/types/chunk';
+
+ const chunkPrompt = (item: ChatSemanticSearchChunk) =>
+ `<chunk fileId="${item.fileId}" fileName="${item.fileName}" similarity="${item.similarity}" ${item.pageNumber ? ` pageNumber="${item.pageNumber}" ` : ''}>${item.text}</chunk>`;
+
+ export const chunkPrompts = (fileList: ChatSemanticSearchChunk[]) => {
+ if (fileList.length === 0) return '';
+
+ const prompt = `<retrieved_chunks>
+ <retrieved_chunks_docstring>here are retrived chunks you can refer to:</retrieved_chunks_docstring>
+ ${fileList.map((item) => chunkPrompt(item)).join('\n')}
+ </retrieved_chunks>`;
+
+ return prompt.trim();
+ };
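For reference, a hypothetical call to the new `chunkPrompts` helper (fixture values borrowed from the test file below) and the XML it yields:

```ts
import { chunkPrompts } from '@/prompts/knowledgeBaseQA/chunk';

const xml = chunkPrompts([
  { id: '1', fileId: 'file1', fileName: 'test.txt', text: 'This is a test chunk', similarity: 0.8, pageNumber: 1 },
]);
// <retrieved_chunks>
// <retrieved_chunks_docstring>here are retrived chunks you can refer to:</retrieved_chunks_docstring>
// <chunk fileId="file1" fileName="test.txt" similarity="0.8"  pageNumber="1" >This is a test chunk</chunk>
// </retrieved_chunks>
```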
@@ -0,0 +1,146 @@
+ import { describe, expect, it } from 'vitest';
+
+ import { ChatSemanticSearchChunk } from '@/types/chunk';
+ import { KnowledgeItem, KnowledgeType } from '@/types/knowledgeBase';
+
+ import { knowledgeBaseQAPrompts } from './index';
+
+ describe('knowledgeBaseQAPrompts', () => {
+ // Define test data
+ const mockChunks: ChatSemanticSearchChunk[] = [
+ {
+ id: '1',
+ fileId: 'file1',
+ fileName: 'test.txt',
+ text: 'This is a test chunk',
+ similarity: 0.8,
+ pageNumber: 1,
+ },
+ ];
+
+ const mockKnowledge: KnowledgeItem[] = [
+ {
+ id: 'kb1',
+ name: 'Test Knowledge',
+ type: KnowledgeType.File,
+ fileType: 'txt',
+ description: 'Test description',
+ },
+ ];
+
+ const userQuery = 'What is the test about?';
+ const rewriteQuery = 'Could you explain the content of the test?';
+
+ it('should return empty string if chunks is empty', () => {
+ const result = knowledgeBaseQAPrompts({
+ chunks: [],
+ knowledge: mockKnowledge,
+ userQuery,
+ });
+
+ expect(result).toBe('');
+ });
+
+ it('should return empty string if chunks is undefined', () => {
+ const result = knowledgeBaseQAPrompts({
+ knowledge: mockKnowledge,
+ userQuery,
+ });
+
+ expect(result).toBe('');
+ });
+
+ it('should generate prompt with all parameters', () => {
+ const result = knowledgeBaseQAPrompts({
+ chunks: mockChunks,
+ knowledge: mockKnowledge,
+ userQuery,
+ rewriteQuery,
+ });
+
+ // Verify the prompt structure and content
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should generate prompt without rewriteQuery', () => {
+ const result = knowledgeBaseQAPrompts({
+ chunks: mockChunks,
+ knowledge: mockKnowledge,
+ userQuery,
+ });
+
+ expect(result).toContain('<raw_query>What is the test about?</raw_query>');
+ expect(result).not.toContain('<rewrite_query>');
+ });
+
+ it('should generate prompt without knowledge', () => {
+ const result = knowledgeBaseQAPrompts({
+ chunks: mockChunks,
+ userQuery,
+ });
+
+ expect(result).toContain(
+ 'You are also a helpful assistant good answering questions related to',
+ );
+ expect(result).not.toContain('<knowledge_bases>');
+ });
+
+ it('should handle empty knowledge array', () => {
+ const result = knowledgeBaseQAPrompts({
+ chunks: mockChunks,
+ knowledge: [],
+ userQuery,
+ });
+
+ expect(result).toContain(
+ 'You are also a helpful assistant good answering questions related to',
+ );
+ expect(result).not.toContain('<knowledge_bases>');
+ });
+
+ it('should properly escape special characters in input', () => {
+ const specialChunks: ChatSemanticSearchChunk[] = [
+ {
+ id: '1',
+ fileId: 'file1',
+ fileName: 'test&.txt',
+ text: 'This is a test with & < > "quotes"',
+ similarity: 0.8,
+ },
+ ];
+
+ const result = knowledgeBaseQAPrompts({
+ chunks: specialChunks,
+ userQuery: 'Test with & < > "quotes"',
+ });
+
+ expect(result).toContain('test&.txt');
+ expect(result).toContain('This is a test with & < > "quotes"');
+ expect(result).toContain('Test with & < > "quotes"');
+ });
+
+ it('should handle multiple knowledge items', () => {
+ const multipleKnowledge: KnowledgeItem[] = [
+ {
+ id: 'kb1',
+ name: 'Knowledge 1',
+ type: KnowledgeType.File,
+ },
+ {
+ id: 'kb2',
+ name: 'Knowledge 2',
+ type: KnowledgeType.KnowledgeBase,
+ },
+ ];
+
+ const result = knowledgeBaseQAPrompts({
+ chunks: mockChunks,
+ knowledge: multipleKnowledge,
+ userQuery,
+ });
+
+ expect(result).toContain('Knowledge 1/Knowledge 2');
+ expect(result).toContain('<knowledge id="kb1"');
+ expect(result).toContain('<knowledge id="kb2"');
+ });
+ });
@@ -0,0 +1,33 @@
+ import { chunkPrompts } from '@/prompts/knowledgeBaseQA/chunk';
+ import { knowledgePrompts } from '@/prompts/knowledgeBaseQA/knowledge';
+ import { userQueryPrompt } from '@/prompts/knowledgeBaseQA/userQuery';
+ import { ChatSemanticSearchChunk } from '@/types/chunk';
+ import { KnowledgeItem } from '@/types/knowledgeBase';
+
+ export const knowledgeBaseQAPrompts = ({
+ chunks,
+ knowledge,
+ userQuery,
+ rewriteQuery,
+ }: {
+ chunks?: ChatSemanticSearchChunk[];
+ knowledge?: KnowledgeItem[];
+ rewriteQuery?: string;
+ userQuery: string;
+ }) => {
+ if ((chunks || [])?.length === 0) return '';
+
+ const domains = (knowledge || []).map((v) => v.name).join('/');
+
+ return `<knowledge_base_qa_info>
+ You are also a helpful assistant good answering questions related to ${domains}. And you'll be provided with a question and several passages that might be relevant. And currently your task is to provide answer based on the question and passages.
+ <knowledge_base_anwser_instruction>
+ - Note that passages might not be relevant to the question, please only use the passages that are relevant.
+ - if there is no relevant passage, please answer using your knowledge.
+ - Answer should use the same original language as the question and follow markdown syntax.
+ </knowledge_base_anwser_instruction>
+ ${knowledgePrompts(knowledge)}
+ ${chunks ? chunkPrompts(chunks) : ''}
+ ${userQueryPrompt(userQuery, rewriteQuery)}
+ </knowledge_base_qa_info>`;
+ };
@@ -0,0 +1,15 @@
+ import { KnowledgeItem } from '@/types/knowledgeBase';
+
+ const knowledgePrompt = (item: KnowledgeItem) =>
+ `<knowledge id="${item.id}" name="${item.name}" type="${item.type}"${item.fileType ? ` fileType="${item.fileType}" ` : ''}>${item.description || ''}</knowledge>`;
+
+ export const knowledgePrompts = (list?: KnowledgeItem[]) => {
+ if ((list || []).length === 0) return '';
+
+ const prompt = `<knowledge_bases>
+ <knowledge_bases_docstring>here are the knowledge base scope we retrieve chunks from:</knowledge_bases_docstring>
+ ${list?.map((item) => knowledgePrompt(item)).join('\n')}
+ </knowledge_bases>`;
+
+ return prompt.trim();
+ };
@@ -0,0 +1,8 @@
+ export const userQueryPrompt = (userQuery: string, rewriteQuery?: string) => {
+ return `<user_query>
+ <user_query_docstring>to make result better, we may rewrite user's question.If there is a rewrite query, it will be wrapper with \`rewrite_query\` tag.</user_query_docstring>
+
+ <raw_query>${userQuery.trim()}</raw_query>
+ ${rewriteQuery ? `<rewrite_query>${rewriteQuery.trim()}</rewrite_query>` : ''}
+ <user_query>`;
+ };
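Putting the three fragments together: `knowledgeBaseQAPrompts` nests the knowledge, chunk, and user-query sections into one `<knowledge_base_qa_info>` block. A sketch of that composition, using the fixtures from the test file above; the result is the string captured in the snapshot:

```ts
import { knowledgeBaseQAPrompts } from '@/prompts/knowledgeBaseQA';
import { KnowledgeType } from '@/types/knowledgeBase';

const prompt = knowledgeBaseQAPrompts({
  chunks: [
    { id: '1', fileId: 'file1', fileName: 'test.txt', text: 'This is a test chunk', similarity: 0.8, pageNumber: 1 },
  ],
  knowledge: [
    { id: 'kb1', name: 'Test Knowledge', type: KnowledgeType.File, fileType: 'txt', description: 'Test description' },
  ],
  userQuery: 'What is the test about?',
  rewriteQuery: 'Could you explain the content of the test?',
});
// => the `<knowledge_base_qa_info>…</knowledge_base_qa_info>` string shown in the snapshot above
```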
@@ -0,0 +1,261 @@
+ import { act, renderHook } from '@testing-library/react';
+ import { Mock, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import { chatService } from '@/services/chat';
+ import { ragService } from '@/services/rag';
+ import { useAgentStore } from '@/store/agent';
+ import { agentSelectors } from '@/store/agent/selectors';
+ import { chatSelectors } from '@/store/chat/selectors';
+ import { systemAgentSelectors } from '@/store/user/selectors';
+ import { ChatMessage } from '@/types/message';
+ import { QueryRewriteSystemAgent } from '@/types/user/settings';
+
+ import { useChatStore } from '../../../../store';
+
+ // Mock services
+ vi.mock('@/services/chat', () => ({
+ chatService: {
+ fetchPresetTaskResult: vi.fn(),
+ },
+ }));
+
+ vi.mock('@/services/rag', () => ({
+ ragService: {
+ deleteMessageRagQuery: vi.fn(),
+ semanticSearchForChat: vi.fn(),
+ },
+ }));
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+ });
+
+ describe('chatRAG actions', () => {
+ describe('deleteUserMessageRagQuery', () => {
+ it('should not delete if message not found', async () => {
+ const { result } = renderHook(() => useChatStore());
+
+ await act(async () => {
+ await result.current.deleteUserMessageRagQuery('non-existent-id');
+ });
+
+ expect(ragService.deleteMessageRagQuery).not.toHaveBeenCalled();
+ });
+
+ it('should not delete if message has no ragQueryId', async () => {
+ const { result } = renderHook(() => useChatStore());
+ const messageId = 'message-id';
+
+ act(() => {
+ useChatStore.setState({
+ messagesMap: {
+ default: [{ id: messageId }] as ChatMessage[],
+ },
+ });
+ });
+
+ await act(async () => {
+ await result.current.deleteUserMessageRagQuery(messageId);
+ });
+
+ expect(ragService.deleteMessageRagQuery).not.toHaveBeenCalled();
+ });
+ });
+
+ describe('internal_retrieveChunks', () => {
+ it('should retrieve chunks with existing ragQuery', async () => {
+ const { result } = renderHook(() => useChatStore());
+ const messageId = 'message-id';
+ const existingRagQuery = 'existing-query';
+ const userQuery = 'user-query';
+
+ // Mock the message with existing ragQuery
+ vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(
+ () =>
+ ({
+ id: messageId,
+ ragQuery: existingRagQuery,
+ }) as ChatMessage,
+ );
+
+ // Mock the semantic search response
+ (ragService.semanticSearchForChat as Mock).mockResolvedValue({
+ chunks: [{ id: 'chunk-1' }],
+ queryId: 'query-id',
+ });
+
+ vi.spyOn(agentSelectors, 'currentKnowledgeIds').mockReturnValue({
+ fileIds: [],
+ knowledgeBaseIds: [],
+ });
+
+ const result1 = await act(async () => {
+ return await result.current.internal_retrieveChunks(messageId, userQuery, []);
+ });
+
+ expect(result1).toEqual({
+ chunks: [{ id: 'chunk-1' }],
+ queryId: 'query-id',
+ rewriteQuery: existingRagQuery,
+ });
+ expect(ragService.semanticSearchForChat).toHaveBeenCalledWith(
+ expect.objectContaining({
+ rewriteQuery: existingRagQuery,
+ userQuery,
+ }),
+ );
+ });
+
+ it('should rewrite query if no existing ragQuery', async () => {
+ const { result } = renderHook(() => useChatStore());
+ const messageId = 'message-id';
+ const userQuery = 'user-query';
+ const rewrittenQuery = 'rewritten-query';
+
+ // Mock the message without ragQuery
+ vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(
+ () =>
+ ({
+ id: messageId,
+ }) as ChatMessage,
+ );
+
+ // Mock the rewrite query function
+ vi.spyOn(result.current, 'internal_rewriteQuery').mockResolvedValueOnce(rewrittenQuery);
+
+ // Mock the semantic search response
+ (ragService.semanticSearchForChat as Mock).mockResolvedValue({
+ chunks: [{ id: 'chunk-1' }],
+ queryId: 'query-id',
+ });
+
+ vi.spyOn(agentSelectors, 'currentKnowledgeIds').mockReturnValue({
+ fileIds: [],
+ knowledgeBaseIds: [],
+ });
+
+ const result2 = await act(async () => {
+ return await result.current.internal_retrieveChunks(messageId, userQuery, ['message']);
+ });
+
+ expect(result2).toEqual({
+ chunks: [{ id: 'chunk-1' }],
+ queryId: 'query-id',
+ rewriteQuery: rewrittenQuery,
+ });
+ expect(result.current.internal_rewriteQuery).toHaveBeenCalledWith(messageId, userQuery, [
+ 'message',
+ ]);
+ });
+ });
+
+ describe('internal_rewriteQuery', () => {
+ it('should return original content if query rewrite is disabled', async () => {
+ const { result } = renderHook(() => useChatStore());
+ const content = 'original content';
+
+ vi.spyOn(systemAgentSelectors, 'queryRewrite').mockReturnValueOnce({
+ enabled: false,
+ } as QueryRewriteSystemAgent);
+
+ const rewrittenQuery = await result.current.internal_rewriteQuery('id', content, []);
+
+ expect(rewrittenQuery).toBe(content);
+ expect(chatService.fetchPresetTaskResult).not.toHaveBeenCalled();
+ });
+
+ it('should rewrite query if enabled', async () => {
+ const { result } = renderHook(() => useChatStore());
+ const messageId = 'message-id';
+ const content = 'original content';
+ const rewrittenContent = 'rewritten content';
+
+ vi.spyOn(systemAgentSelectors, 'queryRewrite').mockReturnValueOnce({
+ enabled: true,
+ model: 'gpt-3.5',
+ provider: 'openai',
+ });
+
+ (chatService.fetchPresetTaskResult as Mock).mockImplementation(({ onFinish }) => {
+ onFinish(rewrittenContent);
+ });
+
+ const rewrittenQuery = await result.current.internal_rewriteQuery(messageId, content, []);
+
+ expect(rewrittenQuery).toBe(rewrittenContent);
+ expect(chatService.fetchPresetTaskResult).toHaveBeenCalled();
+ });
+ });
+
+ describe('internal_shouldUseRAG', () => {
+ it('should return true if has enabled knowledge', () => {
+ const { result } = renderHook(() => useChatStore());
+
+ vi.spyOn(agentSelectors, 'hasEnabledKnowledge').mockReturnValue(true);
+ vi.spyOn(chatSelectors, 'currentUserFiles').mockReturnValue([]);
+
+ expect(result.current.internal_shouldUseRAG()).toBe(true);
+ });
+
+ it('should return true if has user files', () => {
+ const { result } = renderHook(() => useChatStore());
+
+ vi.spyOn(agentSelectors, 'hasEnabledKnowledge').mockReturnValue(false);
+ vi.spyOn(chatSelectors, 'currentUserFiles').mockReturnValue([{ id: 'file-1' }] as any);
+
+ expect(result.current.internal_shouldUseRAG()).toBe(true);
+ });
+
+ it('should return false if no knowledge or files', () => {
+ const { result } = renderHook(() => useChatStore());
+
+ vi.spyOn(agentSelectors, 'hasEnabledKnowledge').mockReturnValue(false);
+ vi.spyOn(chatSelectors, 'currentUserFiles').mockReturnValue([]);
+
+ expect(result.current.internal_shouldUseRAG()).toBe(false);
+ });
+ });
+
+ describe('rewriteQuery', () => {
+ it('should not rewrite if message not found', async () => {
+ const { result } = renderHook(() => useChatStore());
+
+ vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(() => undefined);
+ const rewriteSpy = vi.spyOn(result.current, 'internal_rewriteQuery');
+
+ await act(async () => {
+ await result.current.rewriteQuery('non-existent-id');
+ });
+
+ expect(rewriteSpy).not.toHaveBeenCalled();
+ });
+
+ it('should rewrite query for existing message', async () => {
+ const { result } = renderHook(() => useChatStore());
+ const messageId = 'message-id';
+ const content = 'message content';
+
+ vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(
+ () =>
+ ({
+ id: messageId,
+ content,
+ }) as ChatMessage,
+ );
+
+ vi.spyOn(chatSelectors, 'currentChatsWithHistoryConfig').mockReturnValue([
+ { content: 'history' },
+ ] as ChatMessage[]);
+
+ const rewriteSpy = vi.spyOn(result.current, 'internal_rewriteQuery');
+ const deleteSpy = vi.spyOn(result.current, 'deleteUserMessageRagQuery');
+
+ await act(async () => {
+ await result.current.rewriteQuery(messageId);
+ });
+
+ expect(deleteSpy).toHaveBeenCalledWith(messageId);
+ expect(rewriteSpy).toHaveBeenCalledWith(messageId, content, ['history']);
+ });
+ });
+ });
@@ -4,10 +4,10 @@ import { produce } from 'immer';
  import { template } from 'lodash-es';
  import { StateCreator } from 'zustand/vanilla';
 
- import { chainAnswerWithContext } from '@/chains/answerWithContext';
  import { LOADING_FLAT, MESSAGE_CANCEL_FLAT } from '@/const/message';
  import { TraceEventType, TraceNameMap } from '@/const/trace';
  import { isServerMode } from '@/const/version';
+ import { knowledgeBaseQAPrompts } from '@/prompts/knowledgeBaseQA';
  import { chatService } from '@/services/chat';
  import { messageService } from '@/services/message';
  import { useAgentStore } from '@/store/agent';
@@ -269,10 +269,11 @@ export const generateAIChat: StateCreator<
 
  let fileChunks: MessageSemanticSearchChunk[] | undefined;
  let ragQueryId;
+
  // go into RAG flow if there is ragQuery flag
  if (params?.ragQuery) {
  // 1. get the relative chunks from semantic search
- const { chunks, queryId } = await get().internal_retrieveChunks(
+ const { chunks, queryId, rewriteQuery } = await get().internal_retrieveChunks(
  userMessageId,
  params?.ragQuery,
  // should skip the last content
@@ -281,19 +282,21 @@ export const generateAIChat: StateCreator<
 
  ragQueryId = queryId;
 
+ const lastMsg = messages.pop() as ChatMessage;
+
  // 2. build the retrieve context messages
- const retrieveContext = chainAnswerWithContext({
- context: chunks.map((c) => c.text as string),
- question: params?.ragQuery,
- knowledge: getAgentKnowledge().map((knowledge) => knowledge.name),
+ const knowledgeBaseQAContext = knowledgeBaseQAPrompts({
+ chunks,
+ userQuery: lastMsg.content,
+ rewriteQuery,
+ knowledge: getAgentKnowledge(),
  });
 
  // 3. add the retrieve context messages to the messages history
- if (retrieveContext.messages && retrieveContext.messages?.length > 0) {
- // remove the last message due to the query is in the retrieveContext
- messages.pop();
- retrieveContext.messages?.forEach((m) => messages.push(m as ChatMessage));
- }
+ messages.push({
+ ...lastMsg,
+ content: (lastMsg.content + '\n\n' + knowledgeBaseQAContext).trim(),
+ });
 
  fileChunks = chunks.map((c) => ({ id: c.id, similarity: c.similarity }));
  }
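This hunk is the behavioral core of the release: instead of replacing the tail of the history with `chainAnswerWithContext` messages, the store now pops the last user message and re-pushes it with the knowledge-base QA context appended. A simplified, standalone restatement of that flow (the `Msg` type and `appendRagContext` name are illustrative, not from the package):

```ts
interface Msg { role: string; content: string }

// Mirror of the new RAG assembly: the raw query and the retrieved context
// end up in a single user message rather than in separately injected messages.
const appendRagContext = (messages: Msg[], qaContext: string): Msg[] => {
  const lastMsg = messages.pop() as Msg;
  messages.push({ ...lastMsg, content: (lastMsg.content + '\n\n' + qaContext).trim() });
  return messages;
};
```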
@@ -499,7 +502,7 @@ export const generateAIChat: StateCreator<
 
  await internal_coreProcessMessage(contextMessages, latestMsg.id, {
  traceId,
- ragQuery: get().internal_shouldUseRAG() ? currentMessage.content : undefined,
+ ragQuery: get().internal_shouldUseRAG() ? latestMsg.content : undefined,
  });
  },
 
@@ -21,7 +21,7 @@ export interface ChatRAGAction {
  id: string,
  userQuery: string,
  messages: string[],
- ) => Promise<{ chunks: ChatSemanticSearchChunk[]; queryId: string }>;
+ ) => Promise<{ chunks: ChatSemanticSearchChunk[]; queryId: string; rewriteQuery?: string }>;
  /**
  * Rewrite user content to better RAG query
  */
@@ -64,12 +64,11 @@ export const chatRag: StateCreator<ChatStore, [['zustand/devtools', never]], [],
  const message = chatSelectors.getMessageById(id)(get());
 
  // 1. get the rewrite query
- let rewriteQuery = message?.ragQuery || userQuery;
+ let rewriteQuery = message?.ragQuery as string | undefined;
 
- // only rewrite query length is less than 15 characters, refs: https://github.com/lobehub/lobe-chat/pull/4288
  // if there is no ragQuery and there is a chat history
  // we need to rewrite the user message to get better results
- if (rewriteQuery.length < 15 && !message?.ragQuery && messages.length > 0) {
+ if (!message?.ragQuery && messages.length > 0) {
  rewriteQuery = await get().internal_rewriteQuery(id, userQuery, messages);
  }
 
@@ -79,13 +78,13 @@ export const chatRag: StateCreator<ChatStore, [['zustand/devtools', never]], [],
  fileIds: knowledgeIds().fileIds.concat(files),
  knowledgeIds: knowledgeIds().knowledgeBaseIds,
  messageId: id,
- rewriteQuery,
+ rewriteQuery: rewriteQuery || userQuery,
  userQuery,
  });
 
  get().internal_toggleMessageRAGLoading(false, id);
 
- return { chunks, queryId };
+ return { chunks, queryId, rewriteQuery };
  },
  internal_rewriteQuery: async (id, content, messages) => {
  let rewriteQuery = content;
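Note the semantics shift in the two hunks above: the 15-character heuristic referenced from PR #4288 is removed, `rewriteQuery` stays `undefined` unless the message already carries a stored `ragQuery` or a rewrite actually runs, and the search call falls back to the raw `userQuery`. A condensed, illustrative restatement of that decision (the `resolveRewriteQuery` helper is not from the diff):

```ts
// Returns the query to send as `rewriteQuery`; callers fall back to the raw
// user query when this resolves to undefined (`rewriteQuery || userQuery`).
const resolveRewriteQuery = async (
  storedRagQuery: string | undefined,
  userQuery: string,
  historyLength: number,
  rewrite: (query: string) => Promise<string>,
): Promise<string | undefined> => {
  if (storedRagQuery) return storedRagQuery; // reuse a previously stored rewrite
  if (historyLength > 0) return rewrite(userQuery); // rewrite against chat history
  return undefined;
};
```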
@@ -39,6 +39,8 @@ export interface FileChunk {
  }
 
  export interface SemanticSearchChunk {
+ fileId: string | null;
+ fileName: string | null;
  id: string;
  metadata: ChunkMetadata | null;
  pageNumber?: number | null;