@lobehub/chat 1.26.10 → 1.26.12

This diff represents the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,48 @@
 
  # Changelog
 
+ ### [Version 1.26.12](https://github.com/lobehub/lobe-chat/compare/v1.26.11...v1.26.12)
+
+ <sup>Released on **2024-10-30**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix file image prompts in client mode.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix file image prompts in client mode, closes [#4548](https://github.com/lobehub/lobe-chat/issues/4548) ([1b66639](https://github.com/lobehub/lobe-chat/commit/1b66639))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.26.11](https://github.com/lobehub/lobe-chat/compare/v1.26.10...v1.26.11)
+
+ <sup>Released on **2024-10-29**</sup>
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.26.10](https://github.com/lobehub/lobe-chat/compare/v1.26.9...v1.26.10)
 
  <sup>Released on **2024-10-29**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.26.10",
+   "version": "1.26.12",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -3,7 +3,7 @@ import { and, desc, isNull } from 'drizzle-orm/expressions';
  import { chunk } from 'lodash-es';
 
  import { serverDB } from '@/database/server';
- import { ChunkMetadata, FileChunk, SemanticSearchChunk } from '@/types/chunk';
+ import { ChunkMetadata, FileChunk } from '@/types/chunk';
 
  import {
    NewChunkItem,
@@ -148,6 +148,8 @@ export class ChunkModel {
 
    const data = await serverDB
      .select({
+       fileId: fileChunks.fileId,
+       fileName: files.name,
        id: chunks.id,
        index: chunks.index,
        metadata: chunks.metadata,
@@ -158,16 +160,15 @@ export class ChunkModel {
      .from(chunks)
      .leftJoin(embeddings, eq(chunks.id, embeddings.chunkId))
      .leftJoin(fileChunks, eq(chunks.id, fileChunks.chunkId))
+     .leftJoin(files, eq(fileChunks.fileId, files.id))
      .where(fileIds ? inArray(fileChunks.fileId, fileIds) : undefined)
      .orderBy((t) => desc(t.similarity))
      .limit(30);
 
-   return data.map(
-     (item): SemanticSearchChunk => ({
-       ...item,
-       metadata: item.metadata as ChunkMetadata,
-     }),
-   );
+   return data.map((item) => ({
+     ...item,
+     metadata: item.metadata as ChunkMetadata,
+   }));
  }
 
  async semanticSearchForChat({
@@ -187,7 +188,7 @@ export class ChunkModel {
    const result = await serverDB
      .select({
        fileId: files.id,
-       filename: files.name,
+       fileName: files.name,
        id: chunks.id,
        index: chunks.index,
        metadata: chunks.metadata,
@@ -205,6 +206,8 @@ export class ChunkModel {
 
    return result.map((item) => {
      return {
+       fileId: item.fileId,
+       fileName: item.fileName,
        id: item.id,
        index: item.index,
        similarity: item.similarity,
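
With the extra `leftJoin` on `files`, both semantic-search queries now surface the owning file of each chunk. A minimal sketch of the row shape a caller sees after this change (the grouping helper is hypothetical, not part of the package):

```ts
import { ChunkMetadata } from '@/types/chunk';

// Row shape returned by the updated semanticSearch query; fileId/fileName are
// null for chunks without an associated file (leftJoin semantics).
interface SemanticSearchRow {
  fileId: string | null;
  fileName: string | null;
  id: string;
  metadata: ChunkMetadata | null;
  similarity: number;
}

// Hypothetical consumer: group the top-30 chunks by their source file.
const groupByFile = (rows: SemanticSearchRow[]) => {
  const groups: Record<string, SemanticSearchRow[]> = {};
  for (const row of rows) {
    const key = row.fileName ?? 'unknown';
    (groups[key] ??= []).push(row);
  }
  return groups;
};
```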
@@ -0,0 +1,26 @@
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+ exports[`knowledgeBaseQAPrompts > should generate prompt with all parameters 1`] = `
+ "<knowledge_base_qa_info>
+ You are also a helpful assistant good at answering questions related to Test Knowledge. You'll be provided with a question and several passages that might be relevant, and your task is to provide an answer based on the question and passages.
+ <knowledge_base_answer_instruction>
+ - Note that the passages might not be relevant to the question; only use the passages that are relevant.
+ - If there is no relevant passage, answer using your own knowledge.
+ - The answer should use the same language as the question and follow markdown syntax.
+ </knowledge_base_answer_instruction>
+ <knowledge_bases>
+ <knowledge_bases_docstring>here are the knowledge bases we retrieve chunks from:</knowledge_bases_docstring>
+ <knowledge id="kb1" name="Test Knowledge" type="file" fileType="txt">Test description</knowledge>
+ </knowledge_bases>
+ <retrieved_chunks>
+ <retrieved_chunks_docstring>here are retrieved chunks you can refer to:</retrieved_chunks_docstring>
+ <chunk fileId="file1" fileName="test.txt" similarity="0.8" pageNumber="1">This is a test chunk</chunk>
+ </retrieved_chunks>
+ <user_query>
+ <user_query_docstring>to make the result better, we may rewrite the user's question. If there is a rewrite query, it will be wrapped with the \`rewrite_query\` tag.</user_query_docstring>
+
+ <raw_query>What is the test about?</raw_query>
+ <rewrite_query>Could you explain the content of the test?</rewrite_query>
+ </user_query>
+ </knowledge_base_qa_info>"
+ `;
@@ -0,0 +1,15 @@
+ import { ChatSemanticSearchChunk } from '@/types/chunk';
+
+ const chunkPrompt = (item: ChatSemanticSearchChunk) =>
+   `<chunk fileId="${item.fileId}" fileName="${item.fileName}" similarity="${item.similarity}"${item.pageNumber ? ` pageNumber="${item.pageNumber}"` : ''}>${item.text}</chunk>`;
+
+ export const chunkPrompts = (fileList: ChatSemanticSearchChunk[]) => {
+   if (fileList.length === 0) return '';
+
+   const prompt = `<retrieved_chunks>
+ <retrieved_chunks_docstring>here are retrieved chunks you can refer to:</retrieved_chunks_docstring>
+ ${fileList.map((item) => chunkPrompt(item)).join('\n')}
+ </retrieved_chunks>`;
+
+   return prompt.trim();
+ };
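
As an aside, a minimal sketch of what `chunkPrompts` renders for a single chunk (all values hypothetical):

```ts
import { chunkPrompts } from '@/prompts/knowledgeBaseQA/chunk';

// Renders:
// <retrieved_chunks>
// <retrieved_chunks_docstring>here are retrieved chunks you can refer to:</retrieved_chunks_docstring>
// <chunk fileId="f1" fileName="handbook.pdf" similarity="0.91" pageNumber="3">Refunds are processed within 14 days.</chunk>
// </retrieved_chunks>
console.log(
  chunkPrompts([
    {
      fileId: 'f1',
      fileName: 'handbook.pdf',
      id: 'c1',
      pageNumber: 3,
      similarity: 0.91,
      text: 'Refunds are processed within 14 days.',
    },
  ]),
);
```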
@@ -0,0 +1,146 @@
+ import { describe, expect, it } from 'vitest';
+
+ import { ChatSemanticSearchChunk } from '@/types/chunk';
+ import { KnowledgeItem, KnowledgeType } from '@/types/knowledgeBase';
+
+ import { knowledgeBaseQAPrompts } from './index';
+
+ describe('knowledgeBaseQAPrompts', () => {
+   // Define test data
+   const mockChunks: ChatSemanticSearchChunk[] = [
+     {
+       id: '1',
+       fileId: 'file1',
+       fileName: 'test.txt',
+       text: 'This is a test chunk',
+       similarity: 0.8,
+       pageNumber: 1,
+     },
+   ];
+
+   const mockKnowledge: KnowledgeItem[] = [
+     {
+       id: 'kb1',
+       name: 'Test Knowledge',
+       type: KnowledgeType.File,
+       fileType: 'txt',
+       description: 'Test description',
+     },
+   ];
+
+   const userQuery = 'What is the test about?';
+   const rewriteQuery = 'Could you explain the content of the test?';
+
+   it('should return empty string if chunks is empty', () => {
+     const result = knowledgeBaseQAPrompts({
+       chunks: [],
+       knowledge: mockKnowledge,
+       userQuery,
+     });
+
+     expect(result).toBe('');
+   });
+
+   it('should return empty string if chunks is undefined', () => {
+     const result = knowledgeBaseQAPrompts({
+       knowledge: mockKnowledge,
+       userQuery,
+     });
+
+     expect(result).toBe('');
+   });
+
+   it('should generate prompt with all parameters', () => {
+     const result = knowledgeBaseQAPrompts({
+       chunks: mockChunks,
+       knowledge: mockKnowledge,
+       userQuery,
+       rewriteQuery,
+     });
+
+     // Verify the prompt structure and content
+     expect(result).toMatchSnapshot();
+   });
+
+   it('should generate prompt without rewriteQuery', () => {
+     const result = knowledgeBaseQAPrompts({
+       chunks: mockChunks,
+       knowledge: mockKnowledge,
+       userQuery,
+     });
+
+     expect(result).toContain('<raw_query>What is the test about?</raw_query>');
+     expect(result).not.toContain('<rewrite_query>');
+   });
+
+   it('should generate prompt without knowledge', () => {
+     const result = knowledgeBaseQAPrompts({
+       chunks: mockChunks,
+       userQuery,
+     });
+
+     expect(result).toContain(
+       'You are also a helpful assistant good at answering questions related to',
+     );
+     expect(result).not.toContain('<knowledge_bases>');
+   });
+
+   it('should handle empty knowledge array', () => {
+     const result = knowledgeBaseQAPrompts({
+       chunks: mockChunks,
+       knowledge: [],
+       userQuery,
+     });
+
+     expect(result).toContain(
+       'You are also a helpful assistant good at answering questions related to',
+     );
+     expect(result).not.toContain('<knowledge_bases>');
+   });
+
+   it('should properly escape special characters in input', () => {
+     const specialChunks: ChatSemanticSearchChunk[] = [
+       {
+         id: '1',
+         fileId: 'file1',
+         fileName: 'test&.txt',
+         text: 'This is a test with & < > "quotes"',
+         similarity: 0.8,
+       },
+     ];
+
+     const result = knowledgeBaseQAPrompts({
+       chunks: specialChunks,
+       userQuery: 'Test with & < > "quotes"',
+     });
+
+     expect(result).toContain('test&.txt');
+     expect(result).toContain('This is a test with & < > "quotes"');
+     expect(result).toContain('Test with & < > "quotes"');
+   });
+
+   it('should handle multiple knowledge items', () => {
+     const multipleKnowledge: KnowledgeItem[] = [
+       {
+         id: 'kb1',
+         name: 'Knowledge 1',
+         type: KnowledgeType.File,
+       },
+       {
+         id: 'kb2',
+         name: 'Knowledge 2',
+         type: KnowledgeType.KnowledgeBase,
+       },
+     ];
+
+     const result = knowledgeBaseQAPrompts({
+       chunks: mockChunks,
+       knowledge: multipleKnowledge,
+       userQuery,
+     });
+
+     expect(result).toContain('Knowledge 1/Knowledge 2');
+     expect(result).toContain('<knowledge id="kb1"');
+     expect(result).toContain('<knowledge id="kb2"');
+   });
+ });
@@ -0,0 +1,33 @@
+ import { chunkPrompts } from '@/prompts/knowledgeBaseQA/chunk';
+ import { knowledgePrompts } from '@/prompts/knowledgeBaseQA/knowledge';
+ import { userQueryPrompt } from '@/prompts/knowledgeBaseQA/userQuery';
+ import { ChatSemanticSearchChunk } from '@/types/chunk';
+ import { KnowledgeItem } from '@/types/knowledgeBase';
+
+ export const knowledgeBaseQAPrompts = ({
+   chunks,
+   knowledge,
+   userQuery,
+   rewriteQuery,
+ }: {
+   chunks?: ChatSemanticSearchChunk[];
+   knowledge?: KnowledgeItem[];
+   rewriteQuery?: string;
+   userQuery: string;
+ }) => {
+   if (!chunks || chunks.length === 0) return '';
+
+   const domains = (knowledge || []).map((v) => v.name).join('/');
+
+   return `<knowledge_base_qa_info>
+ You are also a helpful assistant good at answering questions related to ${domains}. You'll be provided with a question and several passages that might be relevant, and your task is to provide an answer based on the question and passages.
+ <knowledge_base_answer_instruction>
+ - Note that the passages might not be relevant to the question; only use the passages that are relevant.
+ - If there is no relevant passage, answer using your own knowledge.
+ - The answer should use the same language as the question and follow markdown syntax.
+ </knowledge_base_answer_instruction>
+ ${knowledgePrompts(knowledge)}
+ ${chunkPrompts(chunks)}
+ ${userQueryPrompt(userQuery, rewriteQuery)}
+ </knowledge_base_qa_info>`;
+ };
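
For illustration, a minimal sketch of how the assembled prompt could be built at a call site (all values hypothetical):

```ts
import { knowledgeBaseQAPrompts } from '@/prompts/knowledgeBaseQA';
import { KnowledgeType } from '@/types/knowledgeBase';

// Hypothetical call site: build the QA context that later gets appended to
// the user's message before it is sent to the model.
const context = knowledgeBaseQAPrompts({
  chunks: [
    {
      fileId: 'f1',
      fileName: 'handbook.pdf',
      id: 'c1',
      similarity: 0.91,
      text: 'Refunds are processed within 14 days.',
    },
  ],
  knowledge: [{ id: 'kb1', name: 'Support Handbook', type: KnowledgeType.File }],
  rewriteQuery: 'What is the refund processing time?',
  userQuery: 'How long do refunds take?',
});
```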
@@ -0,0 +1,15 @@
+ import { KnowledgeItem } from '@/types/knowledgeBase';
+
+ const knowledgePrompt = (item: KnowledgeItem) =>
+   `<knowledge id="${item.id}" name="${item.name}" type="${item.type}"${item.fileType ? ` fileType="${item.fileType}"` : ''}>${item.description || ''}</knowledge>`;
+
+ export const knowledgePrompts = (list?: KnowledgeItem[]) => {
+   if (!list || list.length === 0) return '';
+
+   const prompt = `<knowledge_bases>
+ <knowledge_bases_docstring>here are the knowledge bases we retrieve chunks from:</knowledge_bases_docstring>
+ ${list.map((item) => knowledgePrompt(item)).join('\n')}
+ </knowledge_bases>`;
+
+   return prompt.trim();
+ };
@@ -0,0 +1,8 @@
+ export const userQueryPrompt = (userQuery: string, rewriteQuery?: string) => {
+   return `<user_query>
+ <user_query_docstring>to make the result better, we may rewrite the user's question. If there is a rewrite query, it will be wrapped with the \`rewrite_query\` tag.</user_query_docstring>
+
+ <raw_query>${userQuery.trim()}</raw_query>
+ ${rewriteQuery ? `<rewrite_query>${rewriteQuery.trim()}</rewrite_query>` : ''}
+ </user_query>`;
+ };
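
As a quick check, the helper renders both query variants; a hypothetical invocation:

```ts
import { userQueryPrompt } from '@/prompts/knowledgeBaseQA/userQuery';

// With a rewrite, both <raw_query> and <rewrite_query> appear; without one,
// the <rewrite_query> line is omitted entirely.
console.log(userQueryPrompt('How long do refunds take?', 'What is the refund processing time?'));
console.log(userQueryPrompt('How long do refunds take?'));
```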
@@ -2,8 +2,10 @@ import { LobeChatPluginManifest } from '@lobehub/chat-plugin-sdk';
  import { act } from '@testing-library/react';
  import { merge } from 'lodash-es';
  import OpenAI from 'openai';
- import { describe, expect, it, vi } from 'vitest';
+ import { beforeAll, beforeEach, describe, expect, it, vi } from 'vitest';
 
+ import { getAppConfig } from '@/config/app';
+ import { getServerDBConfig } from '@/config/db';
  import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
  import {
    LobeAnthropicAI,
@@ -53,6 +55,15 @@ vi.mock('@/utils/fetch', async (importOriginal) => {
    return { ...(module as any), getMessageError: vi.fn() };
  });
 
+ beforeEach(() => {
+   // Clear the module cache for all modules
+   vi.resetModules();
+   // Default isServerMode to false
+   vi.mock('@/const/version', () => ({
+     isServerMode: false,
+   }));
+ });
+
  // mock auth
  vi.mock('../_auth', () => ({
    createHeaderWithAuth: vi.fn().mockResolvedValue({}),
@@ -126,92 +137,6 @@ describe('ChatService', () => {
      );
    });
 
-   describe('handle with files content', () => {
-     it('should includes files', async () => {
-       const messages = [
-         {
-           content: 'Hello',
-           role: 'user',
-           imageList: [
-             {
-               id: 'imagecx1',
-               url: 'http://example.com/xxx0asd-dsd.png',
-               alt: 'ttt.png',
-             },
-           ],
-           fileList: [
-             {
-               fileType: 'plain/txt',
-               size: 100000,
-               id: 'file1',
-               url: 'http://abc.com/abc.txt',
-               name: 'abc.png',
-             },
-             {
-               id: 'file_oKMve9qySLMI',
-               name: '2402.16667v1.pdf',
-               type: 'application/pdf',
-               size: 11256078,
-               url: 'https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf',
-             },
-           ],
-         }, // Message with files
-         { content: 'Hi', role: 'tool', plugin: { identifier: 'plugin1', apiName: 'api1' } }, // Message with tool role
-         { content: 'Hey', role: 'assistant' }, // Regular user message
-       ] as ChatMessage[];
-
-       const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
-       await chatService.createAssistantMessage({
-         messages,
-         plugins: [],
-         model: 'gpt-4o',
-       });
-
-       expect(getChatCompletionSpy).toHaveBeenCalledWith(
-         {
-           messages: [
-             {
-               content: [
-                 {
-                   text: `Hello
-
- <files_info>
- <images>
- <images_docstring>here are user upload images you can refer to</images_docstring>
- <image name="ttt.png" url="http://example.com/xxx0asd-dsd.png"></image>
- </images>
- <files>
- <files_docstring>here are user upload files you can refer to</files_docstring>
- <file id="file1" name="abc.png" type="plain/txt" size="100000" url="http://abc.com/abc.txt"></file>
- <file id="file_oKMve9qySLMI" name="2402.16667v1.pdf" type="undefined" size="11256078" url="https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf"></file>
- </files>
- </files_info>`,
-                   type: 'text',
-                 },
-                 {
-                   image_url: { detail: 'auto', url: 'http://example.com/xxx0asd-dsd.png' },
-                   type: 'image_url',
-                 },
-               ],
-               role: 'user',
-             },
-             {
-               content: 'Hi',
-               name: 'plugin1____api1',
-               role: 'tool',
-             },
-             {
-               content: 'Hey',
-               role: 'assistant',
-             },
-           ],
-           model: 'gpt-4o',
-         },
-         undefined,
-       );
-     });
-   });
-
    describe('should handle content correctly for vision models', () => {
      it('should include image content when with vision model', async () => {
        const messages = [
@@ -243,15 +168,7 @@ describe('ChatService', () => {
          {
            content: [
              {
-               text: `Hello
-
- <files_info>
- <images>
- <images_docstring>here are user upload images you can refer to</images_docstring>
- <image name="abc.png" url="http://example.com/image.jpg"></image>
- </images>
-
- </files_info>`,
+               text: 'Hello',
                type: 'text',
              },
              {
@@ -829,6 +746,163 @@ describe('ChatService', () => {
        },
      ]);
    });
+
+   describe('handle with files content in server mode', () => {
+     it('should includes files', async () => {
+       // Re-mock the module with isServerMode set to true
+       vi.doMock('@/const/version', () => ({
+         isServerMode: true,
+       }));
+
+       // Re-import the affected modules after changing the mock
+       const { chatService } = await import('../chat');
+
+       const messages = [
+         {
+           content: 'Hello',
+           role: 'user',
+           imageList: [
+             {
+               id: 'imagecx1',
+               url: 'http://example.com/xxx0asd-dsd.png',
+               alt: 'ttt.png',
+             },
+           ],
+           fileList: [
+             {
+               fileType: 'plain/txt',
+               size: 100000,
+               id: 'file1',
+               url: 'http://abc.com/abc.txt',
+               name: 'abc.png',
+             },
+             {
+               id: 'file_oKMve9qySLMI',
+               name: '2402.16667v1.pdf',
+               type: 'application/pdf',
+               size: 11256078,
+               url: 'https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf',
+             },
+           ],
+         }, // Message with files
+         { content: 'Hi', role: 'tool', plugin: { identifier: 'plugin1', apiName: 'api1' } }, // Message with tool role
+         { content: 'Hey', role: 'assistant' }, // Regular user message
+       ] as ChatMessage[];
+
+       const output = chatService['processMessages']({
+         messages,
+         model: 'gpt-4o',
+       });
+
+       expect(output).toEqual([
+         {
+           content: [
+             {
+               text: `Hello
+
+ <files_info>
+ <images>
+ <images_docstring>here are user upload images you can refer to</images_docstring>
+ <image name="ttt.png" url="http://example.com/xxx0asd-dsd.png"></image>
+ </images>
+ <files>
+ <files_docstring>here are user upload files you can refer to</files_docstring>
+ <file id="file1" name="abc.png" type="plain/txt" size="100000" url="http://abc.com/abc.txt"></file>
+ <file id="file_oKMve9qySLMI" name="2402.16667v1.pdf" type="undefined" size="11256078" url="https://xxx.com/ppp/480497/5826c2b8-fde0-4de1-a54b-a224d5e3d898.pdf"></file>
+ </files>
+ </files_info>`,
+               type: 'text',
+             },
+             {
+               image_url: { detail: 'auto', url: 'http://example.com/xxx0asd-dsd.png' },
+               type: 'image_url',
+             },
+           ],
+           role: 'user',
+         },
+         {
+           content: 'Hi',
+           name: 'plugin1____api1',
+           role: 'tool',
+         },
+         {
+           content: 'Hey',
+           role: 'assistant',
+         },
+       ]);
+     });
+   });
+
+   it('should include image files in server mode', async () => {
+     // Re-mock the module with isServerMode set to true
+     vi.doMock('@/const/version', () => ({
+       isServerMode: true,
+     }));
+
+     // Re-import the affected modules after changing the mock
+     const { chatService } = await import('../chat');
+     const messages = [
+       {
+         content: 'Hello',
+         role: 'user',
+         imageList: [
+           {
+             id: 'file1',
+             url: 'http://example.com/image.jpg',
+             alt: 'abc.png',
+           },
+         ],
+       }, // Message with files
+       { content: 'Hi', role: 'tool', plugin: { identifier: 'plugin1', apiName: 'api1' } }, // Message with tool role
+       { content: 'Hey', role: 'assistant' }, // Regular user message
+     ] as ChatMessage[];
+
+     const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
+     await chatService.createAssistantMessage({
+       messages,
+       plugins: [],
+       model: 'gpt-4-vision-preview',
+     });
+
+     expect(getChatCompletionSpy).toHaveBeenCalledWith(
+       {
+         messages: [
+           {
+             content: [
+               {
+                 text: `Hello
+
+ <files_info>
+ <images>
+ <images_docstring>here are user upload images you can refer to</images_docstring>
+ <image name="abc.png" url="http://example.com/image.jpg"></image>
+ </images>
+
+ </files_info>`,
+                 type: 'text',
+               },
+               {
+                 image_url: { detail: 'auto', url: 'http://example.com/image.jpg' },
+                 type: 'image_url',
+               },
+             ],
+             role: 'user',
+           },
+           {
+             content: 'Hi',
+             name: 'plugin1____api1',
+             role: 'tool',
+           },
+           {
+             content: 'Hey',
+             role: 'assistant',
+           },
+         ],
+         model: 'gpt-4-vision-preview',
+       },
+       undefined,
+     );
+   });
  });
});
 
@@ -7,6 +7,7 @@ import { INBOX_GUIDE_SYSTEMROLE } from '@/const/guide';
  import { INBOX_SESSION_ID } from '@/const/session';
  import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
  import { TracePayload, TraceTagMap } from '@/const/trace';
+ import { isServerMode } from '@/const/version';
  import { AgentRuntime, ChatCompletionErrorPayload, ModelProvider } from '@/libs/agent-runtime';
  import { filesPrompts } from '@/prompts/files';
  import { useSessionStore } from '@/store/session';
@@ -420,7 +421,7 @@ class ChatService {
 
    const imageList = m.imageList || [];
 
-   const filesContext = filesPrompts({ fileList: m.fileList, imageList });
+   const filesContext = isServerMode ? filesPrompts({ fileList: m.fileList, imageList }) : '';
    return [
      { text: (m.content + '\n\n' + filesContext).trim(), type: 'text' },
      ...imageList.map(
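
This gate is the substance of the 1.26.12 fix ("Fix file image prompts in client mode"): in client mode the textual `<files_info>` block is no longer injected, while images still reach the model through the `image_url` parts. A condensed sketch of the behavior (message values hypothetical; `fileList` may be undefined, as in the code above):

```ts
import { isServerMode } from '@/const/version';
import { filesPrompts } from '@/prompts/files';

// Hypothetical message fragment mirroring the code above.
const content = 'Hello';
const imageList = [{ alt: 'ttt.png', id: 'img1', url: 'http://example.com/a.png' }];

// Server mode: prepend the <files_info> text block built by filesPrompts.
// Client mode: drop it, so the model only sees the raw text plus image parts.
const filesContext = isServerMode ? filesPrompts({ fileList: undefined, imageList }) : '';
const text = (content + '\n\n' + filesContext).trim();
```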
@@ -0,0 +1,261 @@
+ import { act, renderHook } from '@testing-library/react';
+ import { Mock, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import { chatService } from '@/services/chat';
+ import { ragService } from '@/services/rag';
+ import { useAgentStore } from '@/store/agent';
+ import { agentSelectors } from '@/store/agent/selectors';
+ import { chatSelectors } from '@/store/chat/selectors';
+ import { systemAgentSelectors } from '@/store/user/selectors';
+ import { ChatMessage } from '@/types/message';
+ import { QueryRewriteSystemAgent } from '@/types/user/settings';
+
+ import { useChatStore } from '../../../../store';
+
+ // Mock services
+ vi.mock('@/services/chat', () => ({
+   chatService: {
+     fetchPresetTaskResult: vi.fn(),
+   },
+ }));
+
+ vi.mock('@/services/rag', () => ({
+   ragService: {
+     deleteMessageRagQuery: vi.fn(),
+     semanticSearchForChat: vi.fn(),
+   },
+ }));
+
+ beforeEach(() => {
+   vi.clearAllMocks();
+ });
+
+ describe('chatRAG actions', () => {
+   describe('deleteUserMessageRagQuery', () => {
+     it('should not delete if message not found', async () => {
+       const { result } = renderHook(() => useChatStore());
+
+       await act(async () => {
+         await result.current.deleteUserMessageRagQuery('non-existent-id');
+       });
+
+       expect(ragService.deleteMessageRagQuery).not.toHaveBeenCalled();
+     });
+
+     it('should not delete if message has no ragQueryId', async () => {
+       const { result } = renderHook(() => useChatStore());
+       const messageId = 'message-id';
+
+       act(() => {
+         useChatStore.setState({
+           messagesMap: {
+             default: [{ id: messageId }] as ChatMessage[],
+           },
+         });
+       });
+
+       await act(async () => {
+         await result.current.deleteUserMessageRagQuery(messageId);
+       });
+
+       expect(ragService.deleteMessageRagQuery).not.toHaveBeenCalled();
+     });
+   });
+
+   describe('internal_retrieveChunks', () => {
+     it('should retrieve chunks with existing ragQuery', async () => {
+       const { result } = renderHook(() => useChatStore());
+       const messageId = 'message-id';
+       const existingRagQuery = 'existing-query';
+       const userQuery = 'user-query';
+
+       // Mock the message with existing ragQuery
+       vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(
+         () =>
+           ({
+             id: messageId,
+             ragQuery: existingRagQuery,
+           }) as ChatMessage,
+       );
+
+       // Mock the semantic search response
+       (ragService.semanticSearchForChat as Mock).mockResolvedValue({
+         chunks: [{ id: 'chunk-1' }],
+         queryId: 'query-id',
+       });
+
+       vi.spyOn(agentSelectors, 'currentKnowledgeIds').mockReturnValue({
+         fileIds: [],
+         knowledgeBaseIds: [],
+       });
+
+       const result1 = await act(async () => {
+         return await result.current.internal_retrieveChunks(messageId, userQuery, []);
+       });
+
+       expect(result1).toEqual({
+         chunks: [{ id: 'chunk-1' }],
+         queryId: 'query-id',
+         rewriteQuery: existingRagQuery,
+       });
+       expect(ragService.semanticSearchForChat).toHaveBeenCalledWith(
+         expect.objectContaining({
+           rewriteQuery: existingRagQuery,
+           userQuery,
+         }),
+       );
+     });
+
+     it('should rewrite query if no existing ragQuery', async () => {
+       const { result } = renderHook(() => useChatStore());
+       const messageId = 'message-id';
+       const userQuery = 'user-query';
+       const rewrittenQuery = 'rewritten-query';
+
+       // Mock the message without ragQuery
+       vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(
+         () =>
+           ({
+             id: messageId,
+           }) as ChatMessage,
+       );
+
+       // Mock the rewrite query function
+       vi.spyOn(result.current, 'internal_rewriteQuery').mockResolvedValueOnce(rewrittenQuery);
+
+       // Mock the semantic search response
+       (ragService.semanticSearchForChat as Mock).mockResolvedValue({
+         chunks: [{ id: 'chunk-1' }],
+         queryId: 'query-id',
+       });
+
+       vi.spyOn(agentSelectors, 'currentKnowledgeIds').mockReturnValue({
+         fileIds: [],
+         knowledgeBaseIds: [],
+       });
+
+       const result2 = await act(async () => {
+         return await result.current.internal_retrieveChunks(messageId, userQuery, ['message']);
+       });
+
+       expect(result2).toEqual({
+         chunks: [{ id: 'chunk-1' }],
+         queryId: 'query-id',
+         rewriteQuery: rewrittenQuery,
+       });
+       expect(result.current.internal_rewriteQuery).toHaveBeenCalledWith(messageId, userQuery, [
+         'message',
+       ]);
+     });
+   });
+
+   describe('internal_rewriteQuery', () => {
+     it('should return original content if query rewrite is disabled', async () => {
+       const { result } = renderHook(() => useChatStore());
+       const content = 'original content';
+
+       vi.spyOn(systemAgentSelectors, 'queryRewrite').mockReturnValueOnce({
+         enabled: false,
+       } as QueryRewriteSystemAgent);
+
+       const rewrittenQuery = await result.current.internal_rewriteQuery('id', content, []);
+
+       expect(rewrittenQuery).toBe(content);
+       expect(chatService.fetchPresetTaskResult).not.toHaveBeenCalled();
+     });
+
+     it('should rewrite query if enabled', async () => {
+       const { result } = renderHook(() => useChatStore());
+       const messageId = 'message-id';
+       const content = 'original content';
+       const rewrittenContent = 'rewritten content';
+
+       vi.spyOn(systemAgentSelectors, 'queryRewrite').mockReturnValueOnce({
+         enabled: true,
+         model: 'gpt-3.5',
+         provider: 'openai',
+       });
+
+       (chatService.fetchPresetTaskResult as Mock).mockImplementation(({ onFinish }) => {
+         onFinish(rewrittenContent);
+       });
+
+       const rewrittenQuery = await result.current.internal_rewriteQuery(messageId, content, []);
+
+       expect(rewrittenQuery).toBe(rewrittenContent);
+       expect(chatService.fetchPresetTaskResult).toHaveBeenCalled();
+     });
+   });
+
+   describe('internal_shouldUseRAG', () => {
+     it('should return true if has enabled knowledge', () => {
+       const { result } = renderHook(() => useChatStore());
+
+       vi.spyOn(agentSelectors, 'hasEnabledKnowledge').mockReturnValue(true);
+       vi.spyOn(chatSelectors, 'currentUserFiles').mockReturnValue([]);
+
+       expect(result.current.internal_shouldUseRAG()).toBe(true);
+     });
+
+     it('should return true if has user files', () => {
+       const { result } = renderHook(() => useChatStore());
+
+       vi.spyOn(agentSelectors, 'hasEnabledKnowledge').mockReturnValue(false);
+       vi.spyOn(chatSelectors, 'currentUserFiles').mockReturnValue([{ id: 'file-1' }] as any);
+
+       expect(result.current.internal_shouldUseRAG()).toBe(true);
+     });
+
+     it('should return false if no knowledge or files', () => {
+       const { result } = renderHook(() => useChatStore());
+
+       vi.spyOn(agentSelectors, 'hasEnabledKnowledge').mockReturnValue(false);
+       vi.spyOn(chatSelectors, 'currentUserFiles').mockReturnValue([]);
+
+       expect(result.current.internal_shouldUseRAG()).toBe(false);
+     });
+   });
+
+   describe('rewriteQuery', () => {
+     it('should not rewrite if message not found', async () => {
+       const { result } = renderHook(() => useChatStore());
+
+       vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(() => undefined);
+       const rewriteSpy = vi.spyOn(result.current, 'internal_rewriteQuery');
+
+       await act(async () => {
+         await result.current.rewriteQuery('non-existent-id');
+       });
+
+       expect(rewriteSpy).not.toHaveBeenCalled();
+     });
+
+     it('should rewrite query for existing message', async () => {
+       const { result } = renderHook(() => useChatStore());
+       const messageId = 'message-id';
+       const content = 'message content';
+
+       vi.spyOn(chatSelectors, 'getMessageById').mockReturnValue(
+         () =>
+           ({
+             id: messageId,
+             content,
+           }) as ChatMessage,
+       );
+
+       vi.spyOn(chatSelectors, 'currentChatsWithHistoryConfig').mockReturnValue([
+         { content: 'history' },
+       ] as ChatMessage[]);
+
+       const rewriteSpy = vi.spyOn(result.current, 'internal_rewriteQuery');
+       const deleteSpy = vi.spyOn(result.current, 'deleteUserMessageRagQuery');
+
+       await act(async () => {
+         await result.current.rewriteQuery(messageId);
+       });
+
+       expect(deleteSpy).toHaveBeenCalledWith(messageId);
+       expect(rewriteSpy).toHaveBeenCalledWith(messageId, content, ['history']);
+     });
+   });
+ });
@@ -4,10 +4,10 @@ import { produce } from 'immer';
  import { template } from 'lodash-es';
  import { StateCreator } from 'zustand/vanilla';
 
- import { chainAnswerWithContext } from '@/chains/answerWithContext';
  import { LOADING_FLAT, MESSAGE_CANCEL_FLAT } from '@/const/message';
  import { TraceEventType, TraceNameMap } from '@/const/trace';
  import { isServerMode } from '@/const/version';
+ import { knowledgeBaseQAPrompts } from '@/prompts/knowledgeBaseQA';
  import { chatService } from '@/services/chat';
  import { messageService } from '@/services/message';
  import { useAgentStore } from '@/store/agent';
@@ -269,10 +269,11 @@ export const generateAIChat: StateCreator<
 
  let fileChunks: MessageSemanticSearchChunk[] | undefined;
  let ragQueryId;
+
  // go into RAG flow if there is ragQuery flag
  if (params?.ragQuery) {
    // 1. get the relevant chunks from semantic search
-   const { chunks, queryId } = await get().internal_retrieveChunks(
+   const { chunks, queryId, rewriteQuery } = await get().internal_retrieveChunks(
      userMessageId,
      params?.ragQuery,
      // should skip the last content
@@ -281,19 +282,21 @@ export const generateAIChat: StateCreator<
 
    ragQueryId = queryId;
 
+   const lastMsg = messages.pop() as ChatMessage;
+
    // 2. build the retrieved context messages
-   const retrieveContext = chainAnswerWithContext({
-     context: chunks.map((c) => c.text as string),
-     question: params?.ragQuery,
-     knowledge: getAgentKnowledge().map((knowledge) => knowledge.name),
+   const knowledgeBaseQAContext = knowledgeBaseQAPrompts({
+     chunks,
+     userQuery: lastMsg.content,
+     rewriteQuery,
+     knowledge: getAgentKnowledge(),
    });
 
    // 3. add the retrieved context to the messages history
-   if (retrieveContext.messages && retrieveContext.messages?.length > 0) {
-     // remove the last message due to the query is in the retrieveContext
-     messages.pop();
-     retrieveContext.messages?.forEach((m) => messages.push(m as ChatMessage));
-   }
+   messages.push({
+     ...lastMsg,
+     content: (lastMsg.content + '\n\n' + knowledgeBaseQAContext).trim(),
+   });
 
    fileChunks = chunks.map((c) => ({ id: c.id, similarity: c.similarity }));
  }
@@ -499,7 +502,7 @@ export const generateAIChat: StateCreator<
 
  await internal_coreProcessMessage(contextMessages, latestMsg.id, {
    traceId,
-   ragQuery: get().internal_shouldUseRAG() ? currentMessage.content : undefined,
+   ragQuery: get().internal_shouldUseRAG() ? latestMsg.content : undefined,
  });
},
 
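Taken together, the RAG step now folds the QA context into the last user message instead of appending chain-generated messages. A condensed, self-contained sketch of that flow (types and helper names taken from the diff; the wrapper function itself is hypothetical):

```ts
import { knowledgeBaseQAPrompts } from '@/prompts/knowledgeBaseQA';
import { ChatSemanticSearchChunk } from '@/types/chunk';
import { KnowledgeItem } from '@/types/knowledgeBase';
import { ChatMessage } from '@/types/message';

// Hypothetical wrapper: retrieval has already happened; merge the QA context
// into the last user message so the model receives one enriched message.
const applyRagContext = (
  messages: ChatMessage[],
  chunks: ChatSemanticSearchChunk[],
  knowledge: KnowledgeItem[],
  rewriteQuery?: string,
) => {
  const lastMsg = messages.pop() as ChatMessage;
  const context = knowledgeBaseQAPrompts({
    chunks,
    knowledge,
    rewriteQuery,
    userQuery: lastMsg.content,
  });
  messages.push({ ...lastMsg, content: (lastMsg.content + '\n\n' + context).trim() });
  return messages;
};
```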
@@ -21,7 +21,7 @@ export interface ChatRAGAction {
    id: string,
    userQuery: string,
    messages: string[],
- ) => Promise<{ chunks: ChatSemanticSearchChunk[]; queryId: string }>;
+ ) => Promise<{ chunks: ChatSemanticSearchChunk[]; queryId: string; rewriteQuery?: string }>;
  /**
   * Rewrite user content into a better RAG query
   */
@@ -64,12 +64,11 @@ export const chatRag: StateCreator<ChatStore, [['zustand/devtools', never]], [],
  const message = chatSelectors.getMessageById(id)(get());
 
  // 1. get the rewrite query
- let rewriteQuery = message?.ragQuery || userQuery;
+ let rewriteQuery = message?.ragQuery as string | undefined;
 
- // only rewrite query length is less than 15 characters, refs: https://github.com/lobehub/lobe-chat/pull/4288
  // if there is no ragQuery and there is a chat history
  // we need to rewrite the user message to get better results
- if (rewriteQuery.length < 15 && !message?.ragQuery && messages.length > 0) {
+ if (!message?.ragQuery && messages.length > 0) {
    rewriteQuery = await get().internal_rewriteQuery(id, userQuery, messages);
  }
 
@@ -79,13 +78,13 @@ export const chatRag: StateCreator<ChatStore, [['zustand/devtools', never]], [],
    fileIds: knowledgeIds().fileIds.concat(files),
    knowledgeIds: knowledgeIds().knowledgeBaseIds,
    messageId: id,
-   rewriteQuery,
+   rewriteQuery: rewriteQuery || userQuery,
    userQuery,
  });
 
  get().internal_toggleMessageRAGLoading(false, id);
 
- return { chunks, queryId };
+ return { chunks, queryId, rewriteQuery };
},
internal_rewriteQuery: async (id, content, messages) => {
  let rewriteQuery = content;
@@ -39,6 +39,8 @@ export interface FileChunk {
  }
 
  export interface SemanticSearchChunk {
+   fileId: string | null;
+   fileName: string | null;
    id: string;
    metadata: ChunkMetadata | null;
    pageNumber?: number | null;