@lobehub/chat 1.121.1 → 1.122.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/next.config.ts +1 -0
  4. package/package.json +1 -1
  5. package/packages/const/src/index.ts +1 -0
  6. package/packages/const/src/session.ts +4 -11
  7. package/packages/database/src/models/__tests__/message.test.ts +41 -0
  8. package/packages/database/src/models/message.ts +21 -13
  9. package/packages/database/src/models/topic.ts +4 -9
  10. package/packages/types/src/aiChat.ts +55 -0
  11. package/packages/types/src/index.ts +4 -0
  12. package/packages/types/src/message/base.ts +17 -4
  13. package/packages/types/src/message/chat.ts +1 -15
  14. package/packages/types/src/message/index.ts +1 -0
  15. package/packages/types/src/message/rag.ts +21 -0
  16. package/packages/utils/src/index.ts +1 -0
  17. package/packages/utils/src/object.test.ts +11 -0
  18. package/src/server/routers/lambda/__tests__/message.test.ts +30 -0
  19. package/src/server/routers/lambda/aiChat.test.ts +107 -0
  20. package/src/server/routers/lambda/aiChat.ts +80 -0
  21. package/src/server/routers/lambda/index.ts +2 -0
  22. package/src/server/routers/lambda/message.ts +7 -0
  23. package/src/server/services/aiChat/index.test.ts +57 -0
  24. package/src/server/services/aiChat/index.ts +36 -0
  25. package/src/services/aiChat.ts +12 -0
  26. package/src/services/message/_deprecated.ts +4 -0
  27. package/src/services/message/client.ts +5 -0
  28. package/src/services/message/server.ts +4 -0
  29. package/src/services/message/type.ts +2 -0
  30. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +11 -1
  31. package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +410 -0
  32. package/src/store/chat/slices/aiChat/actions/index.ts +7 -1
  33. package/src/store/chat/slices/message/action.ts +38 -1
  34. package/src/store/chat/slices/message/reducer.ts +11 -0
  35. package/src/store/chat/slices/topic/reducer.ts +14 -1
package/src/server/routers/lambda/message.ts
@@ -7,6 +7,7 @@ import { authedProcedure, publicProcedure, router } from '@/libs/trpc/lambda';
  import { serverDatabase } from '@/libs/trpc/lambda/middleware';
  import { FileService } from '@/server/services/file';
  import { ChatMessage } from '@/types/message';
+ import { UpdateMessageRAGParamsSchema } from '@/types/message/rag';
  import { BatchTaskResult } from '@/types/service';

  type ChatMessageList = ChatMessage[];
@@ -174,6 +175,12 @@ export const messageRouter = router({
      return ctx.messageModel.updateMessagePlugin(input.id, input.value);
    }),

+   updateMessageRAG: messageProcedure
+     .input(UpdateMessageRAGParamsSchema)
+     .mutation(async ({ input, ctx }) => {
+       await ctx.messageModel.updateMessageRAG(input.id, input.value);
+     }),
+
    updatePluginError: messageProcedure
      .input(
        z.object({
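The UpdateMessageRAGParamsSchema itself lives in packages/types/src/message/rag.ts (+21 lines, not shown in this diff). Judging from how it is consumed (input.id / input.value in the route above, and internal_updateMessageRAG(assistantId, { ragQueryId, fileChunks }) in generateAIChatV2 further down), a plausible shape is the following sketch; everything beyond the id/value wrapper and the ragQueryId/fileChunks fields is an assumption:

import { z } from 'zod';

// Hypothetical reconstruction of packages/types/src/message/rag.ts — not part of this diff.
export const UpdateMessageRAGParamsSchema = z.object({
  id: z.string(),
  value: z.object({
    // chunk ids with similarity scores, as produced by the semantic search step
    fileChunks: z.array(z.object({ id: z.string(), similarity: z.number().optional() })),
    ragQueryId: z.string().optional(),
  }),
});

// IMessageService.updateMessageRAG(id, value) takes only the value part
export type UpdateMessageRAGParams = z.infer<typeof UpdateMessageRAGParamsSchema>['value'];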
package/src/server/services/aiChat/index.test.ts
@@ -0,0 +1,57 @@
+ import { describe, expect, it, vi } from 'vitest';
+
+ import { MessageModel } from '@/database/models/message';
+ import { TopicModel } from '@/database/models/topic';
+ import { LobeChatDatabase } from '@/database/type';
+ import { FileService } from '@/server/services/file';
+
+ import { AiChatService } from '.';
+
+ vi.mock('@/database/models/message');
+ vi.mock('@/database/models/topic');
+ vi.mock('@/server/services/file');
+
+ describe('AiChatService', () => {
+   it('getMessagesAndTopics should fetch messages and topics concurrently', async () => {
+     const serverDB = {} as unknown as LobeChatDatabase;
+
+     const mockQueryMessages = vi.fn().mockResolvedValue([{ id: 'm1' }]);
+     const mockQueryTopics = vi.fn().mockResolvedValue([{ id: 't1' }]);
+
+     vi.mocked(MessageModel).mockImplementation(() => ({ query: mockQueryMessages }) as any);
+     vi.mocked(TopicModel).mockImplementation(() => ({ query: mockQueryTopics }) as any);
+     vi.mocked(FileService).mockImplementation(
+       () => ({ getFullFileUrl: vi.fn().mockResolvedValue('url') }) as any,
+     );
+
+     const service = new AiChatService(serverDB, 'u1');
+
+     const res = await service.getMessagesAndTopics({ includeTopic: true, sessionId: 's1' });
+
+     expect(mockQueryMessages).toHaveBeenCalledWith(
+       { includeTopic: true, sessionId: 's1' },
+       expect.objectContaining({ postProcessUrl: expect.any(Function) }),
+     );
+     expect(mockQueryTopics).toHaveBeenCalledWith({ sessionId: 's1' });
+     expect(res.messages).toEqual([{ id: 'm1' }]);
+     expect(res.topics).toEqual([{ id: 't1' }]);
+   });
+
+   it('getMessagesAndTopics should not query topics when includeTopic is false', async () => {
+     const serverDB = {} as unknown as LobeChatDatabase;
+
+     const mockQueryMessages = vi.fn().mockResolvedValue([]);
+     vi.mocked(MessageModel).mockImplementation(() => ({ query: mockQueryMessages }) as any);
+     vi.mocked(TopicModel).mockImplementation(() => ({ query: vi.fn() }) as any);
+     vi.mocked(FileService).mockImplementation(
+       () => ({ getFullFileUrl: vi.fn().mockResolvedValue('url') }) as any,
+     );
+
+     const service = new AiChatService(serverDB, 'u1');
+
+     const res = await service.getMessagesAndTopics({ includeTopic: false, topicId: 't1' });
+
+     expect(mockQueryMessages).toHaveBeenCalled();
+     expect(res.topics).toBeUndefined();
+   });
+ });
package/src/server/services/aiChat/index.ts
@@ -0,0 +1,36 @@
+ import { MessageModel } from '@/database/models/message';
+ import { TopicModel } from '@/database/models/topic';
+ import { LobeChatDatabase } from '@/database/type';
+ import { FileService } from '@/server/services/file';
+
+ export class AiChatService {
+   private userId: string;
+   private messageModel: MessageModel;
+   private fileService: FileService;
+   private topicModel: TopicModel;
+
+   constructor(serverDB: LobeChatDatabase, userId: string) {
+     this.userId = userId;
+
+     this.messageModel = new MessageModel(serverDB, userId);
+     this.topicModel = new TopicModel(serverDB, userId);
+     this.fileService = new FileService(serverDB, userId);
+   }
+
+   async getMessagesAndTopics(params: {
+     current?: number;
+     includeTopic?: boolean;
+     pageSize?: number;
+     sessionId?: string;
+     topicId?: string;
+   }) {
+     const [messages, topics] = await Promise.all([
+       this.messageModel.query(params, {
+         postProcessUrl: (path) => this.fileService.getFullFileUrl(path),
+       }),
+       params.includeTopic ? this.topicModel.query({ sessionId: params.sessionId }) : undefined,
+     ]);
+
+     return { messages, topics };
+   }
+ }
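The new lambda router that consumes this service (src/server/routers/lambda/aiChat.ts, +80 lines, not shown in the hunks above) presumably instantiates it per request behind the serverDatabase middleware, along the lines of this sketch; the procedure wiring and the schema name are assumptions, not the package's verified code:

// Hypothetical wiring, following the messageProcedure pattern in message.ts above.
const aiChatProcedure = authedProcedure.use(serverDatabase).use(async (opts) => {
  const { ctx } = opts;

  return opts.next({
    ctx: { aiChatService: new AiChatService(ctx.serverDB, ctx.userId) },
  });
});

export const aiChatRouter = router({
  sendMessageInServer: aiChatProcedure
    .input(SendMessageServerParamsSchema) // assumed schema from @lobechat/types
    .mutation(async ({ input, ctx }) => {
      // ... create the user/assistant messages, then return the refreshed state
      return ctx.aiChatService.getMessagesAndTopics({
        includeTopic: true,
        sessionId: input.sessionId,
        topicId: input.topicId,
      });
    }),
});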
package/src/services/aiChat.ts
@@ -0,0 +1,12 @@
+ import { SendMessageServerParams } from '@lobechat/types';
+ import { cleanObject } from '@lobechat/utils';
+
+ import { lambdaClient } from '@/libs/trpc/client';
+
+ class AiChatService {
+   sendMessageInServer = async (params: SendMessageServerParams) => {
+     return lambdaClient.aiChat.sendMessageInServer.mutate(cleanObject(params));
+   };
+ }
+
+ export const aiChatService = new AiChatService();
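cleanObject here presumably strips empty fields before the tRPC call, so optional params such as topicId or threadId are omitted from the payload rather than sent as undefined; the added packages/utils/src/object.test.ts (+11 lines) covers it. A minimal sketch of the assumed behavior, not the package's actual implementation:

// Assumed implementation; the real one lives in @lobechat/utils and is not shown in this diff.
const cleanObject = <T extends Record<string, unknown>>(obj: T) =>
  Object.fromEntries(
    Object.entries(obj).filter(([, value]) => value !== undefined && value !== null),
  ) as Partial<T>;

// cleanObject({ topicId: undefined, threadId: null, sessionId: 's1' }) => { sessionId: 's1' }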
package/src/services/message/_deprecated.ts
@@ -143,4 +143,8 @@ export class ClientService implements IMessageService {
    async updateMessagePluginError() {
      throw new Error('Method not implemented.');
    }
+
+   async updateMessageRAG(): Promise<void> {
+     throw new Error('Method not implemented.');
+   }
  }
package/src/services/message/client.ts
@@ -96,6 +96,11 @@ export class ClientService extends BaseClientService implements IMessageService
      return this.messageModel.updateMessagePlugin(id, { error: value });
    };

+   updateMessageRAG: IMessageService['updateMessageRAG'] = async (id, value) => {
+     console.log(id, value);
+     throw new Error('not implemented');
+   };
+
    updateMessagePluginArguments: IMessageService['updateMessagePluginArguments'] = async (
      id,
      value,
package/src/services/message/server.ts
@@ -84,6 +84,10 @@ export class ServerService implements IMessageService {
      return lambdaClient.message.updatePluginError.mutate({ id, value: error as any });
    };

+   updateMessageRAG: IMessageService['updateMessageRAG'] = async (id, data) => {
+     return lambdaClient.message.updateMessageRAG.mutate({ id, value: data });
+   };
+
    removeMessage: IMessageService['removeMessage'] = async (id) => {
      return lambdaClient.message.removeMessage.mutate({ id });
    };
package/src/services/message/type.ts
@@ -11,6 +11,7 @@ import {
    ModelRankItem,
    UpdateMessageParams,
  } from '@/types/message';
+ import { UpdateMessageRAGParams } from '@/types/message/rag';

  /* eslint-disable typescript-sort-keys/interface */

@@ -39,6 +40,7 @@ export interface IMessageService {
    updateMessageTranslate(id: string, translate: Partial<ChatTranslate> | false): Promise<any>;
    updateMessagePluginState(id: string, value: Record<string, any>): Promise<any>;
    updateMessagePluginError(id: string, value: ChatMessagePluginError | null): Promise<any>;
+   updateMessageRAG(id: string, value: UpdateMessageRAGParams): Promise<void>;
    updateMessagePluginArguments(id: string, value: string | Record<string, any>): Promise<any>;
    removeMessage(id: string): Promise<any>;
    removeMessages(ids: string[]): Promise<any>;
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts
@@ -152,7 +152,13 @@ export const generateAIChat: StateCreator<
    },

    sendMessage: async ({ message, files, onlyAddUserMessage, isWelcomeQuestion }) => {
-     const { internal_coreProcessMessage, activeTopicId, activeId, activeThreadId } = get();
+     const {
+       internal_coreProcessMessage,
+       activeTopicId,
+       activeId,
+       activeThreadId,
+       sendMessageInServer,
+     } = get();
      if (!activeId) return;

      const fileIdList = files?.map((f) => f.id);
@@ -162,6 +168,10 @@ export const generateAIChat: StateCreator<
      // if message is empty or no files, then stop
      if (!message && !hasFile) return;

+     // route to the server-mode send message
+     if (isServerMode)
+       return sendMessageInServer({ message, files, onlyAddUserMessage, isWelcomeQuestion });
+
      set({ isCreatingMessage: true }, false, n('creatingMessage/start'));

      const newMessage: CreateMessageParams = {
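Note that isServerMode is used in this hunk without a visible import; it presumably reaches the file through the const package (packages/const/src/index.ts gains one export line in this release), e.g.:

// Assumed import path; the actual import statement falls outside the hunk shown above.
import { isServerMode } from '@lobechat/const';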
package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts
@@ -0,0 +1,410 @@
+ /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
+ // Disable the auto-sort-keys eslint rule to keep the code logically ordered and readable
+ import { INBOX_SESSION_ID, isDesktop } from '@lobechat/const';
+ import { knowledgeBaseQAPrompts } from '@lobechat/prompts';
+ import {
+   ChatMessage,
+   ChatTopic,
+   MessageSemanticSearchChunk,
+   SendMessageParams,
+   TraceNameMap,
+ } from '@lobechat/types';
+ import { t } from 'i18next';
+ import { StateCreator } from 'zustand/vanilla';
+
+ import { aiChatService } from '@/services/aiChat';
+ import { chatService } from '@/services/chat';
+ import { messageService } from '@/services/message';
+ import { getAgentStoreState } from '@/store/agent';
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/slices/chat';
+ import { aiModelSelectors, aiProviderSelectors, getAiInfraStoreState } from '@/store/aiInfra';
+ import type { ChatStore } from '@/store/chat/store';
+ import { getSessionStoreState } from '@/store/session';
+ import { WebBrowsingManifest } from '@/tools/web-browsing';
+ import { setNamespace } from '@/utils/storeDebug';
+
+ import { chatSelectors, topicSelectors } from '../../../selectors';
+ import { messageMapKey } from '../../../utils/messageMapKey';
+
+ const n = setNamespace('ai');
+
+ export interface AIGenerateV2Action {
+   /**
+    * Sends a new message to the AI chat system
+    */
+   sendMessageInServer: (params: SendMessageParams) => Promise<void>;
+   internal_refreshAiChat: (params: {
+     topics?: ChatTopic[];
+     messages: ChatMessage[];
+     sessionId: string;
+     topicId?: string;
+   }) => void;
+   /**
+    * Executes the core processing logic for AI messages,
+    * including preprocessing and postprocessing steps
+    */
+   internal_execAgentRuntime: (params: {
+     messages: ChatMessage[];
+     userMessageId: string;
+     assistantMessageId: string;
+     isWelcomeQuestion?: boolean;
+     inSearchWorkflow?: boolean;
+     /**
+      * the RAG query content, which will be embedded and used in semantic search
+      */
+     ragQuery?: string;
+     threadId?: string;
+     inPortalThread?: boolean;
+     traceId?: string;
+   }) => Promise<void>;
+ }
+
+ export const generateAIChatV2: StateCreator<
+   ChatStore,
+   [['zustand/devtools', never]],
+   [],
+   AIGenerateV2Action
+ > = (set, get) => ({
+   sendMessageInServer: async ({ message, files, onlyAddUserMessage, isWelcomeQuestion }) => {
+     const { activeTopicId, activeId, activeThreadId, internal_execAgentRuntime } = get();
+     if (!activeId) return;
+
+     const fileIdList = files?.map((f) => f.id);
+
+     const hasFile = !!fileIdList && fileIdList.length > 0;
+
+     // if the message is empty and there are no files, then stop
+     if (!message && !hasFile) return;
+
+     if (onlyAddUserMessage) {
+       await get().addUserMessage({ message, fileList: fileIdList });
+
+       return;
+     }
+
+     const messages = chatSelectors.activeBaseChats(get());
+
+     // use an optimistic update to avoid a slow wait
+     const tempId = get().internal_createTmpMessage({
+       content: message,
+       // if the message has files attached, then add the files to the message and the agent
+       files: fileIdList,
+       role: 'user',
+       sessionId: activeId,
+       // if there is an activeTopicId, then add the topicId to the message
+       topicId: activeTopicId,
+       threadId: activeThreadId,
+     });
+
+     get().internal_toggleMessageLoading(true, tempId);
+     set({ isCreatingMessage: true }, false, 'creatingMessage/start');
+
+     const { model, provider } = agentSelectors.currentAgentConfig(getAgentStoreState());
+
+     const data = await aiChatService.sendMessageInServer({
+       newUserMessage: {
+         content: message,
+         files: fileIdList,
+       },
+       // if there is an activeTopicId, then add the topicId to the message
+       topicId: activeTopicId,
+       threadId: activeThreadId,
+       newTopic: !activeTopicId
+         ? {
+             topicMessageIds: messages.map((m) => m.id),
+             title: t('defaultTitle', { ns: 'topic' }),
+           }
+         : undefined,
+       sessionId: activeId === INBOX_SESSION_ID ? undefined : activeId,
+       newAssistantMessage: { model, provider: provider! },
+     });
+
+     // refresh the total data
+     get().internal_refreshAiChat({
+       messages: data.messages,
+       topics: data.topics,
+       sessionId: activeId,
+       topicId: data.topicId,
+     });
+     get().internal_dispatchMessage({ type: 'deleteMessage', id: tempId });
+
+     if (!activeTopicId) {
+       await get().switchTopic(data.topicId!, true);
+     }
+
+     get().internal_toggleMessageLoading(false, tempId);
+
+     // update the assistant session to make it rerank
+     getSessionStoreState().triggerSessionUpdate(get().activeId);
+
+     // get the current messages to generate the AI response,
+     // removing the latest assistant message id
+     const baseMessages = chatSelectors
+       .activeBaseChats(get())
+       .filter((item) => item.id !== data.assistantMessageId);
+
+     try {
+       await internal_execAgentRuntime({
+         messages: baseMessages,
+         userMessageId: data.userMessageId,
+         assistantMessageId: data.assistantMessageId,
+         isWelcomeQuestion,
+         ragQuery: get().internal_shouldUseRAG() ? message : undefined,
+         threadId: activeThreadId,
+       });
+       set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
+
+       const summaryTitle = async () => {
+         // check the active topic and then auto-update the topic title
+         if (data.isCreatNewTopic) {
+           await get().summaryTopicTitle(data.topicId, data.messages);
+           return;
+         }
+
+         if (!activeTopicId) return;
+
+         const topic = topicSelectors.getTopicById(activeTopicId)(get());
+
+         if (topic && !topic.title) {
+           const chats = chatSelectors.getBaseChatsByKey(messageMapKey(activeId, topic.id))(get());
+           await get().summaryTopicTitle(topic.id, chats);
+         }
+       };
+
+       //
+       // if there are related files, then add the files to the agent
+       // (only available in server mode)
+       const userFiles = chatSelectors.currentUserFiles(get()).map((f) => f.id);
+       const addFilesToAgent = async () => {
+         await getAgentStoreState().addFilesToAgent(userFiles, false);
+       };
+
+       await Promise.all([summaryTitle(), addFilesToAgent()]);
+     } catch (e) {
+       console.error(e);
+       set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
+     }
+   },
+
+   internal_refreshAiChat: ({ topics, messages, sessionId, topicId }) => {
+     set(
+       {
+         topicMaps: topics ? { ...get().topicMaps, [sessionId]: topics } : get().topicMaps,
+         messagesMap: { ...get().messagesMap, [messageMapKey(sessionId, topicId)]: messages },
+       },
+       false,
+       'refreshAiChat',
+     );
+   },
+
+   internal_execAgentRuntime: async (params) => {
+     const {
+       assistantMessageId: assistantId,
+       userMessageId,
+       ragQuery,
+       messages: originalMessages,
+     } = params;
+     const {
+       internal_fetchAIChatMessage,
+       triggerToolCalls,
+       refreshMessages,
+       internal_updateMessageRAG,
+     } = get();
+
+     // create a new array to avoid mutating the original messages array
+     const messages = [...originalMessages];
+
+     const agentStoreState = getAgentStoreState();
+     const { model, provider, chatConfig } = agentSelectors.currentAgentConfig(agentStoreState);
+
+     let fileChunks: MessageSemanticSearchChunk[] | undefined;
+     let ragQueryId;
+
+     // go into the RAG flow if the ragQuery flag is present
+     if (ragQuery) {
+       // 1. get the relevant chunks from semantic search
+       const { chunks, queryId, rewriteQuery } = await get().internal_retrieveChunks(
+         userMessageId,
+         ragQuery,
+         // should skip the last content
+         messages.map((m) => m.content).slice(0, messages.length - 1),
+       );
+
+       ragQueryId = queryId;
+
+       const lastMsg = messages.pop() as ChatMessage;
+
+       // 2. build the retrieval context messages
+       const knowledgeBaseQAContext = knowledgeBaseQAPrompts({
+         chunks,
+         userQuery: lastMsg.content,
+         rewriteQuery,
+         knowledge: agentSelectors.currentEnabledKnowledge(agentStoreState),
+       });
+
+       // 3. add the retrieval context to the message history
+       messages.push({
+         ...lastMsg,
+         content: (lastMsg.content + '\n\n' + knowledgeBaseQAContext).trim(),
+       });
+
+       fileChunks = chunks.map((c) => ({ id: c.id, similarity: c.similarity }));
+
+       if (fileChunks.length > 0) {
+         await internal_updateMessageRAG(assistantId, { ragQueryId, fileChunks });
+       }
+     }
+
+     // 3. run a search with the search working model if this model does not support tool use
+     const aiInfraStoreState = getAiInfraStoreState();
+     const isModelSupportToolUse = aiModelSelectors.isModelSupportToolUse(
+       model,
+       provider!,
+     )(aiInfraStoreState);
+     const isProviderHasBuiltinSearch = aiProviderSelectors.isProviderHasBuiltinSearch(provider!)(
+       aiInfraStoreState,
+     );
+     const isModelHasBuiltinSearch = aiModelSelectors.isModelHasBuiltinSearch(
+       model,
+       provider!,
+     )(aiInfraStoreState);
+     const useModelBuiltinSearch = agentChatConfigSelectors.useModelBuiltinSearch(agentStoreState);
+     const useModelSearch =
+       (isProviderHasBuiltinSearch || isModelHasBuiltinSearch) && useModelBuiltinSearch;
+     const isAgentEnableSearch = agentChatConfigSelectors.isAgentEnableSearch(agentStoreState);
+
+     if (isAgentEnableSearch && !useModelSearch && !isModelSupportToolUse) {
+       const { model, provider } = agentChatConfigSelectors.searchFCModel(agentStoreState);
+
+       let isToolsCalling = false;
+       let isError = false;
+
+       const abortController = get().internal_toggleChatLoading(
+         true,
+         assistantId,
+         n('generateMessage(start)', { messageId: assistantId, messages }),
+       );
+
+       get().internal_toggleSearchWorkflow(true, assistantId);
+       await chatService.fetchPresetTaskResult({
+         params: { messages, model, provider, plugins: [WebBrowsingManifest.identifier] },
+         onFinish: async (_, { toolCalls, usage }) => {
+           if (toolCalls && toolCalls.length > 0) {
+             get().internal_toggleToolCallingStreaming(assistantId, undefined);
+             // update the tool calls
+             await get().internal_updateMessageContent(assistantId, '', {
+               toolCalls,
+               metadata: usage,
+               model,
+               provider,
+             });
+           }
+         },
+         trace: {
+           traceId: params.traceId,
+           sessionId: get().activeId,
+           topicId: get().activeTopicId,
+           traceName: TraceNameMap.SearchIntentRecognition,
+         },
+         abortController,
+         onMessageHandle: async (chunk) => {
+           if (chunk.type === 'tool_calls') {
+             get().internal_toggleSearchWorkflow(false, assistantId);
+             get().internal_toggleToolCallingStreaming(assistantId, chunk.isAnimationActives);
+             get().internal_dispatchMessage({
+               id: assistantId,
+               type: 'updateMessage',
+               value: { tools: get().internal_transformToolCalls(chunk.tool_calls) },
+             });
+             isToolsCalling = true;
+           }
+
+           if (chunk.type === 'text') {
+             abortController!.abort('not fc');
+           }
+         },
+         onErrorHandle: async (error) => {
+           isError = true;
+           await messageService.updateMessageError(assistantId, error);
+           await refreshMessages();
+         },
+       });
+
+       get().internal_toggleChatLoading(
+         false,
+         assistantId,
+         n('generateMessage(start)', { messageId: assistantId, messages }),
+       );
+       get().internal_toggleSearchWorkflow(false, assistantId);
+
+       // if there is an error, then stop
+       if (isError) return;
+
+       // if it's a function-call message, trigger the function method
+       if (isToolsCalling) {
+         get().internal_toggleMessageInToolsCalling(true, assistantId);
+         await refreshMessages();
+         await triggerToolCalls(assistantId, {
+           threadId: params?.threadId,
+           inPortalThread: params?.inPortalThread,
+         });
+
+         // then stop the workflow
+         return;
+       }
+     }
+
+     // 4. fetch the AI response
+     const { isFunctionCall, content } = await internal_fetchAIChatMessage({
+       messages,
+       messageId: assistantId,
+       params,
+       model,
+       provider: provider!,
+     });
+
+     // 5. if it's a function-call message, trigger the function method
+     if (isFunctionCall) {
+       get().internal_toggleMessageInToolsCalling(true, assistantId);
+       await refreshMessages();
+       await triggerToolCalls(assistantId, {
+         threadId: params?.threadId,
+         inPortalThread: params?.inPortalThread,
+       });
+     } else {
+       // show a desktop notification (desktop only, and only while the window is hidden)
+       if (isDesktop) {
+         try {
+           // dynamically import the desktop notification service to avoid importing it outside desktop environments
+           const { desktopNotificationService } = await import(
+             '@/services/electron/desktopNotification'
+           );
+
+           await desktopNotificationService.showNotification({
+             body: content,
+             title: t('notification.finishChatGeneration', { ns: 'electron' }),
+           });
+         } catch (error) {
+           // swallow the error silently so it does not affect the normal flow
+           console.error('Desktop notification error:', error);
+         }
+       }
+     }
+
+     // 6. summarize the history if the context messages exceed historyCount
+     const historyCount = agentChatConfigSelectors.historyCount(agentStoreState);
+
+     if (
+       agentChatConfigSelectors.enableHistoryCount(agentStoreState) &&
+       chatConfig.enableCompressHistory &&
+       originalMessages.length > historyCount
+     ) {
+       // after generation: [u1,a1,u2,a2,u3,a3]
+       // but `originalMessages` is still: [u1,a1,u2,a2,u3]
+       // so if historyCount=2, we need to summarize [u1,a1,u2,a2],
+       // because the user sees the UI as [u1,a1,u2,a2 | u3,a3]
+       const historyMessages = originalMessages.slice(0, -historyCount + 1);
+
+       await get().internal_summaryHistory(historyMessages);
+     }
+   },
+ });
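A quick check of the slice arithmetic in step 6, using the hypothetical message ids from the comment above:

// historyCount = 2; originalMessages is still [u1, a1, u2, a2, u3] (a3 not yet appended)
const originalMessages = ['u1', 'a1', 'u2', 'a2', 'u3'];
const historyCount = 2;

// slice(0, -historyCount + 1) === slice(0, -1) => everything except the latest user message
const historyMessages = originalMessages.slice(0, -historyCount + 1);
console.log(historyMessages); // ['u1', 'a1', 'u2', 'a2'] — the left side of the [ ... | u3, a3] divider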
package/src/store/chat/slices/aiChat/actions/index.ts
@@ -3,10 +3,15 @@ import { StateCreator } from 'zustand/vanilla';
  import { ChatStore } from '@/store/chat/store';

  import { AIGenerateAction, generateAIChat } from './generateAIChat';
+ import { AIGenerateV2Action, generateAIChatV2 } from './generateAIChatV2';
  import { ChatMemoryAction, chatMemory } from './memory';
  import { ChatRAGAction, chatRag } from './rag';

- export interface ChatAIChatAction extends ChatRAGAction, ChatMemoryAction, AIGenerateAction {
+ export interface ChatAIChatAction
+   extends ChatRAGAction,
+     ChatMemoryAction,
+     AIGenerateAction,
+     AIGenerateV2Action {
    /**/
  }

@@ -19,4 +24,5 @@ export const chatAiChat: StateCreator<
    ...chatRag(...params),
    ...generateAIChat(...params),
    ...chatMemory(...params),
+   ...generateAIChatV2(...params),
  });