@lobehub/chat 1.22.3 → 1.22.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,536 @@
+ /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
+ // Disable the auto-sort-keys eslint rules to keep the code in logical, readable order
+ import { produce } from 'immer';
+ import { template } from 'lodash-es';
+ import { StateCreator } from 'zustand/vanilla';
+
+ import { chainAnswerWithContext } from '@/chains/answerWithContext';
+ import { LOADING_FLAT, MESSAGE_CANCEL_FLAT } from '@/const/message';
+ import { TraceEventType, TraceNameMap } from '@/const/trace';
+ import { isServerMode } from '@/const/version';
+ import { chatService } from '@/services/chat';
+ import { messageService } from '@/services/message';
+ import { useAgentStore } from '@/store/agent';
+ import { agentSelectors } from '@/store/agent/selectors';
+ import { chatHelpers } from '@/store/chat/helpers';
+ import { ChatStore } from '@/store/chat/store';
+ import { messageMapKey } from '@/store/chat/utils/messageMapKey';
+ import { useSessionStore } from '@/store/session';
+ import { UploadFileItem } from '@/types/files/upload';
+ import { ChatMessage, CreateMessageParams } from '@/types/message';
+ import { MessageSemanticSearchChunk } from '@/types/rag';
+ import { setNamespace } from '@/utils/storeDebug';
+
+ import { chatSelectors, topicSelectors } from '../../selectors';
+ import { ChatRAGAction, chatRag } from './actions/rag';
+
+ const n = setNamespace('m');
+
+ export interface SendMessageParams {
+   message: string;
+   files?: UploadFileItem[];
+   onlyAddUserMessage?: boolean;
+   /**
+    * https://github.com/lobehub/lobe-chat/pull/2086
+    */
+   isWelcomeQuestion?: boolean;
+ }
+
+ interface ProcessMessageParams {
+   traceId?: string;
+   isWelcomeQuestion?: boolean;
+   /**
+    * the RAG query content; it will be embedded and used in the semantic search
+    */
+   ragQuery?: string;
+ }
+
+ export interface ChatAIChatAction extends ChatRAGAction {
+   // create
+   sendMessage: (params: SendMessageParams) => Promise<void>;
+   /**
+    * regenerate message
+    * trace enabled
+    * @param id
+    */
+   regenerateMessage: (id: string) => Promise<void>;
+
+   // delete
+   delAndRegenerateMessage: (id: string) => Promise<void>;
+   stopGenerateMessage: () => void;
+
+   // ========= ↓ Internal Method ↓ ========== //
+   // ========================================== //
+   // ========================================== //
+
+   /**
+    * core process of the AI message (includes preprocess and postprocess)
+    */
+   internal_coreProcessMessage: (
+     messages: ChatMessage[],
+     parentId: string,
+     params?: ProcessMessageParams,
+   ) => Promise<void>;
+   /**
+    * the method to fetch the AI message
+    */
+   internal_fetchAIChatMessage: (
+     messages: ChatMessage[],
+     assistantMessageId: string,
+     params?: ProcessMessageParams,
+   ) => Promise<{
+     isFunctionCall: boolean;
+     traceId?: string;
+   }>;
+
+   internal_resendMessage: (id: string, traceId?: string) => Promise<void>;
+
+   /**
+    * method to toggle the AI message generating loading state
+    */
+   internal_toggleChatLoading: (
+     loading: boolean,
+     id?: string,
+     action?: string,
+   ) => AbortController | undefined;
+   /**
+    * method to toggle the tool calling loading state
+    */
+   internal_toggleToolCallingStreaming: (id: string, streaming: boolean[] | undefined) => void;
+ }
+
+ const getAgentConfig = () => agentSelectors.currentAgentConfig(useAgentStore.getState());
+ const getAgentChatConfig = () => agentSelectors.currentAgentChatConfig(useAgentStore.getState());
+ const getAgentKnowledge = () => agentSelectors.currentEnabledKnowledge(useAgentStore.getState());
+
+ export const chatAiChat: StateCreator<
+   ChatStore,
+   [['zustand/devtools', never]],
+   [],
+   ChatAIChatAction
+ > = (set, get, ...rest) => ({
+   ...chatRag(set, get, ...rest),
+
+   delAndRegenerateMessage: async (id) => {
+     const traceId = chatSelectors.getTraceIdByMessageId(id)(get());
+     get().internal_resendMessage(id, traceId);
+     get().deleteMessage(id);
+
+     // trace the delete-and-regenerate event
+     get().internal_traceMessage(id, { eventType: TraceEventType.DeleteAndRegenerateMessage });
+   },
+   regenerateMessage: async (id: string) => {
+     const traceId = chatSelectors.getTraceIdByMessageId(id)(get());
+     await get().internal_resendMessage(id, traceId);
+
+     // trace the regenerate event
+     get().internal_traceMessage(id, { eventType: TraceEventType.RegenerateMessage });
+   },
+
+   sendMessage: async ({ message, files, onlyAddUserMessage, isWelcomeQuestion }) => {
+     const { internal_coreProcessMessage, activeTopicId, activeId } = get();
+     if (!activeId) return;
+
+     const fileIdList = files?.map((f) => f.id);
+
+     const hasFile = !!fileIdList && fileIdList.length > 0;
+
+     // if the message is empty and there are no files, stop
+     if (!message && !hasFile) return;
+
+     set({ isCreatingMessage: true }, false, 'creatingMessage/start');
+
+     const newMessage: CreateMessageParams = {
+       content: message,
+       // if the message has attached files, add them to the message and the agent
+       files: fileIdList,
+       role: 'user',
+       sessionId: activeId,
+       // if there is an activeTopicId, attach it to the message
+       topicId: activeTopicId,
+     };
+
+     const agentConfig = getAgentChatConfig();
+
+     let tempMessageId: string | undefined = undefined;
+     let newTopicId: string | undefined = undefined;
+
+     // we are still in the default topic here, so
+     // if autoCreateTopic is enabled, check whether we need to create a topic
+     if (!onlyAddUserMessage && !activeTopicId && agentConfig.enableAutoCreateTopic) {
+       // check the active topic and auto-create a topic if needed
+       const chats = chatSelectors.currentChats(get());
+
+       // we will add two messages (user and assistant), so the final length should be +2
+       const featureLength = chats.length + 2;
+
+       // if there is no activeTopicId and the expected length reaches the threshold,
+       // create a new topic and activate it
+       if (!get().activeTopicId && featureLength >= agentConfig.autoCreateTopicThreshold) {
+         // we need to create a temp message for optimistic update
+         tempMessageId = get().internal_createTmpMessage(newMessage);
+         get().internal_toggleMessageLoading(true, tempMessageId);
+
+         const topicId = await get().createTopic();
+
+         if (topicId) {
+           newTopicId = topicId;
+           newMessage.topicId = topicId;
+
+           // we need to copy the messages to the new topic, or the messages will disappear
+           const mapKey = chatSelectors.currentChatKey(get());
+           const newMaps = {
+             ...get().messagesMap,
+             [messageMapKey(activeId, topicId)]: get().messagesMap[mapKey],
+           };
+           set({ messagesMap: newMaps }, false, 'internal_copyMessages');
+
+           // get().internal_dispatchMessage({ type: 'deleteMessage', id: tempMessageId });
+           get().internal_toggleMessageLoading(false, tempMessageId);
+
+           // mark the topic as loading
+           get().internal_updateTopicLoading(topicId, true);
+         }
+       }
+     }
+     // trigger a session update so the assistant list is re-ranked
+     useSessionStore.getState().triggerSessionUpdate(get().activeId);
+
+     const id = await get().internal_createMessage(newMessage, {
+       tempMessageId,
+       skipRefresh: !onlyAddUserMessage,
+     });
+
+     // switch to the new topic if one was created
+     if (!!newTopicId) {
+       await get().switchTopic(newTopicId, true);
+       await get().internal_fetchMessages();
+
+       // delete previous messages
+       // remove the temp message map
+       const newMaps = { ...get().messagesMap, [messageMapKey(activeId, null)]: [] };
+       set({ messagesMap: newMaps }, false, 'internal_copyMessages');
+     }
+
+     // if we only need to add the user message, stop here
+     if (onlyAddUserMessage) {
+       set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
+       return;
+     }
+
+     // get the current messages to generate the AI response
+     const messages = chatSelectors.currentChats(get());
+     const userFiles = chatSelectors.currentUserFiles(get()).map((f) => f.id);
+
+     await internal_coreProcessMessage(messages, id, {
+       isWelcomeQuestion,
+       ragQuery: get().internal_shouldUseRAG() ? message : undefined,
+     });
+
+     set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
+
+     const summaryTitle = async () => {
+       // if autoCreateTopic is disabled, stop
+       if (!agentConfig.enableAutoCreateTopic) return;
+
+       // check the active topic and auto-update the topic title
+       if (newTopicId) {
+         const chats = chatSelectors.currentChats(get());
+         await get().summaryTopicTitle(newTopicId, chats);
+         return;
+       }
+
+       const topic = topicSelectors.currentActiveTopic(get());
+
+       if (topic && !topic.title) {
+         const chats = chatSelectors.currentChats(get());
+         await get().summaryTopicTitle(topic.id, chats);
+       }
+     };
+
+     // if there are related files, add them to the agent
+     // only available in server mode
+     const addFilesToAgent = async () => {
+       if (userFiles.length === 0 || !isServerMode) return;
+
+       await useAgentStore.getState().addFilesToAgent(userFiles, false);
+     };
+
+     await Promise.all([summaryTitle(), addFilesToAgent()]);
+   },
+   stopGenerateMessage: () => {
+     const { abortController, internal_toggleChatLoading } = get();
+     if (!abortController) return;
+
+     abortController.abort(MESSAGE_CANCEL_FLAT);
+
+     internal_toggleChatLoading(false, undefined, n('stopGenerateMessage') as string);
+   },
+
+   // the internal process method of the AI message
+   internal_coreProcessMessage: async (originalMessages, userMessageId, params) => {
+     const { internal_fetchAIChatMessage, triggerToolCalls, refreshMessages, activeTopicId } = get();
+
+     // create a new array to avoid mutating the original messages array
+     const messages = [...originalMessages];
+
+     const { model, provider } = getAgentConfig();
+
+     let fileChunks: MessageSemanticSearchChunk[] | undefined;
+     let ragQueryId;
+     // 1. enter the RAG flow if the ragQuery flag is set
+     if (params?.ragQuery) {
+       // 1.1 get the relevant chunks from semantic search
+       const { chunks, queryId } = await get().internal_retrieveChunks(
+         userMessageId,
+         params?.ragQuery,
+         // should skip the last content
+         messages.map((m) => m.content).slice(0, messages.length - 1),
+       );
+
+       ragQueryId = queryId;
+
+       // 1.2 build the retrieve-context messages
+       const retrieveContext = chainAnswerWithContext({
+         context: chunks.map((c) => c.text as string),
+         question: params?.ragQuery,
+         knowledge: getAgentKnowledge().map((knowledge) => knowledge.name),
+       });
+
+       // 1.3 add the retrieve-context messages to the messages history
+       if (retrieveContext.messages && retrieveContext.messages?.length > 0) {
+         // remove the last message because the query is already included in the retrieveContext
+         messages.pop();
+         retrieveContext.messages?.forEach((m) => messages.push(m as ChatMessage));
+       }
+
+       fileChunks = chunks.map((c) => ({ id: c.id, similarity: c.similarity }));
+     }
+
+     // 2. add an empty message as a placeholder for the AI response
+     const assistantMessage: CreateMessageParams = {
+       role: 'assistant',
+       content: LOADING_FLAT,
+       fromModel: model,
+       fromProvider: provider,
+
+       parentId: userMessageId,
+       sessionId: get().activeId,
+       topicId: activeTopicId, // if there is an activeTopicId, attach it
+       fileChunks,
+       ragQueryId,
+     };
+
+     const assistantId = await get().internal_createMessage(assistantMessage);
+
+     // 3. fetch the AI response
+     const { isFunctionCall } = await internal_fetchAIChatMessage(messages, assistantId, params);
+
+     // 4. if it is a function-call message, trigger the tool calls
+     if (isFunctionCall) {
+       await refreshMessages();
+       await triggerToolCalls(assistantId);
+     }
+   },
+   internal_fetchAIChatMessage: async (messages, assistantId, params) => {
+     const {
+       internal_toggleChatLoading,
+       refreshMessages,
+       internal_updateMessageContent,
+       internal_dispatchMessage,
+       internal_toggleToolCallingStreaming,
+     } = get();
+
+     const abortController = internal_toggleChatLoading(
+       true,
+       assistantId,
+       n('generateMessage(start)', { assistantId, messages }) as string,
+     );
+
+     const agentConfig = getAgentConfig();
+     const chatConfig = agentConfig.chatConfig;
+
+     const compiler = template(chatConfig.inputTemplate, { interpolate: /{{([\S\s]+?)}}/g });
+
+     // ================================== //
+     //   uniform message preprocessing    //
+     // ================================== //
+
+     // 1. slice messages with config
+     let preprocessMsgs = chatHelpers.getSlicedMessagesWithConfig(messages, chatConfig);
+
+     // 2. apply the inputTemplate to user messages
+     preprocessMsgs = !chatConfig.inputTemplate
+       ? preprocessMsgs
+       : preprocessMsgs.map((m) => {
+           if (m.role === 'user') {
+             try {
+               return { ...m, content: compiler({ text: m.content }) };
+             } catch (error) {
+               console.error(error);
+
+               return m;
+             }
+           }
+
+           return m;
+         });
+
+     // 3. add the systemRole
+     if (agentConfig.systemRole) {
+       preprocessMsgs.unshift({ content: agentConfig.systemRole, role: 'system' } as ChatMessage);
+     }
+
+     // 4. handle max_tokens
+     agentConfig.params.max_tokens = chatConfig.enableMaxTokens
+       ? agentConfig.params.max_tokens
+       : undefined;
+
+     // 5. handle config for the vision model
+     // Because the gpt-4-vision-preview model's default max_tokens is very small,
+     // we need to set a larger max_tokens.
+     if (agentConfig.model === 'gpt-4-vision-preview') {
+       /* eslint-disable unicorn/no-lonely-if */
+       if (!agentConfig.params.max_tokens)
+         // refs: https://github.com/lobehub/lobe-chat/issues/837
+         agentConfig.params.max_tokens = 2048;
+     }
+
+     let isFunctionCall = false;
+     let msgTraceId: string | undefined;
+     let output = '';
+
+     await chatService.createAssistantMessageStream({
+       abortController,
+       params: {
+         messages: preprocessMsgs,
+         model: agentConfig.model,
+         provider: agentConfig.provider,
+         ...agentConfig.params,
+         plugins: agentConfig.plugins,
+       },
+       trace: {
+         traceId: params?.traceId,
+         sessionId: get().activeId,
+         topicId: get().activeTopicId,
+         traceName: TraceNameMap.Conversation,
+       },
+       isWelcomeQuestion: params?.isWelcomeQuestion,
+       onErrorHandle: async (error) => {
+         await messageService.updateMessageError(assistantId, error);
+         await refreshMessages();
+       },
+       onFinish: async (content, { traceId, observationId, toolCalls }) => {
+         // if there is a traceId, store it on the message
+         if (traceId) {
+           msgTraceId = traceId;
+           await messageService.updateMessage(assistantId, {
+             traceId,
+             observationId: observationId ?? undefined,
+           });
+         }
+
+         if (toolCalls && toolCalls.length > 0) {
+           internal_toggleToolCallingStreaming(assistantId, undefined);
+         }
+
+         // update the content after the fetch finishes
+         await internal_updateMessageContent(assistantId, content, toolCalls);
+       },
+       onMessageHandle: async (chunk) => {
+         switch (chunk.type) {
+           case 'text': {
+             output += chunk.text;
+             internal_dispatchMessage({
+               id: assistantId,
+               type: 'updateMessage',
+               value: { content: output },
+             });
+             break;
+           }
+
+           // this message is a tool call
+           case 'tool_calls': {
+             internal_toggleToolCallingStreaming(assistantId, chunk.isAnimationActives);
+             internal_dispatchMessage({
+               id: assistantId,
+               type: 'updateMessage',
+               value: { tools: get().internal_transformToolCalls(chunk.tool_calls) },
+             });
+             isFunctionCall = true;
+           }
+         }
+       },
+     });
+
+     internal_toggleChatLoading(false, assistantId, n('generateMessage(end)') as string);
+
+     return {
+       isFunctionCall,
+       traceId: msgTraceId,
+     };
+   },
+
+   internal_resendMessage: async (messageId, traceId) => {
+     // 1. construct all relevant history records
+     const chats = chatSelectors.currentChats(get());
+
+     const currentIndex = chats.findIndex((c) => c.id === messageId);
+     if (currentIndex < 0) return;
+
+     const currentMessage = chats[currentIndex];
+
+     let contextMessages: ChatMessage[] = [];
+
+     switch (currentMessage.role) {
+       case 'tool':
+       case 'user': {
+         contextMessages = chats.slice(0, currentIndex + 1);
+         break;
+       }
+       case 'assistant': {
+         // the message was sent by the AI, so we need to find its parent user message
+         const userId = currentMessage.parentId;
+         const userIndex = chats.findIndex((c) => c.id === userId);
+         // if the message has no parentId, behave the same as the user/function case
+         contextMessages = chats.slice(0, userIndex < 0 ? currentIndex + 1 : userIndex + 1);
+         break;
+       }
+     }
+
+     if (contextMessages.length <= 0) return;
+
+     const { internal_coreProcessMessage } = get();
+
+     const latestMsg = contextMessages.findLast((s) => s.role === 'user');
+
+     if (!latestMsg) return;
+
+     await internal_coreProcessMessage(contextMessages, latestMsg.id, {
+       traceId,
+       ragQuery: get().internal_shouldUseRAG() ? currentMessage.content : undefined,
+     });
+   },
+
+   // ----- Loading ------- //
+   internal_toggleChatLoading: (loading, id, action) => {
+     return get().internal_toggleLoadingArrays('chatLoadingIds', loading, id, action);
+   },
+   internal_toggleToolCallingStreaming: (id, streaming) => {
+     set(
+       {
+         toolCallingStreamIds: produce(get().toolCallingStreamIds, (draft) => {
+           if (!!streaming) {
+             draft[id] = streaming;
+           } else {
+             delete draft[id];
+           }
+         }),
+       },
+
+       false,
+       'toggleToolCallingStreaming',
+     );
+   },
+ });
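
A minimal usage sketch of the new slice, for orientation. It is not part of the package diff; it assumes the slice is wired into the useChatStore hook (consistent with the unchanged useChatStore import in the last hunk below), and uploadedFiles stands in for output of the existing upload flow:

import { useChatStore } from '@/store/chat';
import { UploadFileItem } from '@/types/files/upload';

// Send a user message; the slice handles optional topic auto-creation,
// RAG retrieval, and streaming of the assistant reply.
const onSubmit = async (text: string, uploadedFiles: UploadFileItem[]) => {
  await useChatStore.getState().sendMessage({ message: text, files: uploadedFiles });
};

// Cancel an in-flight generation; this aborts the stored AbortController
// with MESSAGE_CANCEL_FLAT and clears the chat loading state.
const onStop = () => useChatStore.getState().stopGenerateMessage();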
@@ -0,0 +1,27 @@
+ export interface ChatAIChatState {
+   abortController?: AbortController;
+   /**
+    * whether the AI message is generating
+    */
+   chatLoadingIds: string[];
+   inputFiles: File[];
+   inputMessage: string;
+   /**
+    * whether the message is in the RAG flow
+    */
+   messageRAGLoadingIds: string[];
+   pluginApiLoadingIds: string[];
+   /**
+    * the tool calling stream ids
+    */
+   toolCallingStreamIds: Record<string, boolean[]>;
+ }
+
+ export const initialAiChatState: ChatAIChatState = {
+   chatLoadingIds: [],
+   inputFiles: [],
+   inputMessage: '',
+   messageRAGLoadingIds: [],
+   pluginApiLoadingIds: [],
+   toolCallingStreamIds: {},
+ };
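
This state is typically read through selectors. A hedged sketch of how the two id collections might be consumed (the selector names here are illustrative, not exports of this package):

// A message is generating while its id sits in chatLoadingIds; a tool call
// is animating while its flag in toolCallingStreamIds is true.
const isMessageGenerating = (id: string) => (s: ChatAIChatState) =>
  s.chatLoadingIds.includes(id);

const isToolCallStreaming = (id: string, index: number) => (s: ChatAIChatState) =>
  s.toolCallingStreamIds[id]?.[index] ?? false;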
@@ -5,7 +5,7 @@ import { chainLangDetect } from '@/chains/langDetect';
  import { chainTranslate } from '@/chains/translate';
  import { chatService } from '@/services/chat';
  import { messageService } from '@/services/message';
- import { messageMapKey } from '@/store/chat/slices/message/utils';
+ import { messageMapKey } from '@/store/chat/utils/messageMapKey';
 
  import { useChatStore } from '../../store';
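
The last hunk only moves the messageMapKey import from the message slice to a shared utils module; the implementation itself is not shown in this diff. From the call sites above (messageMapKey(activeId, topicId) and messageMapKey(activeId, null)), a plausible reconstruction is:

// Hypothetical sketch inferred from the call sites; the real module lives at
// @/store/chat/utils/messageMapKey and may differ.
export const messageMapKey = (sessionId: string, topicId?: string | null) =>
  `${sessionId}_${topicId ?? null}`;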