@lobehub/chat 1.121.0 → 1.122.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +51 -0
  2. package/apps/desktop/package.json +1 -0
  3. package/apps/desktop/src/main/modules/networkProxy/dispatcher.ts +24 -2
  4. package/changelog/v1.json +18 -0
  5. package/next.config.ts +1 -0
  6. package/package.json +1 -1
  7. package/packages/const/src/index.ts +1 -0
  8. package/packages/const/src/session.ts +4 -11
  9. package/packages/database/src/models/__tests__/message.test.ts +41 -0
  10. package/packages/database/src/models/message.ts +21 -13
  11. package/packages/database/src/models/topic.ts +4 -9
  12. package/packages/types/src/aiChat.ts +55 -0
  13. package/packages/types/src/index.ts +4 -0
  14. package/packages/types/src/message/base.ts +17 -4
  15. package/packages/types/src/message/chat.ts +1 -15
  16. package/packages/types/src/message/index.ts +1 -0
  17. package/packages/types/src/message/rag.ts +21 -0
  18. package/packages/utils/src/index.ts +1 -0
  19. package/packages/utils/src/object.test.ts +11 -0
  20. package/src/app/[variants]/(main)/chat/(workspace)/@topic/features/TopicListContent/ByTimeMode/index.tsx +3 -3
  21. package/src/server/routers/lambda/__tests__/message.test.ts +30 -0
  22. package/src/server/routers/lambda/aiChat.test.ts +107 -0
  23. package/src/server/routers/lambda/aiChat.ts +80 -0
  24. package/src/server/routers/lambda/index.ts +2 -0
  25. package/src/server/routers/lambda/message.ts +7 -0
  26. package/src/server/services/aiChat/index.test.ts +57 -0
  27. package/src/server/services/aiChat/index.ts +36 -0
  28. package/src/services/aiChat.ts +12 -0
  29. package/src/services/message/_deprecated.ts +4 -0
  30. package/src/services/message/client.ts +5 -0
  31. package/src/services/message/server.ts +4 -0
  32. package/src/services/message/type.ts +2 -0
  33. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +11 -1
  34. package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +410 -0
  35. package/src/store/chat/slices/aiChat/actions/index.ts +7 -1
  36. package/src/store/chat/slices/message/action.ts +38 -1
  37. package/src/store/chat/slices/message/reducer.ts +11 -0
  38. package/src/store/chat/slices/topic/reducer.ts +14 -1
@@ -0,0 +1,410 @@
1
+ /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
2
+ // Disable the auto sort key eslint rule to make the code more logic and readable
3
+ import { INBOX_SESSION_ID, isDesktop } from '@lobechat/const';
4
+ import { knowledgeBaseQAPrompts } from '@lobechat/prompts';
5
+ import {
6
+ ChatMessage,
7
+ ChatTopic,
8
+ MessageSemanticSearchChunk,
9
+ SendMessageParams,
10
+ TraceNameMap,
11
+ } from '@lobechat/types';
12
+ import { t } from 'i18next';
13
+ import { StateCreator } from 'zustand/vanilla';
14
+
15
+ import { aiChatService } from '@/services/aiChat';
16
+ import { chatService } from '@/services/chat';
17
+ import { messageService } from '@/services/message';
18
+ import { getAgentStoreState } from '@/store/agent';
19
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/slices/chat';
20
+ import { aiModelSelectors, aiProviderSelectors, getAiInfraStoreState } from '@/store/aiInfra';
21
+ import type { ChatStore } from '@/store/chat/store';
22
+ import { getSessionStoreState } from '@/store/session';
23
+ import { WebBrowsingManifest } from '@/tools/web-browsing';
24
+ import { setNamespace } from '@/utils/storeDebug';
25
+
26
+ import { chatSelectors, topicSelectors } from '../../../selectors';
27
+ import { messageMapKey } from '../../../utils/messageMapKey';
28
+
29
+ const n = setNamespace('ai');
30
+
31
+ export interface AIGenerateV2Action {
32
+ /**
33
+ * Sends a new message to the AI chat system
34
+ */
35
+ sendMessageInServer: (params: SendMessageParams) => Promise<void>;
36
+ internal_refreshAiChat: (params: {
37
+ topics?: ChatTopic[];
38
+ messages: ChatMessage[];
39
+ sessionId: string;
40
+ topicId?: string;
41
+ }) => void;
42
+ /**
43
+ * Executes the core processing logic for AI messages
44
+ * including preprocessing and postprocessing steps
45
+ */
46
+ internal_execAgentRuntime: (params: {
47
+ messages: ChatMessage[];
48
+ userMessageId: string;
49
+ assistantMessageId: string;
50
+ isWelcomeQuestion?: boolean;
51
+ inSearchWorkflow?: boolean;
52
+ /**
53
+ * the RAG query content, should be embedding and used in the semantic search
54
+ */
55
+ ragQuery?: string;
56
+ threadId?: string;
57
+ inPortalThread?: boolean;
58
+ traceId?: string;
59
+ }) => Promise<void>;
60
+ }
61
+
62
+ export const generateAIChatV2: StateCreator<
63
+ ChatStore,
64
+ [['zustand/devtools', never]],
65
+ [],
66
+ AIGenerateV2Action
67
+ > = (set, get) => ({
68
+ sendMessageInServer: async ({ message, files, onlyAddUserMessage, isWelcomeQuestion }) => {
69
+ const { activeTopicId, activeId, activeThreadId, internal_execAgentRuntime } = get();
70
+ if (!activeId) return;
71
+
72
+ const fileIdList = files?.map((f) => f.id);
73
+
74
+ const hasFile = !!fileIdList && fileIdList.length > 0;
75
+
76
+ // if message is empty or no files, then stop
77
+ if (!message && !hasFile) return;
78
+
79
+ if (onlyAddUserMessage) {
80
+ await get().addUserMessage({ message, fileList: fileIdList });
81
+
82
+ return;
83
+ }
84
+
85
+ const messages = chatSelectors.activeBaseChats(get());
86
+
87
+ // use optimistic update to avoid the slow waiting
88
+ const tempId = get().internal_createTmpMessage({
89
+ content: message,
90
+ // if message has attached with files, then add files to message and the agent
91
+ files: fileIdList,
92
+ role: 'user',
93
+ sessionId: activeId,
94
+ // if there is activeTopicId,then add topicId to message
95
+ topicId: activeTopicId,
96
+ threadId: activeThreadId,
97
+ });
98
+
99
+ get().internal_toggleMessageLoading(true, tempId);
100
+ set({ isCreatingMessage: true }, false, 'creatingMessage/start');
101
+
102
+ const { model, provider } = agentSelectors.currentAgentConfig(getAgentStoreState());
103
+
104
+ const data = await aiChatService.sendMessageInServer({
105
+ newUserMessage: {
106
+ content: message,
107
+ files: fileIdList,
108
+ },
109
+ // if there is activeTopicId,then add topicId to message
110
+ topicId: activeTopicId,
111
+ threadId: activeThreadId,
112
+ newTopic: !activeTopicId
113
+ ? {
114
+ topicMessageIds: messages.map((m) => m.id),
115
+ title: t('defaultTitle', { ns: 'topic' }),
116
+ }
117
+ : undefined,
118
+ sessionId: activeId === INBOX_SESSION_ID ? undefined : activeId,
119
+ newAssistantMessage: { model, provider: provider! },
120
+ });
121
+
122
+ // refresh the total data
123
+ get().internal_refreshAiChat({
124
+ messages: data.messages,
125
+ topics: data.topics,
126
+ sessionId: activeId,
127
+ topicId: data.topicId,
128
+ });
129
+ get().internal_dispatchMessage({ type: 'deleteMessage', id: tempId });
130
+
131
+ if (!activeTopicId) {
132
+ await get().switchTopic(data.topicId!, true);
133
+ }
134
+
135
+ get().internal_toggleMessageLoading(false, tempId);
136
+
137
+ // update assistant update to make it rerank
138
+ getSessionStoreState().triggerSessionUpdate(get().activeId);
139
+
140
+ // Get the current messages to generate AI response
141
+ // remove the latest assistant message id
142
+ const baseMessages = chatSelectors
143
+ .activeBaseChats(get())
144
+ .filter((item) => item.id !== data.assistantMessageId);
145
+
146
+ try {
147
+ await internal_execAgentRuntime({
148
+ messages: baseMessages,
149
+ userMessageId: data.userMessageId,
150
+ assistantMessageId: data.assistantMessageId,
151
+ isWelcomeQuestion,
152
+ ragQuery: get().internal_shouldUseRAG() ? message : undefined,
153
+ threadId: activeThreadId,
154
+ });
155
+ set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
156
+
157
+ const summaryTitle = async () => {
158
+ // check activeTopic and then auto update topic title
159
+ if (data.isCreatNewTopic) {
160
+ await get().summaryTopicTitle(data.topicId, data.messages);
161
+ return;
162
+ }
163
+
164
+ if (!activeTopicId) return;
165
+
166
+ const topic = topicSelectors.getTopicById(activeTopicId)(get());
167
+
168
+ if (topic && !topic.title) {
169
+ const chats = chatSelectors.getBaseChatsByKey(messageMapKey(activeId, topic.id))(get());
170
+ await get().summaryTopicTitle(topic.id, chats);
171
+ }
172
+ };
173
+ //
174
+ // // if there is relative files, then add files to agent
175
+ // // only available in server mode
176
+ const userFiles = chatSelectors.currentUserFiles(get()).map((f) => f.id);
177
+ const addFilesToAgent = async () => {
178
+ await getAgentStoreState().addFilesToAgent(userFiles, false);
179
+ };
180
+
181
+ await Promise.all([summaryTitle(), addFilesToAgent()]);
182
+ } catch (e) {
183
+ console.error(e);
184
+ set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
185
+ }
186
+ },
187
+
188
+ internal_refreshAiChat: ({ topics, messages, sessionId, topicId }) => {
189
+ set(
190
+ {
191
+ topicMaps: topics ? { ...get().topicMaps, [sessionId]: topics } : get().topicMaps,
192
+ messagesMap: { ...get().messagesMap, [messageMapKey(sessionId, topicId)]: messages },
193
+ },
194
+ false,
195
+ 'refreshAiChat',
196
+ );
197
+ },
198
+
199
+ internal_execAgentRuntime: async (params) => {
200
+ const {
201
+ assistantMessageId: assistantId,
202
+ userMessageId,
203
+ ragQuery,
204
+ messages: originalMessages,
205
+ } = params;
206
+ const {
207
+ internal_fetchAIChatMessage,
208
+ triggerToolCalls,
209
+ refreshMessages,
210
+ internal_updateMessageRAG,
211
+ } = get();
212
+
213
+ // create a new array to avoid the original messages array change
214
+ const messages = [...originalMessages];
215
+
216
+ const agentStoreState = getAgentStoreState();
217
+ const { model, provider, chatConfig } = agentSelectors.currentAgentConfig(agentStoreState);
218
+
219
+ let fileChunks: MessageSemanticSearchChunk[] | undefined;
220
+ let ragQueryId;
221
+
222
+ // go into RAG flow if there is ragQuery flag
223
+ if (ragQuery) {
224
+ // 1. get the relative chunks from semantic search
225
+ const { chunks, queryId, rewriteQuery } = await get().internal_retrieveChunks(
226
+ userMessageId,
227
+ ragQuery,
228
+ // should skip the last content
229
+ messages.map((m) => m.content).slice(0, messages.length - 1),
230
+ );
231
+
232
+ ragQueryId = queryId;
233
+
234
+ const lastMsg = messages.pop() as ChatMessage;
235
+
236
+ // 2. build the retrieve context messages
237
+ const knowledgeBaseQAContext = knowledgeBaseQAPrompts({
238
+ chunks,
239
+ userQuery: lastMsg.content,
240
+ rewriteQuery,
241
+ knowledge: agentSelectors.currentEnabledKnowledge(agentStoreState),
242
+ });
243
+
244
+ // 3. add the retrieve context messages to the messages history
245
+ messages.push({
246
+ ...lastMsg,
247
+ content: (lastMsg.content + '\n\n' + knowledgeBaseQAContext).trim(),
248
+ });
249
+
250
+ fileChunks = chunks.map((c) => ({ id: c.id, similarity: c.similarity }));
251
+
252
+ if (fileChunks.length > 0) {
253
+ await internal_updateMessageRAG(assistantId, { ragQueryId, fileChunks });
254
+ }
255
+ }
256
+
257
+ // 3. place a search with the search working model if this model is not support tool use
258
+ const aiInfraStoreState = getAiInfraStoreState();
259
+ const isModelSupportToolUse = aiModelSelectors.isModelSupportToolUse(
260
+ model,
261
+ provider!,
262
+ )(aiInfraStoreState);
263
+ const isProviderHasBuiltinSearch = aiProviderSelectors.isProviderHasBuiltinSearch(provider!)(
264
+ aiInfraStoreState,
265
+ );
266
+ const isModelHasBuiltinSearch = aiModelSelectors.isModelHasBuiltinSearch(
267
+ model,
268
+ provider!,
269
+ )(aiInfraStoreState);
270
+ const useModelBuiltinSearch = agentChatConfigSelectors.useModelBuiltinSearch(agentStoreState);
271
+ const useModelSearch =
272
+ (isProviderHasBuiltinSearch || isModelHasBuiltinSearch) && useModelBuiltinSearch;
273
+ const isAgentEnableSearch = agentChatConfigSelectors.isAgentEnableSearch(agentStoreState);
274
+
275
+ if (isAgentEnableSearch && !useModelSearch && !isModelSupportToolUse) {
276
+ const { model, provider } = agentChatConfigSelectors.searchFCModel(agentStoreState);
277
+
278
+ let isToolsCalling = false;
279
+ let isError = false;
280
+
281
+ const abortController = get().internal_toggleChatLoading(
282
+ true,
283
+ assistantId,
284
+ n('generateMessage(start)', { messageId: assistantId, messages }),
285
+ );
286
+
287
+ get().internal_toggleSearchWorkflow(true, assistantId);
288
+ await chatService.fetchPresetTaskResult({
289
+ params: { messages, model, provider, plugins: [WebBrowsingManifest.identifier] },
290
+ onFinish: async (_, { toolCalls, usage }) => {
291
+ if (toolCalls && toolCalls.length > 0) {
292
+ get().internal_toggleToolCallingStreaming(assistantId, undefined);
293
+ // update tools calling
294
+ await get().internal_updateMessageContent(assistantId, '', {
295
+ toolCalls,
296
+ metadata: usage,
297
+ model,
298
+ provider,
299
+ });
300
+ }
301
+ },
302
+ trace: {
303
+ traceId: params.traceId,
304
+ sessionId: get().activeId,
305
+ topicId: get().activeTopicId,
306
+ traceName: TraceNameMap.SearchIntentRecognition,
307
+ },
308
+ abortController,
309
+ onMessageHandle: async (chunk) => {
310
+ if (chunk.type === 'tool_calls') {
311
+ get().internal_toggleSearchWorkflow(false, assistantId);
312
+ get().internal_toggleToolCallingStreaming(assistantId, chunk.isAnimationActives);
313
+ get().internal_dispatchMessage({
314
+ id: assistantId,
315
+ type: 'updateMessage',
316
+ value: { tools: get().internal_transformToolCalls(chunk.tool_calls) },
317
+ });
318
+ isToolsCalling = true;
319
+ }
320
+
321
+ if (chunk.type === 'text') {
322
+ abortController!.abort('not fc');
323
+ }
324
+ },
325
+ onErrorHandle: async (error) => {
326
+ isError = true;
327
+ await messageService.updateMessageError(assistantId, error);
328
+ await refreshMessages();
329
+ },
330
+ });
331
+
332
+ get().internal_toggleChatLoading(
333
+ false,
334
+ assistantId,
335
+ n('generateMessage(start)', { messageId: assistantId, messages }),
336
+ );
337
+ get().internal_toggleSearchWorkflow(false, assistantId);
338
+
339
+ // if there is error, then stop
340
+ if (isError) return;
341
+
342
+ // if it's the function call message, trigger the function method
343
+ if (isToolsCalling) {
344
+ get().internal_toggleMessageInToolsCalling(true, assistantId);
345
+ await refreshMessages();
346
+ await triggerToolCalls(assistantId, {
347
+ threadId: params?.threadId,
348
+ inPortalThread: params?.inPortalThread,
349
+ });
350
+
351
+ // then story the workflow
352
+ return;
353
+ }
354
+ }
355
+
356
+ // 4. fetch the AI response
357
+ const { isFunctionCall, content } = await internal_fetchAIChatMessage({
358
+ messages,
359
+ messageId: assistantId,
360
+ params,
361
+ model,
362
+ provider: provider!,
363
+ });
364
+
365
+ // 5. if it's the function call message, trigger the function method
366
+ if (isFunctionCall) {
367
+ get().internal_toggleMessageInToolsCalling(true, assistantId);
368
+ await refreshMessages();
369
+ await triggerToolCalls(assistantId, {
370
+ threadId: params?.threadId,
371
+ inPortalThread: params?.inPortalThread,
372
+ });
373
+ } else {
374
+ // 显示桌面通知(仅在桌面端且窗口隐藏时)
375
+ if (isDesktop) {
376
+ try {
377
+ // 动态导入桌面通知服务,避免在非桌面端环境中导入
378
+ const { desktopNotificationService } = await import(
379
+ '@/services/electron/desktopNotification'
380
+ );
381
+
382
+ await desktopNotificationService.showNotification({
383
+ body: content,
384
+ title: t('notification.finishChatGeneration', { ns: 'electron' }),
385
+ });
386
+ } catch (error) {
387
+ // 静默处理错误,不影响正常流程
388
+ console.error('Desktop notification error:', error);
389
+ }
390
+ }
391
+ }
392
+
393
+ // 6. summary history if context messages is larger than historyCount
394
+ const historyCount = agentChatConfigSelectors.historyCount(agentStoreState);
395
+
396
+ if (
397
+ agentChatConfigSelectors.enableHistoryCount(agentStoreState) &&
398
+ chatConfig.enableCompressHistory &&
399
+ originalMessages.length > historyCount
400
+ ) {
401
+ // after generation: [u1,a1,u2,a2,u3,a3]
402
+ // but the `originalMessages` is still: [u1,a1,u2,a2,u3]
403
+ // So if historyCount=2, we need to summary [u1,a1,u2,a2]
404
+ // because user find UI is [u1,a1,u2,a2 | u3,a3]
405
+ const historyMessages = originalMessages.slice(0, -historyCount + 1);
406
+
407
+ await get().internal_summaryHistory(historyMessages);
408
+ }
409
+ },
410
+ });
@@ -3,10 +3,15 @@ import { StateCreator } from 'zustand/vanilla';
3
3
  import { ChatStore } from '@/store/chat/store';
4
4
 
5
5
  import { AIGenerateAction, generateAIChat } from './generateAIChat';
6
+ import { AIGenerateV2Action, generateAIChatV2 } from './generateAIChatV2';
6
7
  import { ChatMemoryAction, chatMemory } from './memory';
7
8
  import { ChatRAGAction, chatRag } from './rag';
8
9
 
9
- export interface ChatAIChatAction extends ChatRAGAction, ChatMemoryAction, AIGenerateAction {
10
+ export interface ChatAIChatAction
11
+ extends ChatRAGAction,
12
+ ChatMemoryAction,
13
+ AIGenerateAction,
14
+ AIGenerateV2Action {
10
15
  /**/
11
16
  }
12
17
 
@@ -19,4 +24,5 @@ export const chatAiChat: StateCreator<
19
24
  ...chatRag(...params),
20
25
  ...generateAIChat(...params),
21
26
  ...chatMemory(...params),
27
+ ...generateAIChatV2(...params),
22
28
  });
@@ -22,6 +22,7 @@ import {
22
22
  ModelReasoning,
23
23
  } from '@/types/message';
24
24
  import { ChatImageItem } from '@/types/message/image';
25
+ import { UpdateMessageRAGParams } from '@/types/message/rag';
25
26
  import { GroundingSearch } from '@/types/search';
26
27
  import { TraceEventPayloads } from '@/types/trace';
27
28
  import { Action, setNamespace } from '@/utils/storeDebug';
@@ -39,6 +40,7 @@ const SWR_USE_FETCH_MESSAGES = 'SWR_USE_FETCH_MESSAGES';
39
40
  export interface ChatMessageAction {
40
41
  // create
41
42
  addAIMessage: () => Promise<void>;
43
+ addUserMessage: (params: { message: string; fileList?: string[] }) => Promise<void>;
42
44
  // delete
43
45
  /**
44
46
  * clear message on the active session
@@ -59,10 +61,11 @@ export interface ChatMessageAction {
59
61
  ) => SWRResponse<ChatMessage[]>;
60
62
  copyMessage: (id: string, content: string) => Promise<void>;
61
63
  refreshMessages: () => Promise<void>;
62
-
64
+ replaceMessages: (messages: ChatMessage[]) => void;
63
65
  // ========= ↓ Internal Method ↓ ========== //
64
66
  // ========================================== //
65
67
  // ========================================== //
68
+ internal_updateMessageRAG: (id: string, input: UpdateMessageRAGParams) => Promise<void>;
66
69
 
67
70
  /**
68
71
  * update message at the frontend
@@ -213,6 +216,21 @@ export const chatMessage: StateCreator<
213
216
 
214
217
  updateInputMessage('');
215
218
  },
219
+ addUserMessage: async ({ message, fileList }) => {
220
+ const { internal_createMessage, updateInputMessage, activeTopicId, activeId } = get();
221
+ if (!activeId) return;
222
+
223
+ await internal_createMessage({
224
+ content: message,
225
+ files: fileList,
226
+ role: 'user',
227
+ sessionId: activeId,
228
+ // if there is activeTopicId,then add topicId to message
229
+ topicId: activeTopicId,
230
+ });
231
+
232
+ updateInputMessage('');
233
+ },
216
234
  copyMessage: async (id, content) => {
217
235
  await copyToClipboard(content);
218
236
 
@@ -266,6 +284,25 @@ export const chatMessage: StateCreator<
266
284
  refreshMessages: async () => {
267
285
  await mutate([SWR_USE_FETCH_MESSAGES, get().activeId, get().activeTopicId]);
268
286
  },
287
+ replaceMessages: (messages) => {
288
+ set(
289
+ {
290
+ messagesMap: {
291
+ ...get().messagesMap,
292
+ [messageMapKey(get().activeId, get().activeTopicId)]: messages,
293
+ },
294
+ },
295
+ false,
296
+ 'replaceMessages',
297
+ );
298
+ },
299
+
300
+ internal_updateMessageRAG: async (id, data) => {
301
+ const { refreshMessages } = get();
302
+
303
+ await messageService.updateMessageRAG(id, data);
304
+ await refreshMessages();
305
+ },
269
306
 
270
307
  // the internal process method of the AI message
271
308
  internal_dispatchMessage: (payload) => {
@@ -11,6 +11,11 @@ import {
11
11
  import { merge } from '@/utils/merge';
12
12
 
13
13
  interface UpdateMessages {
14
+ type: 'updateMessages';
15
+ value: ChatMessage[];
16
+ }
17
+
18
+ interface UpdateMessage {
14
19
  id: string;
15
20
  type: 'updateMessage';
16
21
  value: Partial<ChatMessage>;
@@ -72,6 +77,7 @@ interface UpdateMessageExtra {
72
77
 
73
78
  export type MessageDispatch =
74
79
  | CreateMessage
80
+ | UpdateMessage
75
81
  | UpdateMessages
76
82
  | UpdatePluginState
77
83
  | UpdateMessageExtra
@@ -194,6 +200,11 @@ export const messagesReducer = (state: ChatMessage[], payload: MessageDispatch):
194
200
  draftState.push({ ...value, createdAt: Date.now(), id, meta: {}, updatedAt: Date.now() });
195
201
  });
196
202
  }
203
+
204
+ case 'updateMessages': {
205
+ return payload.value;
206
+ }
207
+
197
208
  case 'deleteMessage': {
198
209
  return produce(state, (draft) => {
199
210
  const { id } = payload;
@@ -14,12 +14,21 @@ interface UpdateChatTopicAction {
14
14
  value: Partial<ChatTopic>;
15
15
  }
16
16
 
17
+ interface UpdateTopicsAction {
18
+ type: 'updateTopics';
19
+ value: ChatTopic[];
20
+ }
21
+
17
22
  interface DeleteChatTopicAction {
18
23
  id: string;
19
24
  type: 'deleteTopic';
20
25
  }
21
26
 
22
- export type ChatTopicDispatch = AddChatTopicAction | UpdateChatTopicAction | DeleteChatTopicAction;
27
+ export type ChatTopicDispatch =
28
+ | AddChatTopicAction
29
+ | UpdateChatTopicAction
30
+ | DeleteChatTopicAction
31
+ | UpdateTopicsAction;
23
32
 
24
33
  export const topicReducer = (state: ChatTopic[] = [], payload: ChatTopicDispatch): ChatTopic[] => {
25
34
  switch (payload.type) {
@@ -51,6 +60,10 @@ export const topicReducer = (state: ChatTopic[] = [], payload: ChatTopicDispatch
51
60
  });
52
61
  }
53
62
 
63
+ case 'updateTopics': {
64
+ return payload.value;
65
+ }
66
+
54
67
  case 'deleteTopic': {
55
68
  return produce(state, (draftState) => {
56
69
  const topicIndex = draftState.findIndex((topic) => topic.id === payload.id);