@lobehub/chat 1.22.5 → 1.22.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.22.6](https://github.com/lobehub/lobe-chat/compare/v1.22.5...v1.22.6)
6
+
7
+ <sup>Released on **2024-10-13**</sup>
8
+
9
+ #### 🐛 Bug Fixes
10
+
11
+ - **misc**: Fix images not going into chat context.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's fixed
19
+
20
+ - **misc**: Fix images not going into chat context, closes [#4361](https://github.com/lobehub/lobe-chat/issues/4361) ([f17ab49](https://github.com/lobehub/lobe-chat/commit/f17ab49))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 1.22.5](https://github.com/lobehub/lobe-chat/compare/v1.22.4...v1.22.5)
6
31
 
7
32
  <sup>Released on **2024-10-13**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.22.5",
3
+ "version": "1.22.6",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -30,7 +30,7 @@ const SkeletonList = memo<SkeletonListProps>(({ mobile }) => {
30
30
  const { cx, styles } = useStyles();
31
31
 
32
32
  return (
33
- <Flexbox gap={24} padding={mobile ? 8 : 12} style={{ marginTop: 24 + (mobile ? 0 : 64) }}>
33
+ <Flexbox gap={24} padding={mobile ? 8 : 12} style={{ marginTop: 24 }}>
34
34
  <Skeleton
35
35
  active
36
36
  avatar={{ size: mobile ? 32 : 40 }}
@@ -24,7 +24,7 @@ import { setNamespace } from '@/utils/storeDebug';
24
24
  import { chatSelectors, topicSelectors } from '../../selectors';
25
25
  import { ChatRAGAction, chatRag } from './actions/rag';
26
26
 
27
- const n = setNamespace('m');
27
+ const n = setNamespace('ai');
28
28
 
29
29
  export interface SendMessageParams {
30
30
  message: string;
@@ -47,17 +47,21 @@ interface ProcessMessageParams {
47
47
  }
48
48
 
49
49
  export interface ChatAIChatAction extends ChatRAGAction {
50
- // create
50
+ /**
51
+ * Sends a new message to the AI chat system
52
+ */
51
53
  sendMessage: (params: SendMessageParams) => Promise<void>;
52
54
  /**
53
- * regenerate message
54
- * trace enabled
55
- * @param id
55
+ * Regenerates a specific message in the chat
56
56
  */
57
57
  regenerateMessage: (id: string) => Promise<void>;
58
-
59
- // delete
58
+ /**
59
+ * Deletes an existing message and generates a new one in its place
60
+ */
60
61
  delAndRegenerateMessage: (id: string) => Promise<void>;
62
+ /**
63
+ * Interrupts the ongoing ai message generation process
64
+ */
61
65
  stopGenerateMessage: () => void;
62
66
 
63
67
  // ========= ↓ Internal Method ↓ ========== //
@@ -65,7 +69,8 @@ export interface ChatAIChatAction extends ChatRAGAction {
65
69
  // ========================================== //
66
70
 
67
71
  /**
68
- * core process of the AI message (include preprocess and postprocess)
72
+ * Executes the core processing logic for AI messages
73
+ * including preprocessing and postprocessing steps
69
74
  */
70
75
  internal_coreProcessMessage: (
71
76
  messages: ChatMessage[],
@@ -73,7 +78,7 @@ export interface ChatAIChatAction extends ChatRAGAction {
73
78
  params?: ProcessMessageParams,
74
79
  ) => Promise<void>;
75
80
  /**
76
- * the method to fetch the AI message
81
+ * Retrieves an AI-generated chat message from the backend service
77
82
  */
78
83
  internal_fetchAIChatMessage: (
79
84
  messages: ChatMessage[],
@@ -83,11 +88,12 @@ export interface ChatAIChatAction extends ChatRAGAction {
83
88
  isFunctionCall: boolean;
84
89
  traceId?: string;
85
90
  }>;
86
-
91
+ /**
92
+ * Resends a specific message, optionally using a trace ID for tracking
93
+ */
87
94
  internal_resendMessage: (id: string, traceId?: string) => Promise<void>;
88
-
89
95
  /**
90
- * method to toggle ai message generating loading
96
+ * Toggles the loading state for AI message generation, managing the UI feedback
91
97
  */
92
98
  internal_toggleChatLoading: (
93
99
  loading: boolean,
@@ -95,7 +101,7 @@ export interface ChatAIChatAction extends ChatRAGAction {
95
101
  action?: string,
96
102
  ) => AbortController | undefined;
97
103
  /**
98
- * method to toggle the tool calling loading state
104
+ * Controls the streaming state of tool calling processes, updating the UI accordingly
99
105
  */
100
106
  internal_toggleToolCallingStreaming: (id: string, streaming: boolean[] | undefined) => void;
101
107
  }
@@ -139,7 +145,7 @@ export const chatAiChat: StateCreator<
139
145
  // if message is empty or no files, then stop
140
146
  if (!message && !hasFile) return;
141
147
 
142
- set({ isCreatingMessage: true }, false, 'creatingMessage/start');
148
+ set({ isCreatingMessage: true }, false, n('creatingMessage/start'));
143
149
 
144
150
  const newMessage: CreateMessageParams = {
145
151
  content: message,
@@ -184,10 +190,7 @@ export const chatAiChat: StateCreator<
184
190
  ...get().messagesMap,
185
191
  [messageMapKey(activeId, topicId)]: get().messagesMap[mapKey],
186
192
  };
187
- set({ messagesMap: newMaps }, false, 'internal_copyMessages');
188
-
189
- // get().internal_dispatchMessage({ type: 'deleteMessage', id: tempMessageId });
190
- get().internal_toggleMessageLoading(false, tempMessageId);
193
+ set({ messagesMap: newMaps }, false, n('moveMessagesToNewTopic'));
191
194
 
192
195
  // make the topic loading
193
196
  get().internal_updateTopicLoading(topicId, true);
@@ -199,9 +202,11 @@ export const chatAiChat: StateCreator<
199
202
 
200
203
  const id = await get().internal_createMessage(newMessage, {
201
204
  tempMessageId,
202
- skipRefresh: !onlyAddUserMessage,
205
+ skipRefresh: !onlyAddUserMessage && newMessage.fileList?.length === 0,
203
206
  });
204
207
 
208
+ if (tempMessageId) get().internal_toggleMessageLoading(false, tempMessageId);
209
+
205
210
  // switch to the new topic if create the new topic
206
211
  if (!!newTopicId) {
207
212
  await get().switchTopic(newTopicId, true);
@@ -228,7 +233,7 @@ export const chatAiChat: StateCreator<
228
233
  ragQuery: get().internal_shouldUseRAG() ? message : undefined,
229
234
  });
230
235
 
231
- set({ isCreatingMessage: false }, false, 'creatingMessage/stop');
236
+ set({ isCreatingMessage: false }, false, n('creatingMessage/stop'));
232
237
 
233
238
  const summaryTitle = async () => {
234
239
  // if autoCreateTopic is false, then stop
@@ -301,6 +301,7 @@ export const chatMessage: StateCreator<
301
301
 
302
302
  const id = await messageService.createMessage(message);
303
303
  if (!context?.skipRefresh) {
304
+ internal_toggleMessageLoading(true, tempId);
304
305
  await refreshMessages();
305
306
  }
306
307
 
@@ -9,7 +9,7 @@ export const preventLeavingFn = (e: BeforeUnloadEvent) => {
9
9
  export const toggleBooleanList = (ids: string[], id: string, loading: boolean) => {
10
10
  return produce(ids, (draft) => {
11
11
  if (loading) {
12
- draft.push(id);
12
+ if (!draft.includes(id)) draft.push(id);
13
13
  } else {
14
14
  const index = draft.indexOf(id);
15
15