@lobehub/lobehub 2.0.0-next.356 → 2.0.0-next.358

This diff covers the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,58 @@
 
  # Changelog
 
+ ## [Version 2.0.0-next.358](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.357...v2.0.0-next.358)
+
+ <sup>Released on **2026-01-23**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **store**: Delete message before regeneration.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **store**: Delete message before regeneration, closes [#11760](https://github.com/lobehub/lobe-chat/issues/11760) ([a8a6300](https://github.com/lobehub/lobe-chat/commit/a8a6300))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ## [Version 2.0.0-next.357](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.356...v2.0.0-next.357)
+
+ <sup>Released on **2026-01-23**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **model-runtime**: Handle null content in anthropic message builder.
+ - **misc**: Page content switch mismatch.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **model-runtime**: Handle null content in anthropic message builder, closes [#11756](https://github.com/lobehub/lobe-chat/issues/11756) ([539753a](https://github.com/lobehub/lobe-chat/commit/539753a))
+ - **misc**: Page content switch mismatch, closes [#11758](https://github.com/lobehub/lobe-chat/issues/11758) ([fdc8f95](https://github.com/lobehub/lobe-chat/commit/fdc8f95))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 2.0.0-next.356](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.355...v2.0.0-next.356)
 
  <sup>Released on **2026-01-23**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,18 @@
  [
+   {
+     "children": {},
+     "date": "2026-01-23",
+     "version": "2.0.0-next.358"
+   },
+   {
+     "children": {
+       "fixes": [
+         "Page content switch mismatch."
+       ]
+     },
+     "date": "2026-01-23",
+     "version": "2.0.0-next.357"
+   },
    {
      "children": {
        "features": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/lobehub",
-   "version": "2.0.0-next.356",
+   "version": "2.0.0-next.358",
    "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -281,6 +281,62 @@ describe('anthropicHelpers', () => {
        const result = await buildAnthropicMessage(message);
        expect(result).toBeUndefined();
      });
+
+     it('should handle assistant message with tool_calls but null content', async () => {
+       const message: OpenAIChatMessage = {
+         content: null as any,
+         role: 'assistant',
+         tool_calls: [
+           {
+             id: 'call1',
+             type: 'function',
+             function: {
+               name: 'search_people',
+               arguments: '{"location":"Singapore"}',
+             },
+           },
+         ],
+       };
+       const result = await buildAnthropicMessage(message);
+       expect(result!.role).toBe('assistant');
+       // null content should be filtered out, only tool_use remains
+       expect(result!.content).toEqual([
+         {
+           id: 'call1',
+           input: { location: 'Singapore' },
+           name: 'search_people',
+           type: 'tool_use',
+         },
+       ]);
+     });
+
+     it('should handle assistant message with tool_calls but empty string content', async () => {
+       const message: OpenAIChatMessage = {
+         content: '',
+         role: 'assistant',
+         tool_calls: [
+           {
+             id: 'call1',
+             type: 'function',
+             function: {
+               name: 'search_people',
+               arguments: '{"location":"Singapore"}',
+             },
+           },
+         ],
+       };
+       const result = await buildAnthropicMessage(message);
+       expect(result!.role).toBe('assistant');
+       // empty string content should be filtered out, only tool_use remains
+       expect(result!.content).toEqual([
+         {
+           id: 'call1',
+           input: { location: 'Singapore' },
+           name: 'search_people',
+           type: 'tool_use',
+         },
+       ]);
+     });
    });
 
    describe('buildAnthropicMessages', () => {
@@ -526,6 +582,320 @@ describe('anthropicHelpers', () => {
        ]);
      });
 
+     it('should handle tool message with null content', async () => {
+       const messages: OpenAIChatMessage[] = [
+         {
+           content: '搜索人员',
+           role: 'user',
+         },
+         {
+           content: '正在搜索...',
+           role: 'assistant',
+           tool_calls: [
+             {
+               function: {
+                 arguments: '{"location": "Singapore"}',
+                 name: 'search_people',
+               },
+               id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               type: 'function',
+             },
+           ],
+         },
+         {
+           content: null as any,
+           name: 'search_people',
+           role: 'tool',
+           tool_call_id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+         },
+       ];
+
+       const contents = await buildAnthropicMessages(messages);
+
+       expect(contents).toEqual([
+         { content: '搜索人员', role: 'user' },
+         {
+           content: [
+             { text: '正在搜索...', type: 'text' },
+             {
+               id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               input: { location: 'Singapore' },
+               name: 'search_people',
+               type: 'tool_use',
+             },
+           ],
+           role: 'assistant',
+         },
+         {
+           content: [
+             {
+               content: [{ text: '<empty_content>', type: 'text' }],
+               tool_use_id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               type: 'tool_result',
+             },
+           ],
+           role: 'user',
+         },
+       ]);
+     });
+
+     it('should handle tool message with empty string content', async () => {
+       const messages: OpenAIChatMessage[] = [
+         {
+           content: '搜索人员',
+           role: 'user',
+         },
+         {
+           content: '正在搜索...',
+           role: 'assistant',
+           tool_calls: [
+             {
+               function: {
+                 arguments: '{"location": "Singapore"}',
+                 name: 'search_people',
+               },
+               id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               type: 'function',
+             },
+           ],
+         },
+         {
+           content: '',
+           name: 'search_people',
+           role: 'tool',
+           tool_call_id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+         },
+       ];
+
+       const contents = await buildAnthropicMessages(messages);
+
+       expect(contents).toEqual([
+         { content: '搜索人员', role: 'user' },
+         {
+           content: [
+             { text: '正在搜索...', type: 'text' },
+             {
+               id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               input: { location: 'Singapore' },
+               name: 'search_people',
+               type: 'tool_use',
+             },
+           ],
+           role: 'assistant',
+         },
+         {
+           content: [
+             {
+               content: [{ text: '<empty_content>', type: 'text' }],
+               tool_use_id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               type: 'tool_result',
+             },
+           ],
+           role: 'user',
+         },
+       ]);
+     });
+
+     it('should handle tool message with array content', async () => {
+       const messages: OpenAIChatMessage[] = [
+         {
+           content: '搜索人员',
+           role: 'user',
+         },
+         {
+           content: '正在搜索...',
+           role: 'assistant',
+           tool_calls: [
+             {
+               function: {
+                 arguments: '{"location": "Singapore"}',
+                 name: 'search_people',
+               },
+               id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               type: 'function',
+             },
+           ],
+         },
+         {
+           content: [
+             { type: 'text', text: 'Found 5 candidates' },
+             { type: 'text', text: 'Result details here' },
+           ] as any,
+           name: 'search_people',
+           role: 'tool',
+           tool_call_id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+         },
+       ];
+
+       const contents = await buildAnthropicMessages(messages);
+
+       expect(contents).toEqual([
+         { content: '搜索人员', role: 'user' },
+         {
+           content: [
+             { text: '正在搜索...', type: 'text' },
+             {
+               id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               input: { location: 'Singapore' },
+               name: 'search_people',
+               type: 'tool_use',
+             },
+           ],
+           role: 'assistant',
+         },
+         {
+           content: [
+             {
+               content: [
+                 { type: 'text', text: 'Found 5 candidates' },
+                 { type: 'text', text: 'Result details here' },
+               ],
+               tool_use_id: 'toolu_01CnXPcBEqsGGbvRriem3Rth',
+               type: 'tool_result',
+             },
+           ],
+           role: 'user',
+         },
+       ]);
+     });
+
+     it('should handle tool message with array content containing image', async () => {
+       vi.mocked(parseDataUri).mockReturnValueOnce({
+         mimeType: 'image/png',
+         base64: 'screenshotBase64Data',
+         type: 'base64',
+       });
+
+       const messages: OpenAIChatMessage[] = [
+         {
+           content: '截图分析',
+           role: 'user',
+         },
+         {
+           content: '正在截图...',
+           role: 'assistant',
+           tool_calls: [
+             {
+               function: {
+                 arguments: '{"url": "https://example.com"}',
+                 name: 'screenshot',
+               },
+               id: 'toolu_screenshot_123',
+               type: 'function',
+             },
+           ],
+         },
+         {
+           content: [
+             { type: 'text', text: 'Screenshot captured' },
+             {
+               type: 'image_url',
+               image_url: { url: 'data:image/png;base64,screenshotBase64Data' },
+             },
+           ] as any,
+           name: 'screenshot',
+           role: 'tool',
+           tool_call_id: 'toolu_screenshot_123',
+         },
+       ];
+
+       const contents = await buildAnthropicMessages(messages);
+
+       expect(contents).toEqual([
+         { content: '截图分析', role: 'user' },
+         {
+           content: [
+             { text: '正在截图...', type: 'text' },
+             {
+               id: 'toolu_screenshot_123',
+               input: { url: 'https://example.com' },
+               name: 'screenshot',
+               type: 'tool_use',
+             },
+           ],
+           role: 'assistant',
+         },
+         {
+           content: [
+             {
+               content: [
+                 { type: 'text', text: 'Screenshot captured' },
+                 {
+                   type: 'image',
+                   source: {
+                     type: 'base64',
+                     media_type: 'image/png',
+                     data: 'screenshotBase64Data',
+                   },
+                 },
+               ],
+               tool_use_id: 'toolu_screenshot_123',
+               type: 'tool_result',
+             },
+           ],
+           role: 'user',
+         },
+       ]);
+     });
+
+     it('should handle orphan tool message with null content', async () => {
+       // Tool message without corresponding assistant tool_call
+       const messages: OpenAIChatMessage[] = [
+         {
+           content: null as any,
+           name: 'some_tool',
+           role: 'tool',
+           tool_call_id: 'orphan_tool_call_id',
+         },
+         {
+           content: 'Continue',
+           role: 'user',
+         },
+       ];
+
+       const contents = await buildAnthropicMessages(messages);
+
+       expect(contents).toEqual([
+         {
+           content: '<empty_content>',
+           role: 'user',
+         },
+         {
+           content: 'Continue',
+           role: 'user',
+         },
+       ]);
+     });
+
+     it('should handle orphan tool message with empty string content', async () => {
+       // Tool message without corresponding assistant tool_call
+       const messages: OpenAIChatMessage[] = [
+         {
+           content: '',
+           name: 'some_tool',
+           role: 'tool',
+           tool_call_id: 'orphan_tool_call_id',
+         },
+         {
+           content: 'Continue',
+           role: 'user',
+         },
+       ];
+
+       const contents = await buildAnthropicMessages(messages);
+
+       expect(contents).toEqual([
+         {
+           content: '<empty_content>',
+           role: 'user',
+         },
+         {
+           content: 'Continue',
+           role: 'user',
+         },
+       ]);
+     });
+
      it('should work well starting with tool message', async () => {
        const messages: OpenAIChatMessage[] = [
          {
@@ -114,10 +114,13 @@ export const buildAnthropicMessage = async (
    // if there is tool_calls , we need to covert the tool_calls to tool_use content block
    // refs: https://docs.anthropic.com/claude/docs/tool-use#tool-use-and-tool-result-content-blocks
    if (message.tool_calls && message.tool_calls.length > 0) {
+     // Handle content: string with text, array, null/undefined/empty -> filter out
      const rawContent =
-       typeof content === 'string'
-         ? ([{ text: message.content, type: 'text' }] as UserMessageContentPart[])
-         : content;
+       typeof content === 'string' && content.trim()
+         ? ([{ text: content, type: 'text' }] as UserMessageContentPart[])
+         : Array.isArray(content)
+           ? content
+           : []; // null/undefined/empty string -> empty array (will be filtered)
 
      const messageContent = await buildArrayContent(rawContent);
@@ -180,10 +183,17 @@ export const buildAnthropicMessages = async (
 
    // refs: https://docs.anthropic.com/claude/docs/tool-use#tool-use-and-tool-result-content-blocks
    if (message.role === 'tool') {
+     // Handle different content types in tool messages
+     const toolResultContent = Array.isArray(message.content)
+       ? await buildArrayContent(message.content)
+       : !message.content
+         ? [{ text: '<empty_content>', type: 'text' as const }]
+         : [{ text: message.content, type: 'text' as const }];
+
      // Check whether this tool message has a corresponding assistant tool call
      if (message.tool_call_id && validToolCallIds.has(message.tool_call_id)) {
        pendingToolResults.push({
-         content: [{ text: message.content as string, type: 'text' }],
+         content: toolResultContent as Anthropic.ToolResultBlockParam['content'],
          tool_use_id: message.tool_call_id,
          type: 'tool_result',
        });
@@ -198,8 +208,11 @@
        }
      } else {
        // If the tool message has no corresponding assistant tool call, treat it as plain text
+       const fallbackContent = Array.isArray(message.content)
+         ? JSON.stringify(message.content)
+         : message.content || '<empty_content>';
        messages.push({
-         content: message.content as string,
+         content: fallbackContent,
          role: 'user',
        });
      }
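
Together, these two hunks normalize tool-message content on both paths: matched tool messages become `tool_result` blocks, orphan ones fall back to a plain user message. A minimal sketch of the resulting mapping (hypothetical helper names; the array case is simplified to text parts, whereas the real code routes arrays through `buildArrayContent` to handle images):

```ts
// Sketch only: approximates the tool-message normalization from the hunks above.
type TextPart = { text: string; type: 'text' };
type ToolContent = string | TextPart[] | null | undefined;

// Content placed inside a tool_result block for a matched tool_call_id
const toToolResultContent = (content: ToolContent): TextPart[] => {
  if (Array.isArray(content)) return content; // real code: await buildArrayContent(content)
  if (!content) return [{ text: '<empty_content>', type: 'text' }]; // null / undefined / ''
  return [{ text: content, type: 'text' }];
};

// Fallback when no assistant tool_call matches (orphan tool message): plain user text
const toFallbackUserContent = (content: ToolContent): string =>
  Array.isArray(content) ? JSON.stringify(content) : content || '<empty_content>';

// toToolResultContent(null)  -> [{ text: '<empty_content>', type: 'text' }]
// toFallbackUserContent('')  -> '<empty_content>'
```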
@@ -462,6 +462,122 @@ describe('Generation Actions', () => {
        // Should complete operation
        expect(mockCompleteOperation).toHaveBeenCalledWith('test-op-id');
      });
+
+     it('should delete message BEFORE regeneration to prevent message not found issue (LOBE-2533)', async () => {
+       // This test verifies the fix for LOBE-2533:
+       // When "delete and regenerate" is called, if regeneration happens first,
+       // it switches to a new branch, causing the original message to no longer
+       // appear in displayMessages. Then deleteMessage cannot find the message
+       // and fails silently.
+       //
+       // The fix: delete first, then regenerate.
+
+       const callOrder: string[] = [];
+
+       // Re-setup mock to track call order
+       const { useChatStore } = await import('@/store/chat');
+       vi.mocked(useChatStore.getState).mockReturnValue({
+         messagesMap: {},
+         operations: {},
+         messageLoadingIds: [],
+         cancelOperations: mockCancelOperations,
+         cancelOperation: mockCancelOperation,
+         deleteMessage: vi.fn().mockImplementation(() => {
+           callOrder.push('deleteMessage');
+           return Promise.resolve();
+         }),
+         switchMessageBranch: vi.fn().mockImplementation(() => {
+           callOrder.push('switchMessageBranch');
+           return Promise.resolve();
+         }),
+         startOperation: mockStartOperation,
+         completeOperation: mockCompleteOperation,
+         failOperation: mockFailOperation,
+         internal_execAgentRuntime: vi.fn().mockImplementation(() => {
+           callOrder.push('internal_execAgentRuntime');
+           return Promise.resolve();
+         }),
+       } as any);
+
+       const context: ConversationContext = {
+         agentId: 'session-1',
+         topicId: 'topic-1',
+         threadId: null,
+         groupId: 'group-1',
+       };
+
+       const store = createStore({ context });
+
+       // Set displayMessages and dbMessages
+       act(() => {
+         store.setState({
+           displayMessages: [
+             { id: 'msg-1', role: 'user', content: 'Hello' },
+             { id: 'msg-2', role: 'assistant', content: 'Hi there', parentId: 'msg-1' },
+           ],
+           dbMessages: [
+             { id: 'msg-1', role: 'user', content: 'Hello' },
+             { id: 'msg-2', role: 'assistant', content: 'Hi there', parentId: 'msg-1' },
+           ],
+         } as any);
+       });
+
+       await act(async () => {
+         await store.getState().delAndRegenerateMessage('msg-2');
+       });
+
+       // CRITICAL: deleteMessage must be called BEFORE switchMessageBranch and internal_execAgentRuntime
+       // If regeneration (which calls switchMessageBranch) happens first, the message
+       // won't be found in displayMessages and deletion will fail silently.
+       expect(callOrder[0]).toBe('deleteMessage');
+       expect(callOrder).toContain('switchMessageBranch');
+       expect(callOrder).toContain('internal_execAgentRuntime');
+
+       // Verify deleteMessage is called before any regeneration-related calls
+       const deleteIndex = callOrder.indexOf('deleteMessage');
+       const switchIndex = callOrder.indexOf('switchMessageBranch');
+       const execIndex = callOrder.indexOf('internal_execAgentRuntime');
+
+       expect(deleteIndex).toBeLessThan(switchIndex);
+       expect(deleteIndex).toBeLessThan(execIndex);
+     });
+
+     it('should not proceed if assistant message has no parentId', async () => {
+       const { useChatStore } = await import('@/store/chat');
+       vi.mocked(useChatStore.getState).mockReturnValue({
+         messagesMap: {},
+         operations: {},
+         messageLoadingIds: [],
+         startOperation: mockStartOperation,
+         completeOperation: mockCompleteOperation,
+         deleteMessage: mockDeleteMessage,
+       } as any);
+
+       const context: ConversationContext = {
+         agentId: 'session-1',
+         topicId: null,
+         threadId: null,
+       };
+
+       const store = createStore({ context });
+
+       // Set displayMessages with assistant message that has no parentId
+       act(() => {
+         store.setState({
+           displayMessages: [
+             { id: 'msg-1', role: 'assistant', content: 'Hi there' }, // no parentId
+           ],
+         } as any);
+       });
+
+       await act(async () => {
+         await store.getState().delAndRegenerateMessage('msg-1');
+       });
+
+       // Should not proceed - no operation created, no delete called
+       expect(mockStartOperation).not.toHaveBeenCalled();
+       expect(mockDeleteMessage).not.toHaveBeenCalled();
+     });
    });
 
    describe('delAndResendThreadMessage', () => {
@@ -206,18 +206,29 @@ export const generationSlice: StateCreator<
    },
 
    delAndRegenerateMessage: async (messageId: string) => {
-     const { context } = get();
+     const { context, displayMessages } = get();
      const chatStore = useChatStore.getState();
 
+     // Find the assistant message and get parent user message ID before deletion
+     // This is needed because after deletion, we can't find the parent anymore
+     const currentMessage = displayMessages.find((c) => c.id === messageId);
+     if (!currentMessage) return;
+
+     const userId = currentMessage.parentId;
+     if (!userId) return;
+
      // Create operation to track context (use 'regenerate' type since this is a regenerate action)
      const { operationId } = chatStore.startOperation({
        context: { ...context, messageId },
        type: 'regenerate',
      });
 
-     // Regenerate first, then delete
-     await get().regenerateAssistantMessage(messageId);
+     // IMPORTANT: Delete first, then regenerate (LOBE-2533)
+     // If we regenerate first, it switches to a new branch, causing the original
+     // message to no longer appear in displayMessages. Then deleteMessage cannot
+     // find the message and fails silently.
      await chatStore.deleteMessage(messageId, { operationId });
+     await get().regenerateUserMessage(userId);
      chatStore.completeOperation(operationId);
    },
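
The reordering above is the substance of the `store` fix: resolve the parent user message, delete the assistant message, and only then regenerate from that parent. A minimal sketch of the corrected flow, using a hypothetical store shape rather than the actual slice:

```ts
// Sketch only: delete-before-regenerate ordering, with a hypothetical store shape.
interface Message {
  id: string;
  parentId?: string;
}

interface ChatDeps {
  displayMessages: Message[];
  deleteMessage: (id: string) => Promise<void>;
  regenerateUserMessage: (userId: string) => Promise<void>;
}

const delAndRegenerate = async (deps: ChatDeps, messageId: string) => {
  // Resolve the parent user message BEFORE deleting; it cannot be looked up afterwards.
  const current = deps.displayMessages.find((m) => m.id === messageId);
  if (!current?.parentId) return;

  // Delete first: regenerating first switches branches, the message disappears from
  // displayMessages, and the later delete silently finds nothing (the LOBE-2533 symptom).
  await deps.deleteMessage(messageId);
  await deps.regenerateUserMessage(current.parentId);
};
```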
 
@@ -42,7 +42,6 @@ const useIsEditorInit = (editor: IEditor) => {
    if (!editor) return;
 
    const onInit = () => {
-     console.log('init: id', editor.getLexicalEditor()?._key);
      setEditInit(true);
    };
    editor.on('initialized', onInit);
@@ -103,13 +102,13 @@ interface DiffAllToolbarProps {
  const DiffAllToolbar = memo<DiffAllToolbarProps>(({ documentId }) => {
    const { t } = useTranslation('editor');
    const isDarkMode = useIsDark();
-   const [editor, performSave, markDirty] = useDocumentStore((s) => [
+   const [storeEditor, performSave, markDirty] = useDocumentStore((s) => [
      s.editor!,
      s.performSave,
      s.markDirty,
    ]);
 
-   const hasPendingDiffs = useEditorHasPendingDiffs(editor);
+   const hasPendingDiffs = useEditorHasPendingDiffs(storeEditor);
 
    if (!hasPendingDiffs) return null;
 
@@ -131,7 +130,7 @@ const DiffAllToolbar = memo<DiffAllToolbarProps>(({ documentId }) => {
      <Space>
        <Button
          onClick={async () => {
-           editor?.dispatchCommand(LITEXML_DIFFNODE_ALL_COMMAND, {
+           storeEditor?.dispatchCommand(LITEXML_DIFFNODE_ALL_COMMAND, {
              action: DiffAction.Reject,
            });
            await handleSave();
@@ -145,7 +144,7 @@ const DiffAllToolbar = memo<DiffAllToolbarProps>(({ documentId }) => {
        <Button
          color={'default'}
          onClick={async () => {
-           editor?.dispatchCommand(LITEXML_DIFFNODE_ALL_COMMAND, {
+           storeEditor?.dispatchCommand(LITEXML_DIFFNODE_ALL_COMMAND, {
              action: DiffAction.Accept,
            });
            await handleSave();
@@ -2,7 +2,7 @@
 
  import { type IEditor } from '@lobehub/editor';
  import { Alert, Skeleton } from '@lobehub/ui';
- import { memo } from 'react';
+ import { memo, useEffect, useRef } from 'react';
  import { useTranslation } from 'react-i18next';
  import { createStoreUpdater } from 'zustand-utils';
 
@@ -82,6 +82,26 @@ const DocumentIdMode = memo<DocumentIdModeProps>(
        onContentChange?.();
      };
 
+     const isEditorInitialized = !!editor?.getLexicalEditor();
+
+     // Track which documentId onEditorInit has already been called for
+     const initializedDocIdRef = useRef<string | null>(null);
+
+     // Key fix: if the editor is already initialized, onEditorInit must be called proactively,
+     // because the onInit callback only fires when the editor is first initialized
+     useEffect(() => {
+       // Avoid duplicate calls: only run when documentId changes and the editor is already initialized
+       if (
+         editor &&
+         isEditorInitialized &&
+         !isLoading &&
+         initializedDocIdRef.current !== documentId
+       ) {
+         initializedDocIdRef.current = documentId;
+         onEditorInit(editor);
+       }
+     }, [documentId, editor, isEditorInitialized, isLoading, onEditorInit]);
+
      // Show loading state
      if (isLoading) {
        return <EditorSkeleton />;
@@ -194,6 +194,7 @@ export const createDocumentSlice: StateCreator<
      // Check if this response is still for the current active document
      // This prevents race conditions when quickly switching between documents
      const currentActiveId = get().activeDocumentId;
+
      if (currentActiveId && currentActiveId !== documentId) {
        // User has already switched to another document, discard this stale response
        return;