@lobehub/lobehub 2.0.0-next.84 → 2.0.0-next.86

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only and reflects only the changes between those released versions.
Files changed (89)
  1. package/CHANGELOG.md +50 -0
  2. package/apps/desktop/src/main/modules/networkProxy/dispatcher.ts +16 -16
  3. package/apps/desktop/src/main/modules/networkProxy/tester.ts +11 -11
  4. package/apps/desktop/src/main/modules/networkProxy/urlBuilder.ts +3 -3
  5. package/apps/desktop/src/main/modules/networkProxy/validator.ts +10 -10
  6. package/changelog/v1.json +18 -0
  7. package/package.json +1 -1
  8. package/packages/agent-runtime/src/core/runtime.ts +36 -1
  9. package/packages/agent-runtime/src/types/event.ts +1 -0
  10. package/packages/agent-runtime/src/types/generalAgent.ts +16 -0
  11. package/packages/agent-runtime/src/types/instruction.ts +30 -0
  12. package/packages/agent-runtime/src/types/runtime.ts +7 -0
  13. package/packages/types/src/message/common/metadata.ts +3 -0
  14. package/packages/types/src/message/common/tools.ts +2 -2
  15. package/packages/types/src/tool/search/index.ts +8 -2
  16. package/src/app/[variants]/(main)/chat/components/conversation/features/ChatInput/V1Mobile/index.tsx +2 -2
  17. package/src/app/[variants]/(main)/chat/components/conversation/features/ChatInput/V1Mobile/useSend.ts +7 -2
  18. package/src/app/[variants]/(main)/chat/components/conversation/features/ChatInput/useSend.ts +15 -14
  19. package/src/app/[variants]/(main)/chat/session/features/SessionListContent/List/Item/index.tsx +2 -2
  20. package/src/app/[variants]/(main)/discover/(list)/features/Pagination.tsx +1 -1
  21. package/src/features/ChatInput/ActionBar/STT/browser.tsx +2 -2
  22. package/src/features/ChatInput/ActionBar/STT/openai.tsx +2 -2
  23. package/src/features/Conversation/Messages/Group/Tool/Inspector/index.tsx +1 -1
  24. package/src/features/Conversation/Messages/User/index.tsx +3 -3
  25. package/src/features/Conversation/Messages/index.tsx +3 -3
  26. package/src/features/Conversation/components/AutoScroll.tsx +2 -2
  27. package/src/services/search.ts +2 -2
  28. package/src/store/chat/agents/GeneralChatAgent.ts +98 -0
  29. package/src/store/chat/agents/__tests__/GeneralChatAgent.test.ts +366 -0
  30. package/src/store/chat/agents/__tests__/createAgentExecutors/call-llm.test.ts +1217 -0
  31. package/src/store/chat/agents/__tests__/createAgentExecutors/call-tool.test.ts +1976 -0
  32. package/src/store/chat/agents/__tests__/createAgentExecutors/finish.test.ts +453 -0
  33. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/index.ts +4 -0
  34. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockInstructions.ts +126 -0
  35. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockMessages.ts +94 -0
  36. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockOperations.ts +96 -0
  37. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockStore.ts +138 -0
  38. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/assertions.ts +185 -0
  39. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/index.ts +3 -0
  40. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/operationTestUtils.ts +94 -0
  41. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/testExecutor.ts +139 -0
  42. package/src/store/chat/agents/__tests__/createAgentExecutors/request-human-approve.test.ts +545 -0
  43. package/src/store/chat/agents/__tests__/createAgentExecutors/resolve-aborted-tools.test.ts +686 -0
  44. package/src/store/chat/agents/createAgentExecutors.ts +313 -80
  45. package/src/store/chat/selectors.ts +1 -0
  46. package/src/store/chat/slices/aiChat/__tests__/ai-chat.integration.test.ts +667 -0
  47. package/src/store/chat/slices/aiChat/actions/__tests__/cancel-functionality.test.ts +137 -27
  48. package/src/store/chat/slices/aiChat/actions/__tests__/conversationControl.test.ts +163 -125
  49. package/src/store/chat/slices/aiChat/actions/__tests__/conversationLifecycle.test.ts +12 -2
  50. package/src/store/chat/slices/aiChat/actions/__tests__/fixtures.ts +0 -2
  51. package/src/store/chat/slices/aiChat/actions/__tests__/helpers.ts +0 -2
  52. package/src/store/chat/slices/aiChat/actions/__tests__/streamingExecutor.test.ts +286 -19
  53. package/src/store/chat/slices/aiChat/actions/__tests__/streamingStates.test.ts +0 -112
  54. package/src/store/chat/slices/aiChat/actions/conversationControl.ts +42 -99
  55. package/src/store/chat/slices/aiChat/actions/conversationLifecycle.ts +90 -57
  56. package/src/store/chat/slices/aiChat/actions/generateAIGroupChat.ts +5 -25
  57. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +220 -98
  58. package/src/store/chat/slices/aiChat/actions/streamingStates.ts +0 -34
  59. package/src/store/chat/slices/aiChat/initialState.ts +0 -28
  60. package/src/store/chat/slices/aiChat/selectors.test.ts +280 -0
  61. package/src/store/chat/slices/aiChat/selectors.ts +31 -7
  62. package/src/store/chat/slices/builtinTool/actions/__tests__/localSystem.test.ts +21 -30
  63. package/src/store/chat/slices/builtinTool/actions/__tests__/search.test.ts +29 -49
  64. package/src/store/chat/slices/builtinTool/actions/interpreter.ts +83 -48
  65. package/src/store/chat/slices/builtinTool/actions/localSystem.ts +78 -28
  66. package/src/store/chat/slices/builtinTool/actions/search.ts +146 -59
  67. package/src/store/chat/slices/builtinTool/selectors.test.ts +258 -0
  68. package/src/store/chat/slices/builtinTool/selectors.ts +25 -4
  69. package/src/store/chat/slices/message/action.test.ts +134 -16
  70. package/src/store/chat/slices/message/actions/internals.ts +33 -7
  71. package/src/store/chat/slices/message/actions/optimisticUpdate.ts +85 -52
  72. package/src/store/chat/slices/message/initialState.ts +0 -10
  73. package/src/store/chat/slices/message/selectors/messageState.ts +34 -12
  74. package/src/store/chat/slices/operation/__tests__/actions.test.ts +712 -16
  75. package/src/store/chat/slices/operation/__tests__/integration.test.ts +342 -0
  76. package/src/store/chat/slices/operation/__tests__/selectors.test.ts +257 -17
  77. package/src/store/chat/slices/operation/actions.ts +218 -11
  78. package/src/store/chat/slices/operation/selectors.ts +135 -6
  79. package/src/store/chat/slices/operation/types.ts +29 -3
  80. package/src/store/chat/slices/plugin/action.test.ts +30 -322
  81. package/src/store/chat/slices/plugin/actions/internals.ts +0 -14
  82. package/src/store/chat/slices/plugin/actions/optimisticUpdate.ts +21 -19
  83. package/src/store/chat/slices/plugin/actions/pluginTypes.ts +45 -27
  84. package/src/store/chat/slices/plugin/actions/publicApi.ts +3 -4
  85. package/src/store/chat/slices/plugin/actions/workflow.ts +0 -55
  86. package/src/store/chat/slices/thread/selectors/index.ts +4 -2
  87. package/src/store/chat/slices/translate/action.ts +54 -41
  88. package/src/tools/web-browsing/ExecutionRuntime/index.ts +5 -2
  89. package/src/tools/web-browsing/Portal/Search/Footer.tsx +11 -9
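
Note on the hunks that follow: this release replaces the chat store's ad-hoc loading flags (chatLoadingIds and friends) with an operation slice, and the updated tests drive it through startOperation, cancelOperation, and the operations map. The TypeScript sketch below is inferred purely from those call sites; the names match the diff, but the exact types are assumptions, not the package's published API.

    // Minimal sketch of the operation-slice surface the tests exercise.
    // Inferred from call sites in this diff; field types are assumptions.
    type OperationStatus = 'running' | 'completed' | 'cancelled';

    interface OperationContext {
      sessionId: string;
      topicId: string | null;
      messageId?: string;
    }

    interface Operation {
      id: string;
      type: 'execAgentRuntime' | 'toolCalling';
      status: OperationStatus;
      context: OperationContext;
      label?: string;
    }

    interface OperationSlice {
      operations: Record<string, Operation>;
      startOperation: (init: {
        type: Operation['type'];
        context: OperationContext;
        label?: string;
      }) => { operationId: string };
      cancelOperation: (id: string, reason?: string) => void;
    }

Consistent with this, streaming actions such as internal_fetchAIChatMessage now take an operationId in place of the old sessionId/topicId params, as the hunks below show.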
@@ -109,8 +109,6 @@ export const resetTestEnvironment = () => {
   {
     activeId: TEST_IDS.SESSION_ID,
     activeTopicId: TEST_IDS.TOPIC_ID,
-    chatLoadingIds: [],
-    chatLoadingIdsAbortController: undefined,
     messagesMap: {},
     toolCallingStreamIds: {},
   },
@@ -68,7 +68,7 @@ describe('StreamingExecutor actions', () => {
         await onErrorHandle?.({ type: 'InvalidProviderAPIKey', message: 'Network error' } as any);
       });

-    const updateMessageErrorSpy = vi.spyOn(messageService, 'updateMessageError');
+    const updateMessageSpy = vi.spyOn(messageService, 'updateMessage');

     await act(async () => {
       await result.current.internal_fetchAIChatMessage({
@@ -79,12 +79,14 @@ describe('StreamingExecutor actions', () => {
       });
     });

-    expect(updateMessageErrorSpy).toHaveBeenCalledWith(
+    expect(updateMessageSpy).toHaveBeenCalledWith(
       TEST_IDS.ASSISTANT_MESSAGE_ID,
-      expect.objectContaining({ type: 'InvalidProviderAPIKey' }),
+      expect.objectContaining({
+        error: expect.objectContaining({ type: 'InvalidProviderAPIKey' }),
+      }),
       expect.objectContaining({
         sessionId: TEST_IDS.SESSION_ID,
-        topicId: undefined,
+        topicId: TEST_IDS.TOPIC_ID,
       }),
     );

@@ -130,6 +132,17 @@ describe('StreamingExecutor actions', () => {
     const messages = [createMockMessage({ role: 'user' })];
     const dispatchSpy = vi.spyOn(result.current, 'internal_dispatchMessage');

+    // Create operation for this test
+    const { operationId } = result.current.startOperation({
+      type: 'execAgentRuntime',
+      context: {
+        sessionId: TEST_IDS.SESSION_ID,
+        topicId: null,
+        messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
+      },
+      label: 'Test AI Generation',
+    });
+
     const streamSpy = vi
       .spyOn(chatService, 'createAssistantMessageStream')
       .mockImplementation(async ({ onMessageHandle, onFinish }) => {
@@ -144,6 +157,7 @@ describe('StreamingExecutor actions', () => {
         messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
         model: 'gpt-4o-mini',
         provider: 'openai',
+        operationId,
       });
     });

@@ -154,7 +168,7 @@ describe('StreamingExecutor actions', () => {
         value: expect.objectContaining({ content: 'Hello' }),
       }),
       expect.objectContaining({
-        sessionId: expect.any(String),
+        operationId: expect.any(String),
       }),
     );

@@ -166,6 +180,17 @@ describe('StreamingExecutor actions', () => {
     const messages = [createMockMessage({ role: 'user' })];
     const dispatchSpy = vi.spyOn(result.current, 'internal_dispatchMessage');

+    // Create operation for this test
+    const { operationId } = result.current.startOperation({
+      type: 'execAgentRuntime',
+      context: {
+        sessionId: TEST_IDS.SESSION_ID,
+        topicId: null,
+        messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
+      },
+      label: 'Test AI Generation',
+    });
+
     const streamSpy = vi
       .spyOn(chatService, 'createAssistantMessageStream')
       .mockImplementation(async ({ onMessageHandle, onFinish }) => {
@@ -180,6 +205,7 @@ describe('StreamingExecutor actions', () => {
         messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
         model: 'gpt-4o-mini',
         provider: 'openai',
+        operationId,
       });
     });

@@ -190,7 +216,7 @@ describe('StreamingExecutor actions', () => {
         value: expect.objectContaining({ reasoning: { content: 'Thinking...' } }),
       }),
       expect.objectContaining({
-        sessionId: expect.any(String),
+        operationId: expect.any(String),
       }),
     );

@@ -236,6 +262,17 @@ describe('StreamingExecutor actions', () => {
     const messages = [createMockMessage({ role: 'user' })];
     const dispatchSpy = vi.spyOn(result.current, 'internal_dispatchMessage');

+    // Create operation for this test
+    const { operationId } = result.current.startOperation({
+      type: 'execAgentRuntime',
+      context: {
+        sessionId: TEST_IDS.SESSION_ID,
+        topicId: null,
+        messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
+      },
+      label: 'Test AI Generation',
+    });
+
     const streamSpy = vi
       .spyOn(chatService, 'createAssistantMessageStream')
       .mockImplementation(async ({ onMessageHandle, onFinish }) => {
@@ -255,6 +292,7 @@ describe('StreamingExecutor actions', () => {
         messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
         model: 'gpt-4o-mini',
         provider: 'openai',
+        operationId,
       });
     });

@@ -269,7 +307,7 @@ describe('StreamingExecutor actions', () => {
         }),
       }),
       expect.objectContaining({
-        sessionId: expect.any(String),
+        operationId: expect.any(String),
       }),
     );

@@ -281,6 +319,17 @@ describe('StreamingExecutor actions', () => {
     const messages = [createMockMessage({ role: 'user' })];
     const dispatchSpy = vi.spyOn(result.current, 'internal_dispatchMessage');

+    // Create operation for this test
+    const { operationId } = result.current.startOperation({
+      type: 'execAgentRuntime',
+      context: {
+        sessionId: TEST_IDS.SESSION_ID,
+        topicId: null,
+        messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
+      },
+      label: 'Test AI Generation',
+    });
+
     const streamSpy = vi
       .spyOn(chatService, 'createAssistantMessageStream')
       .mockImplementation(async ({ onMessageHandle, onFinish }) => {
@@ -298,6 +347,7 @@ describe('StreamingExecutor actions', () => {
         messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
         model: 'gpt-4o-mini',
         provider: 'openai',
+        operationId,
       });
     });

@@ -310,7 +360,7 @@ describe('StreamingExecutor actions', () => {
         }),
       }),
       expect.objectContaining({
-        sessionId: expect.any(String),
+        operationId: expect.any(String),
       }),
     );

@@ -370,7 +420,7 @@ describe('StreamingExecutor actions', () => {
       expect.objectContaining({ traceId }),
       expect.objectContaining({
         sessionId: expect.any(String),
-        topicId: undefined,
+        topicId: expect.any(String),
       }),
     );

@@ -394,7 +444,11 @@ describe('StreamingExecutor actions', () => {
     } as UIChatMessage;
     const messages = [userMessage];

-    const streamSpy = vi.spyOn(chatService, 'createAssistantMessageStream');
+    const streamSpy = vi
+      .spyOn(chatService, 'createAssistantMessageStream')
+      .mockImplementation(async ({ onFinish }) => {
+        await onFinish?.(TEST_CONTENT.AI_RESPONSE, {} as any);
+      });

     await act(async () => {
       await result.current.internal_execAgentRuntime({
@@ -404,8 +458,215 @@ describe('StreamingExecutor actions', () => {
       });
     });

+    // Verify agent runtime executed successfully
     expect(streamSpy).toHaveBeenCalled();
-    expect(result.current.refreshMessages).toHaveBeenCalled();
+
+    // Verify operation was completed
+    const operations = Object.values(result.current.operations);
+    const execOperation = operations.find((op) => op.type === 'execAgentRuntime');
+    expect(execOperation?.status).toBe('completed');
+
+    streamSpy.mockRestore();
+  });
+
+  it('should stop agent runtime loop when operation is cancelled before step execution', async () => {
+    act(() => {
+      useChatStore.setState({ internal_execAgentRuntime: realExecAgentRuntime });
+    });
+
+    const { result } = renderHook(() => useChatStore());
+    const userMessage = {
+      id: TEST_IDS.USER_MESSAGE_ID,
+      role: 'user',
+      content: TEST_CONTENT.USER_MESSAGE,
+      sessionId: TEST_IDS.SESSION_ID,
+      topicId: TEST_IDS.TOPIC_ID,
+    } as UIChatMessage;
+
+    let streamCallCount = 0;
+    let cancelDuringFirstCall = false;
+    const streamSpy = vi
+      .spyOn(chatService, 'createAssistantMessageStream')
+      .mockImplementation(async ({ onFinish }) => {
+        streamCallCount++;
+
+        // Cancel during the first LLM call to simulate mid-execution cancellation
+        if (streamCallCount === 1) {
+          const operations = Object.values(result.current.operations);
+          const execOperation = operations.find((op) => op.type === 'execAgentRuntime');
+          if (execOperation) {
+            act(() => {
+              result.current.cancelOperation(execOperation.id, 'user_cancelled');
+            });
+            cancelDuringFirstCall = true;
+          }
+        }
+
+        await onFinish?.(TEST_CONTENT.AI_RESPONSE, {
+          toolCalls: [
+            { id: 'tool-1', type: 'function', function: { name: 'test', arguments: '{}' } },
+          ],
+        } as any);
+      });
+
+    await act(async () => {
+      await result.current.internal_execAgentRuntime({
+        messages: [userMessage],
+        parentMessageId: userMessage.id,
+        parentMessageType: 'user',
+      });
+    });
+
+    // Verify cancellation happened during execution
+    expect(cancelDuringFirstCall).toBe(true);
+    // The loop should stop after first call, not continue to second LLM call after tool execution
+    expect(streamCallCount).toBe(1);
+
+    streamSpy.mockRestore();
+  });
+
+  it('should stop agent runtime loop when operation is cancelled after step completion', async () => {
+    act(() => {
+      useChatStore.setState({ internal_execAgentRuntime: realExecAgentRuntime });
+    });
+
+    const { result } = renderHook(() => useChatStore());
+    const userMessage = {
+      id: TEST_IDS.USER_MESSAGE_ID,
+      role: 'user',
+      content: TEST_CONTENT.USER_MESSAGE,
+      sessionId: TEST_IDS.SESSION_ID,
+      topicId: TEST_IDS.TOPIC_ID,
+    } as UIChatMessage;
+
+    let streamCallCount = 0;
+    let cancelledAfterStep = false;
+
+    const streamSpy = vi
+      .spyOn(chatService, 'createAssistantMessageStream')
+      .mockImplementation(async ({ onFinish }) => {
+        streamCallCount++;
+
+        // First call - LLM returns tool calls
+        if (streamCallCount === 1) {
+          await onFinish?.(TEST_CONTENT.AI_RESPONSE, {
+            toolCalls: [
+              { id: 'tool-1', type: 'function', function: { name: 'test', arguments: '{}' } },
+            ],
+          } as any);
+
+          // Cancel immediately after LLM step completes
+          // This triggers the after-step cancellation check
+          await new Promise((resolve) => setTimeout(resolve, 20));
+          const operations = Object.values(result.current.operations);
+          const execOperation = operations.find((op) => op.type === 'execAgentRuntime');
+          if (execOperation && execOperation.status === 'running') {
+            act(() => {
+              result.current.cancelOperation(execOperation.id, 'user_cancelled');
+            });
+            cancelledAfterStep = true;
+          }
+        }
+      });
+
+    await act(async () => {
+      await result.current.internal_execAgentRuntime({
+        messages: [userMessage],
+        parentMessageId: userMessage.id,
+        parentMessageType: 'user',
+      });
+    });
+
+    // Verify cancellation happened after step completion
+    expect(cancelledAfterStep).toBe(true);
+
+    // Verify that only one LLM call was made (no tool execution happened)
+    expect(streamCallCount).toBe(1);
+
+    // Verify the execution stopped and didn't proceed to tool calling
+    const operations = Object.values(result.current.operations);
+    const toolOperations = operations.filter((op) => op.type === 'toolCalling');
+
+    // If any tool operations were started, they should have been cancelled
+    if (toolOperations.length > 0) {
+      expect(toolOperations.every((op) => op.status === 'cancelled')).toBe(true);
+    }
+
+    streamSpy.mockRestore();
+  });
+
+  it('should resolve aborted tools when cancelled after LLM returns tool calls', async () => {
+    act(() => {
+      useChatStore.setState({ internal_execAgentRuntime: realExecAgentRuntime });
+    });
+
+    const { result } = renderHook(() => useChatStore());
+    const userMessage = {
+      id: TEST_IDS.USER_MESSAGE_ID,
+      role: 'user',
+      content: TEST_CONTENT.USER_MESSAGE,
+      sessionId: TEST_IDS.SESSION_ID,
+      topicId: TEST_IDS.TOPIC_ID,
+    } as UIChatMessage;
+
+    let cancelledAfterLLM = false;
+    let streamCallCount = 0;
+
+    const streamSpy = vi
+      .spyOn(chatService, 'createAssistantMessageStream')
+      .mockImplementation(async ({ onFinish }) => {
+        streamCallCount++;
+
+        // First call - LLM returns with tool calls
+        if (streamCallCount === 1) {
+          await onFinish?.(TEST_CONTENT.AI_RESPONSE, {
+            toolCalls: [
+              {
+                id: 'tool-1',
+                type: 'function',
+                function: { name: 'weatherQuery', arguments: '{"city":"Beijing"}' },
+              },
+              {
+                id: 'tool-2',
+                type: 'function',
+                function: { name: 'calculator', arguments: '{"expression":"1+1"}' },
+              },
+            ],
+          } as any);
+
+          // User cancels after LLM completes but before tool execution
+          await new Promise((resolve) => setTimeout(resolve, 20));
+          const operations = Object.values(result.current.operations);
+          const execOperation = operations.find((op) => op.type === 'execAgentRuntime');
+          if (execOperation && execOperation.status === 'running') {
+            act(() => {
+              result.current.cancelOperation(execOperation.id, 'user_cancelled');
+            });
+            cancelledAfterLLM = true;
+          }
+        }
+      });
+
+    await act(async () => {
+      await result.current.internal_execAgentRuntime({
+        messages: [userMessage],
+        parentMessageId: userMessage.id,
+        parentMessageType: 'user',
+      });
+    });
+
+    // Verify cancellation happened after LLM call
+    expect(cancelledAfterLLM).toBe(true);
+
+    // Verify only one LLM call was made (no tool execution happened)
+    expect(streamCallCount).toBe(1);
+
+    // Verify the agent runtime completed (not just cancelled mid-flight)
+    const operations = Object.values(result.current.operations);
+    const execOperation = operations.find((op) => op.type === 'execAgentRuntime');
+    expect(execOperation?.status).toBe('completed');
+
+    streamSpy.mockRestore();
   });

   it('should use provided sessionId/topicId for trace parameters', async () => {
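
The three new cancellation tests above pin down a two-point check in the agent loop: cancellation is honored both before a step starts and after a step finishes, and tool calls pending at cancel time are resolved as aborted so the run still reaches a clean terminal state. A hypothetical sketch of that contract follows; the helper names and types here are illustrative, not the package's implementation.

    // Sketch of the cancellation contract, under the assumptions stated above.
    type ToolCall = { id: string; name: string; args: string };
    type StepResult = { toolCalls: ToolCall[] };
    interface RuntimeOp {
      status: 'running' | 'completed' | 'cancelled';
    }

    async function runAgentLoop(
      op: RuntimeOp,
      callLLM: () => Promise<StepResult>,
      runTool: (call: ToolCall) => Promise<void>,
      resolveAborted: (calls: ToolCall[]) => Promise<void>,
    ): Promise<void> {
      const isCancelled = () => op.status === 'cancelled';
      while (true) {
        // Before-step check: a cancel issued during the previous step stops
        // the loop here, so no second LLM call is made (streamCallCount === 1).
        if (isCancelled()) return;

        const { toolCalls } = await callLLM();
        if (toolCalls.length === 0) return; // nothing left to do

        // After-step check: a cancel issued once the LLM step finished skips
        // tool execution, but pending calls are still resolved as aborted so
        // the operation can end in a clean terminal state.
        if (isCancelled()) {
          await resolveAborted(toolCalls);
          return;
        }

        for (const call of toolCalls) await runTool(call);
      }
    }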
@@ -538,16 +799,24 @@ describe('StreamingExecutor actions', () => {
         await onFinish?.(TEST_CONTENT.AI_RESPONSE, {});
       });

+    // Create operation with specific context
+    const { operationId } = result.current.startOperation({
+      type: 'execAgentRuntime',
+      context: {
+        sessionId: contextSessionId,
+        topicId: contextTopicId,
+        messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
+      },
+      label: 'Test AI Generation',
+    });
+
     await act(async () => {
       await result.current.internal_fetchAIChatMessage({
         messages,
         messageId: TEST_IDS.ASSISTANT_MESSAGE_ID,
         model: 'gpt-4o-mini',
         provider: 'openai',
-        params: {
-          sessionId: contextSessionId,
-          topicId: contextTopicId,
-        },
+        operationId,
       });
     });

@@ -556,8 +825,7 @@ describe('StreamingExecutor actions', () => {
       TEST_CONTENT.AI_RESPONSE,
       expect.any(Object),
       {
-        sessionId: contextSessionId,
-        topicId: contextTopicId,
+        operationId: expect.any(String),
       },
     );

@@ -598,8 +866,7 @@ describe('StreamingExecutor actions', () => {
       TEST_CONTENT.AI_RESPONSE,
       expect.any(Object),
       {
-        sessionId: 'active-session',
-        topicId: undefined,
+        operationId: undefined,
       },
     );

@@ -17,68 +17,6 @@ afterEach(() => {
 });

 describe('StreamingStates actions', () => {
-  describe('internal_toggleChatLoading', () => {
-    it('should enable loading state with new abort controller', () => {
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleChatLoading(true, TEST_IDS.MESSAGE_ID, 'test-action');
-      });
-
-      const state = useChatStore.getState();
-      expect(state.chatLoadingIdsAbortController).toBeInstanceOf(AbortController);
-      expect(state.chatLoadingIds).toEqual([TEST_IDS.MESSAGE_ID]);
-    });
-
-    it('should disable loading state and clear abort controller', () => {
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleChatLoading(true, TEST_IDS.MESSAGE_ID, 'start');
-        result.current.internal_toggleChatLoading(false, undefined, 'stop');
-      });
-
-      const state = useChatStore.getState();
-      expect(state.chatLoadingIdsAbortController).toBeUndefined();
-      expect(state.chatLoadingIds).toEqual([]);
-    });
-
-    it('should manage beforeunload event listener', () => {
-      const { result } = renderHook(() => useChatStore());
-      const addListenerSpy = vi.spyOn(window, 'addEventListener');
-      const removeListenerSpy = vi.spyOn(window, 'removeEventListener');
-
-      act(() => {
-        result.current.internal_toggleChatLoading(true, TEST_IDS.MESSAGE_ID, 'start');
-      });
-
-      expect(addListenerSpy).toHaveBeenCalledWith('beforeunload', expect.any(Function));
-
-      act(() => {
-        result.current.internal_toggleChatLoading(false, undefined, 'stop');
-      });
-
-      expect(removeListenerSpy).toHaveBeenCalledWith('beforeunload', expect.any(Function));
-    });
-
-    it('should reuse existing abort controller', () => {
-      const existingController = new AbortController();
-
-      act(() => {
-        useChatStore.setState({ chatLoadingIdsAbortController: existingController });
-      });
-
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleChatLoading(true, TEST_IDS.MESSAGE_ID, 'test');
-      });
-
-      const state = useChatStore.getState();
-      expect(state.chatLoadingIdsAbortController).toStrictEqual(existingController);
-    });
-  });
-
   describe('internal_toggleToolCallingStreaming', () => {
     it('should track tool calling stream status', () => {
       const { result } = renderHook(() => useChatStore());
@@ -126,54 +64,4 @@ describe('StreamingStates actions', () => {
       expect(state.searchWorkflowLoadingIds).toEqual([]);
     });
   });
-
-  describe('internal_toggleChatReasoning', () => {
-    it('should enable reasoning loading state', () => {
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleChatReasoning(true, TEST_IDS.MESSAGE_ID, 'test-action');
-      });
-
-      const state = useChatStore.getState();
-      expect(state.reasoningLoadingIds).toEqual([TEST_IDS.MESSAGE_ID]);
-    });
-
-    it('should disable reasoning loading state', () => {
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleChatReasoning(true, TEST_IDS.MESSAGE_ID, 'start');
-        result.current.internal_toggleChatReasoning(false, TEST_IDS.MESSAGE_ID, 'stop');
-      });
-
-      const state = useChatStore.getState();
-      expect(state.reasoningLoadingIds).toEqual([]);
-    });
-  });
-
-  describe('internal_toggleMessageInToolsCalling', () => {
-    it('should enable tools calling state', () => {
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleMessageInToolsCalling(true, TEST_IDS.MESSAGE_ID);
-      });
-
-      const state = useChatStore.getState();
-      expect(state.messageInToolsCallingIds).toEqual([TEST_IDS.MESSAGE_ID]);
-    });
-
-    it('should disable tools calling state', () => {
-      const { result } = renderHook(() => useChatStore());
-
-      act(() => {
-        result.current.internal_toggleMessageInToolsCalling(true, TEST_IDS.MESSAGE_ID);
-        result.current.internal_toggleMessageInToolsCalling(false, TEST_IDS.MESSAGE_ID);
-      });
-
-      const state = useChatStore.getState();
-      expect(state.messageInToolsCallingIds).toEqual([]);
-    });
-  });
 });
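
The deletions above remove the per-flag toggle actions (internal_toggleChatLoading, internal_toggleChatReasoning, internal_toggleMessageInToolsCalling) together with the state arrays they managed. Judging from the expanded selectors elsewhere in this release, the same booleans are presumably now derived from the operations map. A hedged sketch of one plausible replacement, reusing the Operation type sketched earlier; the selector name and shape are illustrative, not the store's actual exports.

    // Hypothetical selector: derive "is this message generating?" from
    // running operations instead of a chatLoadingIds array. Illustrative only.
    const isMessageGenerating = (
      operations: Record<string, Operation>,
      messageId: string,
    ): boolean =>
      Object.values(operations).some(
        (op) =>
          op.type === 'execAgentRuntime' &&
          op.status === 'running' &&
          op.context.messageId === messageId,
      );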