@lobehub/lobehub 2.0.0-next.84 → 2.0.0-next.86

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/CHANGELOG.md +50 -0
  2. package/apps/desktop/src/main/modules/networkProxy/dispatcher.ts +16 -16
  3. package/apps/desktop/src/main/modules/networkProxy/tester.ts +11 -11
  4. package/apps/desktop/src/main/modules/networkProxy/urlBuilder.ts +3 -3
  5. package/apps/desktop/src/main/modules/networkProxy/validator.ts +10 -10
  6. package/changelog/v1.json +18 -0
  7. package/package.json +1 -1
  8. package/packages/agent-runtime/src/core/runtime.ts +36 -1
  9. package/packages/agent-runtime/src/types/event.ts +1 -0
  10. package/packages/agent-runtime/src/types/generalAgent.ts +16 -0
  11. package/packages/agent-runtime/src/types/instruction.ts +30 -0
  12. package/packages/agent-runtime/src/types/runtime.ts +7 -0
  13. package/packages/types/src/message/common/metadata.ts +3 -0
  14. package/packages/types/src/message/common/tools.ts +2 -2
  15. package/packages/types/src/tool/search/index.ts +8 -2
  16. package/src/app/[variants]/(main)/chat/components/conversation/features/ChatInput/V1Mobile/index.tsx +2 -2
  17. package/src/app/[variants]/(main)/chat/components/conversation/features/ChatInput/V1Mobile/useSend.ts +7 -2
  18. package/src/app/[variants]/(main)/chat/components/conversation/features/ChatInput/useSend.ts +15 -14
  19. package/src/app/[variants]/(main)/chat/session/features/SessionListContent/List/Item/index.tsx +2 -2
  20. package/src/app/[variants]/(main)/discover/(list)/features/Pagination.tsx +1 -1
  21. package/src/features/ChatInput/ActionBar/STT/browser.tsx +2 -2
  22. package/src/features/ChatInput/ActionBar/STT/openai.tsx +2 -2
  23. package/src/features/Conversation/Messages/Group/Tool/Inspector/index.tsx +1 -1
  24. package/src/features/Conversation/Messages/User/index.tsx +3 -3
  25. package/src/features/Conversation/Messages/index.tsx +3 -3
  26. package/src/features/Conversation/components/AutoScroll.tsx +2 -2
  27. package/src/services/search.ts +2 -2
  28. package/src/store/chat/agents/GeneralChatAgent.ts +98 -0
  29. package/src/store/chat/agents/__tests__/GeneralChatAgent.test.ts +366 -0
  30. package/src/store/chat/agents/__tests__/createAgentExecutors/call-llm.test.ts +1217 -0
  31. package/src/store/chat/agents/__tests__/createAgentExecutors/call-tool.test.ts +1976 -0
  32. package/src/store/chat/agents/__tests__/createAgentExecutors/finish.test.ts +453 -0
  33. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/index.ts +4 -0
  34. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockInstructions.ts +126 -0
  35. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockMessages.ts +94 -0
  36. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockOperations.ts +96 -0
  37. package/src/store/chat/agents/__tests__/createAgentExecutors/fixtures/mockStore.ts +138 -0
  38. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/assertions.ts +185 -0
  39. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/index.ts +3 -0
  40. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/operationTestUtils.ts +94 -0
  41. package/src/store/chat/agents/__tests__/createAgentExecutors/helpers/testExecutor.ts +139 -0
  42. package/src/store/chat/agents/__tests__/createAgentExecutors/request-human-approve.test.ts +545 -0
  43. package/src/store/chat/agents/__tests__/createAgentExecutors/resolve-aborted-tools.test.ts +686 -0
  44. package/src/store/chat/agents/createAgentExecutors.ts +313 -80
  45. package/src/store/chat/selectors.ts +1 -0
  46. package/src/store/chat/slices/aiChat/__tests__/ai-chat.integration.test.ts +667 -0
  47. package/src/store/chat/slices/aiChat/actions/__tests__/cancel-functionality.test.ts +137 -27
  48. package/src/store/chat/slices/aiChat/actions/__tests__/conversationControl.test.ts +163 -125
  49. package/src/store/chat/slices/aiChat/actions/__tests__/conversationLifecycle.test.ts +12 -2
  50. package/src/store/chat/slices/aiChat/actions/__tests__/fixtures.ts +0 -2
  51. package/src/store/chat/slices/aiChat/actions/__tests__/helpers.ts +0 -2
  52. package/src/store/chat/slices/aiChat/actions/__tests__/streamingExecutor.test.ts +286 -19
  53. package/src/store/chat/slices/aiChat/actions/__tests__/streamingStates.test.ts +0 -112
  54. package/src/store/chat/slices/aiChat/actions/conversationControl.ts +42 -99
  55. package/src/store/chat/slices/aiChat/actions/conversationLifecycle.ts +90 -57
  56. package/src/store/chat/slices/aiChat/actions/generateAIGroupChat.ts +5 -25
  57. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +220 -98
  58. package/src/store/chat/slices/aiChat/actions/streamingStates.ts +0 -34
  59. package/src/store/chat/slices/aiChat/initialState.ts +0 -28
  60. package/src/store/chat/slices/aiChat/selectors.test.ts +280 -0
  61. package/src/store/chat/slices/aiChat/selectors.ts +31 -7
  62. package/src/store/chat/slices/builtinTool/actions/__tests__/localSystem.test.ts +21 -30
  63. package/src/store/chat/slices/builtinTool/actions/__tests__/search.test.ts +29 -49
  64. package/src/store/chat/slices/builtinTool/actions/interpreter.ts +83 -48
  65. package/src/store/chat/slices/builtinTool/actions/localSystem.ts +78 -28
  66. package/src/store/chat/slices/builtinTool/actions/search.ts +146 -59
  67. package/src/store/chat/slices/builtinTool/selectors.test.ts +258 -0
  68. package/src/store/chat/slices/builtinTool/selectors.ts +25 -4
  69. package/src/store/chat/slices/message/action.test.ts +134 -16
  70. package/src/store/chat/slices/message/actions/internals.ts +33 -7
  71. package/src/store/chat/slices/message/actions/optimisticUpdate.ts +85 -52
  72. package/src/store/chat/slices/message/initialState.ts +0 -10
  73. package/src/store/chat/slices/message/selectors/messageState.ts +34 -12
  74. package/src/store/chat/slices/operation/__tests__/actions.test.ts +712 -16
  75. package/src/store/chat/slices/operation/__tests__/integration.test.ts +342 -0
  76. package/src/store/chat/slices/operation/__tests__/selectors.test.ts +257 -17
  77. package/src/store/chat/slices/operation/actions.ts +218 -11
  78. package/src/store/chat/slices/operation/selectors.ts +135 -6
  79. package/src/store/chat/slices/operation/types.ts +29 -3
  80. package/src/store/chat/slices/plugin/action.test.ts +30 -322
  81. package/src/store/chat/slices/plugin/actions/internals.ts +0 -14
  82. package/src/store/chat/slices/plugin/actions/optimisticUpdate.ts +21 -19
  83. package/src/store/chat/slices/plugin/actions/pluginTypes.ts +45 -27
  84. package/src/store/chat/slices/plugin/actions/publicApi.ts +3 -4
  85. package/src/store/chat/slices/plugin/actions/workflow.ts +0 -55
  86. package/src/store/chat/slices/thread/selectors/index.ts +4 -2
  87. package/src/store/chat/slices/translate/action.ts +54 -41
  88. package/src/tools/web-browsing/ExecutionRuntime/index.ts +5 -2
  89. package/src/tools/web-browsing/Portal/Search/Footer.tsx +11 -9
package/src/store/chat/agents/__tests__/createAgentExecutors/call-llm.test.ts
@@ -0,0 +1,1217 @@
+ import type { GeneralAgentCallLLMResultPayload } from '@lobechat/agent-runtime';
+ import { LOADING_FLAT } from '@lobechat/const';
+ import type { ChatToolPayload, UIChatMessage } from '@lobechat/types';
+ import { describe, expect, it, vi } from 'vitest';
+
+ import {
+   createAssistantMessage,
+   createCallLLMInstruction,
+   createMockStore,
+   createUserMessage,
+ } from './fixtures';
+ import {
+   createInitialState,
+   createTestContext,
+   executeWithMockContext,
+   expectMessageCreated,
+   expectNextContext,
+   expectValidExecutorResult,
+ } from './helpers';
+
+ describe('call_llm executor', () => {
+   describe('Basic Behavior', () => {
+     it('should create assistant message with LOADING_FLAT content', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext({ sessionId: 'test-session', topicId: 'test-topic' });
+       const instruction = createCallLLMInstruction({
+         model: 'gpt-4',
+         provider: 'openai',
+         messages: [createUserMessage()],
+       });
+       const state = createInitialState({ sessionId: 'test-session' });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expectMessageCreated(mockStore, 'assistant');
+       expect(mockStore.optimisticCreateMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           content: LOADING_FLAT,
+           role: 'assistant',
+           model: 'gpt-4',
+           provider: 'openai',
+           sessionId: 'test-session',
+           topicId: 'test-topic',
+         }),
+       );
+     });
+
+     it('should call internal_fetchAIChatMessage with correct params', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const userMsg = createUserMessage({ content: 'Hello' });
+       const instruction = createCallLLMInstruction({
+         model: 'gpt-4',
+         provider: 'openai',
+         messages: [userMsg],
+       });
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.internal_fetchAIChatMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           messageId: expect.any(String),
+           messages: [userMsg], // Should exclude assistant message
+           model: 'gpt-4',
+           provider: 'openai',
+           operationId: context.operationId,
+         }),
+       );
+     });
+
+     it('should associate message with operation', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.associateMessageWithOperation).toHaveBeenCalledWith(
+         expect.any(String),
+         context.operationId,
+       );
+     });
+
+     it('should return correct result structure with events and newState', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expectValidExecutorResult(result);
+       expect(result.events).toEqual([]);
+       expect(result.newState).toBeDefined();
+       expect(result.nextContext).toBeDefined();
+     });
+   });
+
+   describe('Skip Create First Message Mode', () => {
+     it('should reuse parentId when skipCreateFirstMessage is true', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const parentId = 'msg_existing';
+       const context = createTestContext({ parentId });
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+         skipCreateFirstMessage: true,
+       });
+
+       // Then
+       expect(mockStore.optimisticCreateMessage).not.toHaveBeenCalled();
+       expect(mockStore.internal_fetchAIChatMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           messageId: parentId,
+         }),
+       );
+     });
+   });
+
+   describe('Parent Message ID Handling', () => {
+     it('should use llmPayload.parentMessageId if provided', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext({ parentId: 'msg_context_parent' });
+       const instruction = createCallLLMInstruction({
+         parentMessageId: 'msg_payload_parent',
+       });
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.optimisticCreateMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           parentId: 'msg_payload_parent',
+         }),
+       );
+     });
+
+     it('should fall back to context.parentId if parentMessageId not provided', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext({ parentId: 'msg_context_parent' });
+       const instruction = createCallLLMInstruction({
+         parentMessageId: undefined,
+       });
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.optimisticCreateMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           parentId: 'msg_context_parent',
+         }),
+       );
+     });
+   });
+
+   describe('Usage Tracking', () => {
+     it('should accumulate LLM usage from currentStepUsage', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction({
+         model: 'gpt-4',
+         provider: 'openai',
+       });
+       const state = createInitialState({
+         usage: {
+           humanInteraction: {
+             approvalRequests: 0,
+             promptRequests: 0,
+             selectRequests: 0,
+             totalWaitingTimeMs: 0,
+           },
+           llm: {
+             apiCalls: 1,
+             processingTimeMs: 0,
+             tokens: {
+               input: 100,
+               output: 50,
+               total: 150,
+             },
+           },
+           tools: {
+             byTool: [],
+             totalCalls: 0,
+             totalTimeMs: 0,
+           },
+         },
+       });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+         usage: {
+           totalInputTokens: 50,
+           totalOutputTokens: 30,
+           totalTokens: 80,
+         },
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.newState.usage).toBeDefined();
+       expect(result.newState.usage.llm.tokens.input).toBeGreaterThan(state.usage.llm.tokens.input);
+       expect(result.newState.usage.llm.tokens.output).toBeGreaterThan(
+         state.usage.llm.tokens.output,
+       );
+     });
+
+     it('should update state.usage and state.cost correctly', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction({
+         model: 'gpt-4',
+         provider: 'openai',
+       });
+       const state = createInitialState({
+         usage: {
+           humanInteraction: {
+             approvalRequests: 0,
+             promptRequests: 0,
+             selectRequests: 0,
+             totalWaitingTimeMs: 0,
+           },
+           llm: {
+             apiCalls: 0,
+             processingTimeMs: 0,
+             tokens: {
+               input: 0,
+               output: 0,
+               total: 0,
+             },
+           },
+           tools: {
+             byTool: [],
+             totalCalls: 0,
+             totalTimeMs: 0,
+           },
+         },
+       });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+         usage: {
+           totalInputTokens: 100,
+           totalOutputTokens: 50,
+           totalTokens: 150,
+           cost: 0.002,
+         },
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.newState.usage).toBeDefined();
+       expect(result.newState.cost).toBeDefined();
+     });
+
+     it('should handle no usage data returned', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+         usage: undefined,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then - should preserve original usage
+       expect(result.newState.usage).toEqual(state.usage);
+     });
+   });
+
+   describe('Abort Handling', () => {
+     it('should return nextContext with phase: human_abort when finishType is abort', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({ stepCount: 3 });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'Partial response',
+         finishType: 'abort',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expectNextContext(result, 'human_abort');
+       expect(result.nextContext!.session!.status).toBe('running');
+       expect(result.nextContext!.session!.stepCount).toBe(4);
+     });
+
+     it('should include correct payload with reason and result when aborted', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'Partial response',
+         finishType: 'abort',
+         isFunctionCall: false,
+         tool_calls: undefined,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       const payload = result.nextContext!.payload as GeneralAgentCallLLMResultPayload;
+       expect(payload).toMatchObject({
+         reason: 'user_cancelled',
+         hasToolsCalling: false,
+         result: {
+           content: 'Partial response',
+           tool_calls: undefined,
+         },
+       });
+     });
+
+     it('should include toolsCalling in abort payload when tools were being called', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       const toolCalls: ChatToolPayload[] = [
+         {
+           id: 'tool_1',
+           identifier: 'lobe-web-browsing',
+           apiName: 'search',
+           arguments: JSON.stringify({ query: 'test' }),
+           type: 'default',
+         },
+       ];
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: '',
+         finishType: 'abort',
+         isFunctionCall: true,
+         tools: toolCalls,
+         tool_calls: [{ id: 'tool_1', type: 'function', function: { name: 'search' } }],
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       const payload = result.nextContext!.payload as GeneralAgentCallLLMResultPayload;
+       expect(payload).toMatchObject({
+         reason: 'user_cancelled',
+         hasToolsCalling: true,
+         toolsCalling: toolCalls,
+       });
+     });
+
+     it('should not throw error on abort', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'Partial',
+         finishType: 'abort',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When & Then - should not throw
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       expect(result).toBeDefined();
+       expect(result.nextContext!.phase).toBe('human_abort');
+     });
+   });
+
+   describe('Normal Completion', () => {
+     it('should return nextContext with phase: llm_result on normal completion', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expectNextContext(result, 'llm_result');
+     });
+
+     it('should include hasToolsCalling and result in llm_result payload', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'Here is the result',
+         finishType: 'stop',
+         isFunctionCall: false,
+         tool_calls: undefined,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       const payload = result.nextContext!.payload as GeneralAgentCallLLMResultPayload;
+       expect(payload).toMatchObject({
+         hasToolsCalling: false,
+         result: {
+           content: 'Here is the result',
+           tool_calls: undefined,
+         },
+       });
+     });
+
+     it('should include toolCalling when LLM returns tools', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       const toolCalls: ChatToolPayload[] = [
+         {
+           id: 'tool_1',
+           identifier: 'lobe-web-browsing',
+           apiName: 'search',
+           arguments: JSON.stringify({ query: 'AI news' }),
+           type: 'default',
+         },
+       ];
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: '',
+         finishType: 'tool_calls',
+         isFunctionCall: true,
+         tools: toolCalls,
+         tool_calls: [{ id: 'tool_1', type: 'function', function: { name: 'search' } }],
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       const payload = result.nextContext!.payload as GeneralAgentCallLLMResultPayload;
+       expect(payload).toMatchObject({
+         hasToolsCalling: true,
+         toolsCalling: toolCalls,
+       });
+     });
+
+     it('should increment stepCount', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({ stepCount: 5 });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.nextContext!.session!.stepCount).toBe(6);
+     });
+
+     it('should include stepUsage in nextContext', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({
+         usage: {
+           humanInteraction: {
+             approvalRequests: 0,
+             promptRequests: 0,
+             selectRequests: 0,
+             totalWaitingTimeMs: 0,
+           },
+           llm: {
+             apiCalls: 0,
+             processingTimeMs: 0,
+             tokens: {
+               input: 0,
+               output: 0,
+               total: 0,
+             },
+           },
+           tools: {
+             byTool: [],
+             totalCalls: 0,
+             totalTimeMs: 0,
+           },
+         },
+       });
+
+       const stepUsage = {
+         totalInputTokens: 100,
+         totalOutputTokens: 50,
+         totalTokens: 150,
+       };
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+         usage: stepUsage,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.nextContext!.stepUsage).toEqual(stepUsage);
+     });
+   });
+
+   describe('State Management', () => {
+     it('should update messages from dbMessagesMap', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({ messages: [] });
+
+       const updatedMessages = [
+         createUserMessage({ content: 'Hello' }),
+         createAssistantMessage({ content: 'Hi there' }),
+       ];
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'Hi there',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = updatedMessages;
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.newState.messages).toEqual(updatedMessages);
+     });
+
+     it('should preserve other state fields', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({
+         sessionId: 'test-session',
+         stepCount: 10,
+         status: 'running',
+       });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.newState.sessionId).toBe(state.sessionId);
+       expect(result.newState.stepCount).toBe(state.stepCount);
+       expect(result.newState.status).toBe(state.status);
+     });
+
+     it('should not mutate original state', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({
+         messages: [createUserMessage()],
+       });
+       const originalState = JSON.parse(JSON.stringify(state));
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [createUserMessage(), createAssistantMessage()];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(state).toEqual(originalState);
+       expect(result.newState).not.toBe(state);
+     });
+   });
+
+   describe('Edge Cases', () => {
+     it('should throw error when message creation fails', async () => {
+       // Given
+       const mockStore = createMockStore({
+         optimisticCreateMessage: vi.fn().mockResolvedValue(null),
+       });
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       // When & Then
+       await expect(
+         executeWithMockContext({
+           executor: 'call_llm',
+           instruction,
+           state,
+           mockStore,
+           context,
+         }),
+       ).rejects.toThrow('Failed to create assistant message');
+     });
+
+     it('should handle empty messages array', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction({
+         messages: [],
+       });
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.internal_fetchAIChatMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           messages: [],
+         }),
+       );
+       expect(result).toBeDefined();
+     });
+
+     it('should handle multiple tools returned', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       const toolCalls: ChatToolPayload[] = [
+         {
+           id: 'tool_1',
+           identifier: 'lobe-web-browsing',
+           apiName: 'search',
+           arguments: JSON.stringify({ query: 'AI' }),
+           type: 'default',
+         },
+         {
+           id: 'tool_2',
+           identifier: 'lobe-web-browsing',
+           apiName: 'craw',
+           arguments: JSON.stringify({ url: 'https://example.com' }),
+           type: 'default',
+         },
+         {
+           id: 'tool_3',
+           identifier: 'lobe-image-generator',
+           apiName: 'generate',
+           arguments: JSON.stringify({ prompt: 'AI art' }),
+           type: 'default',
+         },
+       ];
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: '',
+         finishType: 'tool_calls',
+         isFunctionCall: true,
+         tools: toolCalls,
+         tool_calls: toolCalls.map((t) => ({ id: t.id, type: 'function' as const })),
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       const payload = result.nextContext!.payload as GeneralAgentCallLLMResultPayload;
+       expect(payload.toolsCalling).toHaveLength(3);
+       expect(payload.hasToolsCalling).toBe(true);
+     });
+
+     it('should handle empty dbMessagesMap', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({ messages: [createUserMessage()] });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       // dbMessagesMap[messageKey] doesn't exist
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then - should default to empty array
+       expect(result.newState.messages).toEqual([]);
+     });
+   });
+
+   describe('Message Filtering', () => {
+     it('should exclude assistant message from messages sent to LLM', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+
+       const userMsg = createUserMessage({ id: 'msg_user', content: 'Hello' });
+       const assistantMsg = createAssistantMessage({ id: 'msg_assistant', content: 'Loading...' });
+
+       const instruction = createCallLLMInstruction({
+         messages: [userMsg, assistantMsg],
+       });
+       const state = createInitialState();
+
+       mockStore.optimisticCreateMessage = vi.fn().mockResolvedValue({
+         id: 'msg_assistant',
+         role: 'assistant',
+         content: LOADING_FLAT,
+       });
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then - should filter out the assistant message with matching ID
+       expect(mockStore.internal_fetchAIChatMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           messages: expect.not.arrayContaining([expect.objectContaining({ id: 'msg_assistant' })]),
+         }),
+       );
+     });
+   });
+
+   describe('Different Model Configurations', () => {
+     it('should handle different model and provider combinations', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction({
+         model: 'claude-3-opus',
+         provider: 'anthropic',
+       });
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'Claude response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.optimisticCreateMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           model: 'claude-3-opus',
+           provider: 'anthropic',
+         }),
+       );
+       expect(mockStore.internal_fetchAIChatMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           model: 'claude-3-opus',
+           provider: 'anthropic',
+         }),
+       );
+     });
+   });
+
+   describe('Context Propagation', () => {
+     it('should include correct messageCount in nextContext', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       const messages = [
+         createUserMessage(),
+         createAssistantMessage(),
+         createUserMessage(),
+         createAssistantMessage(),
+       ];
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = messages;
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.nextContext!.session!.messageCount).toBe(4);
+     });
+
+     it('should preserve sessionId in nextContext', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState({ sessionId: 'custom-session-123' });
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(result.nextContext!.session!.sessionId).toBe('custom-session-123');
+     });
+   });
+
+   describe('Thread Support', () => {
+     it('should handle threadId when provided in operation context', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext({ sessionId: 'test-session', topicId: 'test-topic' });
+       const threadId = 'thread_123';
+
+       // Setup operation with threadId
+       mockStore.operations[context.operationId] = {
+         id: context.operationId,
+         type: 'execAgentRuntime',
+         status: 'running',
+         context: {
+           sessionId: 'test-session',
+           topicId: 'test-topic',
+           threadId,
+         },
+         abortController: new AbortController(),
+         metadata: { startTime: Date.now() },
+         childOperationIds: [],
+       };
+
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       const result = await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.optimisticCreateMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           threadId,
+         }),
+       );
+     });
+
+     it('should handle undefined threadId', async () => {
+       // Given
+       const mockStore = createMockStore();
+       const context = createTestContext();
+       const instruction = createCallLLMInstruction();
+       const state = createInitialState();
+
+       mockStore.internal_fetchAIChatMessage = vi.fn().mockResolvedValue({
+         content: 'AI response',
+         finishType: 'stop',
+         isFunctionCall: false,
+       });
+       mockStore.dbMessagesMap[context.messageKey] = [];
+
+       // When
+       await executeWithMockContext({
+         executor: 'call_llm',
+         instruction,
+         state,
+         mockStore,
+         context,
+       });
+
+       // Then
+       expect(mockStore.optimisticCreateMessage).toHaveBeenCalledWith(
+         expect.objectContaining({
+           threadId: undefined,
+         }),
+       );
+     });
+   });
+ });