@lobehub/chat 1.99.5 → 1.99.6

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -2,7 +2,7 @@ import { LobeChatPluginManifest } from '@lobehub/chat-plugin-sdk';
2
2
  import { act } from '@testing-library/react';
3
3
  import { merge } from 'lodash-es';
4
4
  import OpenAI from 'openai';
5
- import { beforeEach, describe, expect, it, vi } from 'vitest';
5
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
6
6
 
7
7
  import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
8
8
  import {
@@ -36,7 +36,8 @@ import { modelConfigSelectors } from '@/store/user/selectors';
36
36
  import { UserSettingsState, initialSettingsState } from '@/store/user/slices/settings/initialState';
37
37
  import { DalleManifest } from '@/tools/dalle';
38
38
  import { WebBrowsingManifest } from '@/tools/web-browsing';
39
- import { ChatMessage } from '@/types/message';
39
+ import { ChatErrorType } from '@/types/fetch';
40
+ import { ChatImageItem, ChatMessage } from '@/types/message';
40
41
  import { ChatStreamPayload, type OpenAIChatMessage } from '@/types/openai/chat';
41
42
  import { LobeTool } from '@/types/tool';
42
43
 
@@ -58,15 +59,48 @@ vi.mock('@/utils/fetch', async (importOriginal) => {
58
59
  return { ...(module as any), getMessageError: vi.fn() };
59
60
  });
60
61
 
61
- beforeEach(() => {
62
+ // Mock image processing utilities
63
+ vi.mock('@/utils/url', () => ({
64
+ isLocalUrl: vi.fn(),
65
+ }));
66
+
67
+ vi.mock('@/utils/imageToBase64', () => ({
68
+ imageUrlToBase64: vi.fn(),
69
+ }));
70
+
71
+ vi.mock('@/libs/model-runtime/utils/uriParser', () => ({
72
+ parseDataUri: vi.fn(),
73
+ }));
74
+
75
+ afterEach(() => {
76
+ vi.restoreAllMocks();
77
+ });
78
+
79
+ beforeEach(async () => {
62
80
  // Clear all module caches
63
81
  vi.resetModules();
82
+
64
83
  // Set isServerMode to false by default
65
84
  vi.mock('@/const/version', () => ({
66
85
  isServerMode: false,
67
86
  isDeprecatedEdition: true,
68
87
  isDesktop: false,
69
88
  }));
89
+
90
+ // Reset all mocks
91
+ vi.clearAllMocks();
92
+
93
+ // Set default mock return values for image processing utilities
94
+ const { isLocalUrl } = await import('@/utils/url');
95
+ const { imageUrlToBase64 } = await import('@/utils/imageToBase64');
96
+ const { parseDataUri } = await import('@/libs/model-runtime/utils/uriParser');
97
+
98
+ vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
99
+ vi.mocked(isLocalUrl).mockReturnValue(false);
100
+ vi.mocked(imageUrlToBase64).mockResolvedValue({
101
+ base64: 'mock-base64',
102
+ mimeType: 'image/jpeg',
103
+ });
70
104
  });
71
105
 
72
106
  // mock auth
@@ -142,6 +176,164 @@ describe('ChatService', () => {
142
176
  );
143
177
  });
144
178
 
179
+ describe('extendParams functionality', () => {
180
+ it('should add reasoning parameters when model supports enableReasoning and user enables it', async () => {
181
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
182
+ const messages = [{ content: 'Test reasoning', role: 'user' }] as ChatMessage[];
183
+
184
+ // Mock aiModelSelectors for extend params support
185
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
186
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['enableReasoning']);
187
+
188
+ // Mock agent chat config with reasoning enabled
189
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
190
+ enableReasoning: true,
191
+ reasoningBudgetToken: 2048,
192
+ searchMode: 'off',
193
+ } as any);
194
+
195
+ await chatService.createAssistantMessage({
196
+ messages,
197
+ model: 'deepseek-reasoner',
198
+ provider: 'deepseek',
199
+ plugins: [],
200
+ });
201
+
202
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
203
+ expect.objectContaining({
204
+ thinking: {
205
+ budget_tokens: 2048,
206
+ type: 'enabled',
207
+ },
208
+ }),
209
+ undefined,
210
+ );
211
+ });
212
+
213
+ it('should disable reasoning when model supports enableReasoning but user disables it', async () => {
214
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
215
+ const messages = [{ content: 'Test no reasoning', role: 'user' }] as ChatMessage[];
216
+
217
+ // Mock aiModelSelectors for extend params support
218
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
219
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['enableReasoning']);
220
+
221
+ // Mock agent chat config with reasoning disabled
222
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
223
+ enableReasoning: false,
224
+ searchMode: 'off',
225
+ } as any);
226
+
227
+ await chatService.createAssistantMessage({
228
+ messages,
229
+ model: 'deepseek-reasoner',
230
+ provider: 'deepseek',
231
+ plugins: [],
232
+ });
233
+
234
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
235
+ expect.objectContaining({
236
+ thinking: {
237
+ budget_tokens: 0,
238
+ type: 'disabled',
239
+ },
240
+ }),
241
+ undefined,
242
+ );
243
+ });
244
+
245
+ it('should use default budget when reasoningBudgetToken is not set', async () => {
246
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
247
+ const messages = [{ content: 'Test default budget', role: 'user' }] as ChatMessage[];
248
+
249
+ // Mock aiModelSelectors for extend params support
250
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
251
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['enableReasoning']);
252
+
253
+ // Mock agent chat config with reasoning enabled but no custom budget
254
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
255
+ enableReasoning: true,
256
+ // reasoningBudgetToken is undefined
257
+ searchMode: 'off',
258
+ } as any);
259
+
260
+ await chatService.createAssistantMessage({
261
+ messages,
262
+ model: 'deepseek-reasoner',
263
+ provider: 'deepseek',
264
+ plugins: [],
265
+ });
266
+
267
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
268
+ expect.objectContaining({
269
+ thinking: {
270
+ budget_tokens: 1024, // default value
271
+ type: 'enabled',
272
+ },
273
+ }),
274
+ undefined,
275
+ );
276
+ });
277
+
278
+ it('should set reasoning_effort when model supports reasoningEffort and user configures it', async () => {
279
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
280
+ const messages = [{ content: 'Test reasoning effort', role: 'user' }] as ChatMessage[];
281
+
282
+ // Mock aiModelSelectors for extend params support
283
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
284
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['reasoningEffort']);
285
+
286
+ // Mock agent chat config with reasoning effort set
287
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
288
+ reasoningEffort: 'high',
289
+ searchMode: 'off',
290
+ } as any);
291
+
292
+ await chatService.createAssistantMessage({
293
+ messages,
294
+ model: 'test-model',
295
+ provider: 'test-provider',
296
+ plugins: [],
297
+ });
298
+
299
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
300
+ expect.objectContaining({
301
+ reasoning_effort: 'high',
302
+ }),
303
+ undefined,
304
+ );
305
+ });
306
+
307
+ it('should set thinkingBudget when model supports thinkingBudget and user configures it', async () => {
308
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
309
+ const messages = [{ content: 'Test thinking budget', role: 'user' }] as ChatMessage[];
310
+
311
+ // Mock aiModelSelectors for extend params support
312
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
313
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['thinkingBudget']);
314
+
315
+ // Mock agent chat config with thinking budget set
316
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
317
+ thinkingBudget: 5000,
318
+ searchMode: 'off',
319
+ } as any);
320
+
321
+ await chatService.createAssistantMessage({
322
+ messages,
323
+ model: 'test-model',
324
+ provider: 'test-provider',
325
+ plugins: [],
326
+ });
327
+
328
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
329
+ expect.objectContaining({
330
+ thinkingBudget: 5000,
331
+ }),
332
+ undefined,
333
+ );
334
+ });
335
+ });
336
+
145
337
  describe('should handle content correctly for vision models', () => {
146
338
  it('should include image content when with vision model', async () => {
147
339
  const messages = [
@@ -209,6 +401,263 @@ describe('ChatService', () => {
209
401
  });
210
402
  });
211
403
 
404
+ describe('local image URL conversion', () => {
405
+ it('should convert local image URLs to base64 and call processImageList', async () => {
406
+ const { isLocalUrl } = await import('@/utils/url');
407
+ const { imageUrlToBase64 } = await import('@/utils/imageToBase64');
408
+ const { parseDataUri } = await import('@/libs/model-runtime/utils/uriParser');
409
+
410
+ // Mock for local URL
411
+ vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
412
+ vi.mocked(isLocalUrl).mockReturnValue(true); // This is a local URL
413
+ vi.mocked(imageUrlToBase64).mockResolvedValue({
414
+ base64: 'converted-base64-content',
415
+ mimeType: 'image/png',
416
+ });
417
+
418
+ const messages = [
419
+ {
420
+ content: 'Hello',
421
+ role: 'user',
422
+ imageList: [
423
+ {
424
+ id: 'file1',
425
+ url: 'http://127.0.0.1:3000/uploads/image.png', // Real local URL
426
+ alt: 'local-image.png',
427
+ },
428
+ ],
429
+ createdAt: Date.now(),
430
+ id: 'test-id',
431
+ meta: {},
432
+ updatedAt: Date.now(),
433
+ },
434
+ ] as ChatMessage[];
435
+
436
+ // Spy on processImageList method
437
+ const processImageListSpy = vi.spyOn(chatService as any, 'processImageList');
438
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
439
+
440
+ await chatService.createAssistantMessage({
441
+ messages,
442
+ plugins: [],
443
+ model: 'gpt-4-vision-preview',
444
+ });
445
+
446
+ // Verify processImageList was called with correct arguments
447
+ expect(processImageListSpy).toHaveBeenCalledWith({
448
+ imageList: [
449
+ {
450
+ id: 'file1',
451
+ url: 'http://127.0.0.1:3000/uploads/image.png',
452
+ alt: 'local-image.png',
453
+ },
454
+ ],
455
+ model: 'gpt-4-vision-preview',
456
+ provider: undefined,
457
+ });
458
+
459
+ // Verify the utility functions were called
460
+ expect(parseDataUri).toHaveBeenCalledWith('http://127.0.0.1:3000/uploads/image.png');
461
+ expect(isLocalUrl).toHaveBeenCalledWith('http://127.0.0.1:3000/uploads/image.png');
462
+ expect(imageUrlToBase64).toHaveBeenCalledWith('http://127.0.0.1:3000/uploads/image.png');
463
+
464
+ // Verify the final result contains base64 converted URL
465
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
466
+ {
467
+ messages: [
468
+ {
469
+ content: [
470
+ {
471
+ text: 'Hello',
472
+ type: 'text',
473
+ },
474
+ {
475
+ image_url: {
476
+ detail: 'auto',
477
+ url: 'data:image/png;base64,converted-base64-content',
478
+ },
479
+ type: 'image_url',
480
+ },
481
+ ],
482
+ role: 'user',
483
+ },
484
+ ],
485
+ model: 'gpt-4-vision-preview',
486
+ },
487
+ undefined,
488
+ );
489
+ });
490
+
491
+ it('should not convert remote URLs to base64 and call processImageList', async () => {
492
+ const { isLocalUrl } = await import('@/utils/url');
493
+ const { imageUrlToBase64 } = await import('@/utils/imageToBase64');
494
+ const { parseDataUri } = await import('@/libs/model-runtime/utils/uriParser');
495
+
496
+ // Mock for remote URL
497
+ vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
498
+ vi.mocked(isLocalUrl).mockReturnValue(false); // This is NOT a local URL
499
+ vi.mocked(imageUrlToBase64).mockClear(); // Clear to ensure it's not called
500
+
501
+ const messages = [
502
+ {
503
+ content: 'Hello',
504
+ role: 'user',
505
+ imageList: [
506
+ {
507
+ id: 'file1',
508
+ url: 'https://example.com/remote-image.jpg', // Remote URL
509
+ alt: 'remote-image.jpg',
510
+ },
511
+ ],
512
+ createdAt: Date.now(),
513
+ id: 'test-id-2',
514
+ meta: {},
515
+ updatedAt: Date.now(),
516
+ },
517
+ ] as ChatMessage[];
518
+
519
+ // Spy on processImageList method
520
+ const processImageListSpy = vi.spyOn(chatService as any, 'processImageList');
521
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
522
+
523
+ await chatService.createAssistantMessage({
524
+ messages,
525
+ plugins: [],
526
+ model: 'gpt-4-vision-preview',
527
+ });
528
+
529
+ // Verify processImageList was called
530
+ expect(processImageListSpy).toHaveBeenCalledWith({
531
+ imageList: [
532
+ {
533
+ id: 'file1',
534
+ url: 'https://example.com/remote-image.jpg',
535
+ alt: 'remote-image.jpg',
536
+ },
537
+ ],
538
+ model: 'gpt-4-vision-preview',
539
+ provider: undefined,
540
+ });
541
+
542
+ // Verify the utility functions were called
543
+ expect(parseDataUri).toHaveBeenCalledWith('https://example.com/remote-image.jpg');
544
+ expect(isLocalUrl).toHaveBeenCalledWith('https://example.com/remote-image.jpg');
545
+ expect(imageUrlToBase64).not.toHaveBeenCalled(); // Should NOT be called for remote URLs
546
+
547
+ // Verify the final result preserves original URL
548
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
549
+ {
550
+ messages: [
551
+ {
552
+ content: [
553
+ {
554
+ text: 'Hello',
555
+ type: 'text',
556
+ },
557
+ {
558
+ image_url: { detail: 'auto', url: 'https://example.com/remote-image.jpg' },
559
+ type: 'image_url',
560
+ },
561
+ ],
562
+ role: 'user',
563
+ },
564
+ ],
565
+ model: 'gpt-4-vision-preview',
566
+ },
567
+ undefined,
568
+ );
569
+ });
570
+
571
+ it('should handle mixed local and remote URLs correctly', async () => {
572
+ const { isLocalUrl } = await import('@/utils/url');
573
+ const { imageUrlToBase64 } = await import('@/utils/imageToBase64');
574
+ const { parseDataUri } = await import('@/libs/model-runtime/utils/uriParser');
575
+
576
+ // Mock parseDataUri to always return url type
577
+ vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
578
+
579
+ // Mock isLocalUrl to return true only for 127.0.0.1 URLs
580
+ vi.mocked(isLocalUrl).mockImplementation((url: string) => {
581
+ return new URL(url).hostname === '127.0.0.1';
582
+ });
583
+
584
+ // Mock imageUrlToBase64 for conversion
585
+ vi.mocked(imageUrlToBase64).mockResolvedValue({
586
+ base64: 'local-file-base64',
587
+ mimeType: 'image/jpeg',
588
+ });
589
+
590
+ const messages = [
591
+ {
592
+ content: 'Multiple images',
593
+ role: 'user',
594
+ imageList: [
595
+ {
596
+ id: 'local1',
597
+ url: 'http://127.0.0.1:3000/local1.jpg', // Local URL
598
+ alt: 'local1.jpg',
599
+ },
600
+ {
601
+ id: 'remote1',
602
+ url: 'https://example.com/remote1.png', // Remote URL
603
+ alt: 'remote1.png',
604
+ },
605
+ {
606
+ id: 'local2',
607
+ url: 'http://127.0.0.1:8080/local2.gif', // Another local URL
608
+ alt: 'local2.gif',
609
+ },
610
+ ],
611
+ createdAt: Date.now(),
612
+ id: 'test-id-3',
613
+ meta: {},
614
+ updatedAt: Date.now(),
615
+ },
616
+ ] as ChatMessage[];
617
+
618
+ const processImageListSpy = vi.spyOn(chatService as any, 'processImageList');
619
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
620
+
621
+ await chatService.createAssistantMessage({
622
+ messages,
623
+ plugins: [],
624
+ model: 'gpt-4-vision-preview',
625
+ });
626
+
627
+ // Verify processImageList was called
628
+ expect(processImageListSpy).toHaveBeenCalledWith({
629
+ imageList: [
630
+ { id: 'local1', url: 'http://127.0.0.1:3000/local1.jpg', alt: 'local1.jpg' },
631
+ { id: 'remote1', url: 'https://example.com/remote1.png', alt: 'remote1.png' },
632
+ { id: 'local2', url: 'http://127.0.0.1:8080/local2.gif', alt: 'local2.gif' },
633
+ ],
634
+ model: 'gpt-4-vision-preview',
635
+ provider: undefined,
636
+ });
637
+
638
+ // Verify isLocalUrl was called for each image
639
+ expect(isLocalUrl).toHaveBeenCalledWith('http://127.0.0.1:3000/local1.jpg');
640
+ expect(isLocalUrl).toHaveBeenCalledWith('https://example.com/remote1.png');
641
+ expect(isLocalUrl).toHaveBeenCalledWith('http://127.0.0.1:8080/local2.gif');
642
+
643
+ // Verify imageUrlToBase64 was called only for local URLs
644
+ expect(imageUrlToBase64).toHaveBeenCalledWith('http://127.0.0.1:3000/local1.jpg');
645
+ expect(imageUrlToBase64).toHaveBeenCalledWith('http://127.0.0.1:8080/local2.gif');
646
+ expect(imageUrlToBase64).toHaveBeenCalledTimes(2); // Only for local URLs
647
+
648
+ // Verify the final result has correct URLs
649
+ const callArgs = getChatCompletionSpy.mock.calls[0][0];
650
+ const imageContent = (callArgs.messages?.[0].content as any[])?.filter(
651
+ (c) => c.type === 'image_url',
652
+ );
653
+
654
+ expect(imageContent).toHaveLength(3);
655
+ expect(imageContent[0].image_url.url).toBe('data:image/jpeg;base64,local-file-base64'); // Local converted
656
+ expect(imageContent[1].image_url.url).toBe('https://example.com/remote1.png'); // Remote preserved
657
+ expect(imageContent[2].image_url.url).toBe('data:image/jpeg;base64,local-file-base64'); // Local converted
658
+ });
659
+ });
660
+
212
661
  describe('with tools messages', () => {
213
662
  it('should inject a tool system role for models with tools', async () => {
214
663
  const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
@@ -607,6 +1056,15 @@ describe('ChatService', () => {
607
1056
  });
608
1057
 
609
1058
  describe('getChatCompletion', () => {
1059
+ let mockFetchSSE: any;
1060
+
1061
+ beforeEach(async () => {
1062
+ // Setup common fetchSSE mock for getChatCompletion tests
1063
+ const { fetchSSE } = await import('@/utils/fetch');
1064
+ mockFetchSSE = vi.fn().mockResolvedValue(new Response('mock response'));
1065
+ vi.mocked(fetchSSE).mockImplementation(mockFetchSSE);
1066
+ });
1067
+
610
1068
  it('should make a POST request with the correct payload', async () => {
611
1069
  const params: Partial<ChatStreamPayload> = {
612
1070
  model: 'test-model',
@@ -622,12 +1080,16 @@ describe('ChatService', () => {
622
1080
 
623
1081
  await chatService.getChatCompletion(params, options);
624
1082
 
625
- expect(global.fetch).toHaveBeenCalledWith(expect.any(String), {
626
- body: JSON.stringify(expectedPayload),
627
- headers: expect.any(Object),
628
- method: 'POST',
629
- });
1083
+ expect(mockFetchSSE).toHaveBeenCalledWith(
1084
+ expect.any(String),
1085
+ expect.objectContaining({
1086
+ body: JSON.stringify(expectedPayload),
1087
+ headers: expect.any(Object),
1088
+ method: 'POST',
1089
+ }),
1090
+ );
630
1091
  });
1092
+
631
1093
  it('should make a POST request without response in non-openai provider payload', async () => {
632
1094
  const params: Partial<ChatStreamPayload> = {
633
1095
  model: 'deepseek-reasoner',
@@ -647,52 +1109,52 @@ describe('ChatService', () => {
647
1109
 
648
1110
  await chatService.getChatCompletion(params, options);
649
1111
 
650
- expect(global.fetch).toHaveBeenCalledWith(expect.any(String), {
651
- body: JSON.stringify(expectedPayload),
652
- headers: expect.any(Object),
653
- method: 'POST',
654
- });
1112
+ expect(mockFetchSSE).toHaveBeenCalledWith(
1113
+ expect.any(String),
1114
+ expect.objectContaining({
1115
+ body: JSON.stringify(expectedPayload),
1116
+ headers: expect.any(Object),
1117
+ method: 'POST',
1118
+ }),
1119
+ );
655
1120
  });
656
1121
 
657
- it('should throw InvalidAccessCode error when enableFetchOnClient is true and auth is enabled but user is not signed in', async () => {
658
- // Mock userStore
659
- const mockUserStore = {
660
- enableAuth: () => true,
661
- isSignedIn: false,
662
- };
663
-
664
- // Mock modelConfigSelectors
665
- const mockModelConfigSelectors = {
666
- isProviderFetchOnClient: () => () => true,
667
- };
1122
+ it('should return InvalidAccessCode error when enableFetchOnClient is true and auth is enabled but user is not signed in', async () => {
1123
+ // Mock fetchSSE to call onErrorHandle with the error
1124
+ const { fetchSSE } = await import('@/utils/fetch');
1125
+
1126
+ const mockFetchSSEWithError = vi.fn().mockImplementation((url, options) => {
1127
+ // Simulate the error being caught and passed to onErrorHandle
1128
+ if (options.onErrorHandle) {
1129
+ const error = {
1130
+ errorType: ChatErrorType.InvalidAccessCode,
1131
+ error: new Error('InvalidAccessCode'),
1132
+ };
1133
+ options.onErrorHandle(error, { errorType: ChatErrorType.InvalidAccessCode });
1134
+ }
1135
+ return Promise.resolve(new Response(''));
1136
+ });
668
1137
 
669
- vi.spyOn(useUserStore, 'getState').mockImplementationOnce(() => mockUserStore as any);
670
- vi.spyOn(modelConfigSelectors, 'isProviderFetchOnClient').mockImplementationOnce(
671
- mockModelConfigSelectors.isProviderFetchOnClient,
672
- );
1138
+ vi.mocked(fetchSSE).mockImplementation(mockFetchSSEWithError);
673
1139
 
674
1140
  const params: Partial<ChatStreamPayload> = {
675
1141
  model: 'test-model',
676
1142
  messages: [],
1143
+ provider: 'openai',
677
1144
  };
678
- const options = {};
679
- const expectedPayload = {
680
- model: DEFAULT_AGENT_CONFIG.model,
681
- stream: true,
682
- ...DEFAULT_AGENT_CONFIG.params,
683
- ...params,
684
- };
685
-
686
- const result = await chatService.getChatCompletion(params, options);
687
1145
 
688
- expect(global.fetch).toHaveBeenCalledWith(expect.any(String), {
689
- body: JSON.stringify(expectedPayload),
690
- headers: expect.objectContaining({
691
- 'Content-Type': 'application/json',
692
- }),
693
- method: 'POST',
1146
+ let errorHandled = false;
1147
+ const onErrorHandle = vi.fn((error: any) => {
1148
+ errorHandled = true;
1149
+ expect(error.errorType).toBe(ChatErrorType.InvalidAccessCode);
694
1150
  });
695
- expect(result.status).toBe(401);
1151
+
1152
+ // Call getChatCompletion with onErrorHandle to catch the error
1153
+ await chatService.getChatCompletion(params, { onErrorHandle });
1154
+
1155
+ // Verify that the error was handled
1156
+ expect(errorHandled).toBe(true);
1157
+ expect(onErrorHandle).toHaveBeenCalled();
696
1158
  });
697
1159
 
698
1160
  // Add more test cases to cover different scenarios and edge cases
@@ -717,10 +1179,29 @@ describe('ChatService', () => {
717
1179
 
718
1180
  describe('fetchPresetTaskResult', () => {
719
1181
  it('should handle successful chat completion response', async () => {
720
- // Simulate the case where fetch throws an error
721
- vi.mocked(fetch).mockResolvedValueOnce(new Response('AI response'));
1182
+ // Mock getChatCompletion to simulate successful completion
1183
+ const getChatCompletionSpy = vi
1184
+ .spyOn(chatService, 'getChatCompletion')
1185
+ .mockImplementation(async (params, options) => {
1186
+ // Simulate successful response
1187
+ if (options?.onFinish) {
1188
+ options.onFinish('AI response', {
1189
+ type: 'done',
1190
+ observationId: null,
1191
+ toolCalls: undefined,
1192
+ traceId: null,
1193
+ });
1194
+ }
1195
+ if (options?.onMessageHandle) {
1196
+ options.onMessageHandle({ type: 'text', text: 'AI response' });
1197
+ }
1198
+ return Promise.resolve(new Response(''));
1199
+ });
1200
+
722
1201
  const params = {
723
- /* fill in parameters */
1202
+ messages: [{ content: 'Hello', role: 'user' as const }],
1203
+ model: 'gpt-4',
1204
+ provider: 'openai',
724
1205
  };
725
1206
 
726
1207
  const onMessageHandle = vi.fn();
@@ -748,25 +1229,31 @@ describe('ChatService', () => {
748
1229
  });
749
1230
  expect(onError).not.toHaveBeenCalled();
750
1231
  expect(onMessageHandle).toHaveBeenCalled();
751
- expect(onLoadingChange).toHaveBeenCalledWith(false); // 确认加载状态已经被设置为 false
1232
+ expect(onLoadingChange).toHaveBeenCalledWith(false); // Confirm loading state is set to false
752
1233
  expect(onLoadingChange).toHaveBeenCalledTimes(2);
753
1234
  });
754
1235
 
755
1236
  it('should handle error in chat completion', async () => {
756
- // 模拟 fetch 抛出错误的情况
757
- vi.mocked(fetch).mockResolvedValueOnce(
758
- new Response(null, { status: 404, statusText: 'Not Found' }),
759
- );
1237
+ // Mock getChatCompletion to simulate error
1238
+ const getChatCompletionSpy = vi
1239
+ .spyOn(chatService, 'getChatCompletion')
1240
+ .mockImplementation(async (params, options) => {
1241
+ // Simulate error response
1242
+ if (options?.onErrorHandle) {
1243
+ options.onErrorHandle({ message: 'translated_response.404', type: 404 });
1244
+ }
1245
+ return Promise.resolve(new Response(''));
1246
+ });
760
1247
 
761
1248
  const params = {
762
- /* fill in parameters */
1249
+ messages: [{ content: 'Hello', role: 'user' as const }],
1250
+ model: 'gpt-4',
1251
+ provider: 'openai',
763
1252
  };
764
1253
  const onError = vi.fn();
765
1254
  const onLoadingChange = vi.fn();
766
1255
  const abortController = new AbortController();
767
- const trace = {
768
- /* 填充跟踪信息 */
769
- };
1256
+ const trace = {};
770
1257
 
771
1258
  await chatService.fetchPresetTaskResult({
772
1259
  params,
@@ -780,7 +1267,7 @@ describe('ChatService', () => {
780
1267
  message: 'translated_response.404',
781
1268
  type: 404,
782
1269
  });
783
- expect(onLoadingChange).toHaveBeenCalledWith(false); // 确认加载状态已经被设置为 false
1270
+ expect(onLoadingChange).toHaveBeenCalledWith(false); // Confirm loading state is set to false
784
1271
  });
785
1272
  });
786
1273
 
@@ -910,6 +1397,18 @@ describe('ChatService', () => {
910
1397
  // Need to re-import the related modules after modifying the mocks
911
1398
  const { chatService } = await import('../chat');
912
1399
 
1400
+ // Mock processImageList to return expected image content
1401
+ const processImageListSpy = vi.spyOn(chatService as any, 'processImageList');
1402
+ processImageListSpy.mockImplementation(async () => {
1403
+ // Mock the expected return value for an image
1404
+ return [
1405
+ {
1406
+ image_url: { detail: 'auto', url: 'http://example.com/xxx0asd-dsd.png' },
1407
+ type: 'image_url',
1408
+ },
1409
+ ];
1410
+ });
1411
+
913
1412
  const messages = [
914
1413
  {
915
1414
  content: 'Hello',
@@ -941,7 +1440,7 @@ describe('ChatService', () => {
941
1440
  { content: 'Hey', role: 'assistant' }, // Regular user message
942
1441
  ] as ChatMessage[];
943
1442
 
944
- const output = chatService['processMessages']({
1443
+ const output = await chatService['processMessages']({
945
1444
  messages,
946
1445
  model: 'gpt-4o',
947
1446
  provider: 'openai',
@@ -1062,7 +1561,7 @@ describe('ChatService', () => {
1062
1561
  });
1063
1562
  });
1064
1563
 
1065
- it('should handle empty tool calls messages correctly', () => {
1564
+ it('should handle empty tool calls messages correctly', async () => {
1066
1565
  const messages = [
1067
1566
  {
1068
1567
  content: '## Tools\n\nYou can use these tools',
@@ -1075,7 +1574,7 @@ describe('ChatService', () => {
1075
1574
  },
1076
1575
  ] as ChatMessage[];
1077
1576
 
1078
- const result = chatService['processMessages']({
1577
+ const result = await chatService['processMessages']({
1079
1578
  messages,
1080
1579
  model: 'gpt-4',
1081
1580
  provider: 'openai',
@@ -1093,7 +1592,7 @@ describe('ChatService', () => {
1093
1592
  ]);
1094
1593
  });
1095
1594
 
1096
- it('should handle assistant messages with reasoning correctly', () => {
1595
+ it('should handle assistant messages with reasoning correctly', async () => {
1097
1596
  const messages = [
1098
1597
  {
1099
1598
  role: 'assistant',
@@ -1105,7 +1604,7 @@ describe('ChatService', () => {
1105
1604
  },
1106
1605
  ] as ChatMessage[];
1107
1606
 
1108
- const result = chatService['processMessages']({
1607
+ const result = await chatService['processMessages']({
1109
1608
  messages,
1110
1609
  model: 'gpt-4',
1111
1610
  provider: 'openai',
@@ -1128,6 +1627,70 @@ describe('ChatService', () => {
1128
1627
  },
1129
1628
  ]);
1130
1629
  });
1630
+
1631
+ it('should inject INBOX_GUIDE_SYSTEMROLE for welcome questions in inbox session', async () => {
1632
+ // Don't mock INBOX_GUIDE_SYSTEMROLE, use the real one
1633
+ const messages: ChatMessage[] = [
1634
+ {
1635
+ role: 'user',
1636
+ content: 'Hello, this is my first question',
1637
+ createdAt: Date.now(),
1638
+ id: 'test-welcome',
1639
+ meta: {},
1640
+ updatedAt: Date.now(),
1641
+ },
1642
+ ];
1643
+
1644
+ const result = await chatService['processMessages'](
1645
+ {
1646
+ messages,
1647
+ model: 'gpt-4',
1648
+ provider: 'openai',
1649
+ },
1650
+ {
1651
+ isWelcomeQuestion: true,
1652
+ trace: { sessionId: 'inbox' },
1653
+ },
1654
+ );
1655
+
1656
+ // Should have system message with inbox guide content
1657
+ const systemMessage = result.find((msg) => msg.role === 'system');
1658
+ expect(systemMessage).toBeDefined();
1659
+ // Check for characteristic content of the actual INBOX_GUIDE_SYSTEMROLE
1660
+ expect(systemMessage!.content).toContain('LobeChat Support Assistant');
1661
+ expect(systemMessage!.content).toContain('LobeHub');
1662
+ });
1663
+
1664
+ it('should inject historySummary into system message when provided', async () => {
1665
+ const historySummary = 'Previous conversation summary: User discussed AI topics.';
1666
+
1667
+ const messages: ChatMessage[] = [
1668
+ {
1669
+ role: 'user',
1670
+ content: 'Continue our discussion',
1671
+ createdAt: Date.now(),
1672
+ id: 'test-history',
1673
+ meta: {},
1674
+ updatedAt: Date.now(),
1675
+ },
1676
+ ];
1677
+
1678
+ const result = await chatService['processMessages'](
1679
+ {
1680
+ messages,
1681
+ model: 'gpt-4',
1682
+ provider: 'openai',
1683
+ },
1684
+ {
1685
+ historySummary,
1686
+ },
1687
+ );
1688
+
1689
+ // Should have system message with history summary
1690
+ const systemMessage = result.find((msg) => msg.role === 'system');
1691
+ expect(systemMessage).toBeDefined();
1692
+ expect(systemMessage!.content).toContain(historySummary);
1693
+ });
1131
1694
  });
1132
1695
  });
1133
1696
 
@@ -1139,6 +1702,379 @@ vi.mock('../_auth', async (importOriginal) => {
1139
1702
  return importOriginal();
1140
1703
  });
1141
1704
 
1705
+ describe('ChatService private methods', () => {
1706
+ describe('processImageList', () => {
1707
+ beforeEach(() => {
1708
+ vi.resetModules();
1709
+ });
1710
+
1711
+ it('should return empty array if model cannot use vision (non-deprecated)', async () => {
1712
+ vi.doMock('@/const/version', () => ({
1713
+ isServerMode: false,
1714
+ isDeprecatedEdition: false,
1715
+ isDesktop: false,
1716
+ }));
1717
+ const { aiModelSelectors } = await import('@/store/aiInfra');
1718
+ vi.spyOn(aiModelSelectors, 'isModelSupportVision').mockReturnValue(() => false);
1719
+
1720
+ const { chatService } = await import('../chat');
1721
+ const result = await chatService['processImageList']({
1722
+ imageList: [{ url: 'image_url', alt: '', id: 'test' } as ChatImageItem],
1723
+ model: 'any-model',
1724
+ provider: 'any-provider',
1725
+ });
1726
+ expect(result).toEqual([]);
1727
+ });
1728
+
1729
+ it('should process images if model can use vision (non-deprecated)', async () => {
1730
+ vi.doMock('@/const/version', () => ({
1731
+ isServerMode: false,
1732
+ isDeprecatedEdition: false,
1733
+ isDesktop: false,
1734
+ }));
1735
+ const { aiModelSelectors } = await import('@/store/aiInfra');
1736
+ vi.spyOn(aiModelSelectors, 'isModelSupportVision').mockReturnValue(() => true);
1737
+
1738
+ const { chatService } = await import('../chat');
1739
+ const result = await chatService['processImageList']({
1740
+ imageList: [{ url: 'image_url', alt: '', id: 'test' } as ChatImageItem],
1741
+ model: 'any-model',
1742
+ provider: 'any-provider',
1743
+ });
1744
+ expect(result.length).toBe(1);
1745
+ expect(result[0].type).toBe('image_url');
1746
+ });
1747
+
1748
+ it('should return empty array when vision disabled in deprecated edition', async () => {
1749
+ vi.doMock('@/const/version', () => ({
1750
+ isServerMode: false,
1751
+ isDeprecatedEdition: true,
1752
+ isDesktop: false,
1753
+ }));
1754
+
1755
+ const { modelProviderSelectors } = await import('@/store/user/selectors');
1756
+ const spy = vi
1757
+ .spyOn(modelProviderSelectors, 'isModelEnabledVision')
1758
+ .mockReturnValue(() => false);
1759
+
1760
+ const { chatService } = await import('../chat');
1761
+ const result = await chatService['processImageList']({
1762
+ imageList: [{ url: 'image_url', alt: '', id: 'test' } as ChatImageItem],
1763
+ model: 'any-model',
1764
+ provider: 'any-provider',
1765
+ });
1766
+
1767
+ expect(spy).toHaveBeenCalled();
1768
+ expect(result).toEqual([]);
1769
+ });
1770
+
1771
+ it('should process images when vision enabled in deprecated edition', async () => {
1772
+ vi.doMock('@/const/version', () => ({
1773
+ isServerMode: false,
1774
+ isDeprecatedEdition: true,
1775
+ isDesktop: false,
1776
+ }));
1777
+
1778
+ const { modelProviderSelectors } = await import('@/store/user/selectors');
1779
+ const spy = vi
1780
+ .spyOn(modelProviderSelectors, 'isModelEnabledVision')
1781
+ .mockReturnValue(() => true);
1782
+
1783
+ const { chatService } = await import('../chat');
1784
+ const result = await chatService['processImageList']({
1785
+ imageList: [{ url: 'image_url' } as ChatImageItem],
1786
+ model: 'any-model',
1787
+ provider: 'any-provider',
1788
+ });
1789
+
1790
+ expect(spy).toHaveBeenCalled();
1791
+ expect(result.length).toBe(1);
1792
+ expect(result[0].type).toBe('image_url');
1793
+ });
1794
+ });
1795
+
1796
+ describe('processMessages', () => {
1797
+ describe('getAssistantContent', () => {
1798
+ it('should handle assistant message with imageList and content', async () => {
1799
+ const messages: ChatMessage[] = [
1800
+ {
1801
+ role: 'assistant',
1802
+ content: 'Here is an image.',
1803
+ imageList: [{ id: 'img1', url: 'http://example.com/image.png', alt: 'test.png' }],
1804
+ createdAt: Date.now(),
1805
+ id: 'test-id',
1806
+ meta: {},
1807
+ updatedAt: Date.now(),
1808
+ },
1809
+ ];
1810
+ const result = await chatService['processMessages']({
1811
+ messages,
1812
+ model: 'gpt-4-vision-preview',
1813
+ provider: 'openai',
1814
+ });
1815
+
1816
+ expect(result[0].content).toEqual([
1817
+ { text: 'Here is an image.', type: 'text' },
1818
+ { image_url: { detail: 'auto', url: 'http://example.com/image.png' }, type: 'image_url' },
1819
+ ]);
1820
+ });
1821
+
1822
+ it('should handle assistant message with imageList but no content', async () => {
1823
+ const messages: ChatMessage[] = [
1824
+ {
1825
+ role: 'assistant',
1826
+ content: '',
1827
+ imageList: [{ id: 'img1', url: 'http://example.com/image.png', alt: 'test.png' }],
1828
+ createdAt: Date.now(),
1829
+ id: 'test-id-2',
1830
+ meta: {},
1831
+ updatedAt: Date.now(),
1832
+ },
1833
+ ];
1834
+ const result = await chatService['processMessages']({
1835
+ messages,
1836
+ model: 'gpt-4-vision-preview',
1837
+ provider: 'openai',
1838
+ });
1839
+
1840
+ expect(result[0].content).toEqual([
1841
+ { image_url: { detail: 'auto', url: 'http://example.com/image.png' }, type: 'image_url' },
1842
+ ]);
1843
+ });
1844
+ });
1845
+
1846
+ it('should not include tool_calls for assistant message if model does not support tools', async () => {
1847
+ // Mock isCanUseFC to return false
1848
+ vi.spyOn(
1849
+ (await import('@/store/aiInfra')).aiModelSelectors,
1850
+ 'isModelSupportToolUse',
1851
+ ).mockReturnValue(() => false);
1852
+
1853
+ const messages: ChatMessage[] = [
1854
+ {
1855
+ role: 'assistant',
1856
+ content: 'I have a tool call.',
1857
+ tools: [
1858
+ {
1859
+ id: 'tool_123',
1860
+ type: 'default',
1861
+ apiName: 'testApi',
1862
+ arguments: '{}',
1863
+ identifier: 'test-plugin',
1864
+ },
1865
+ ],
1866
+ createdAt: Date.now(),
1867
+ id: 'test-id-3',
1868
+ meta: {},
1869
+ updatedAt: Date.now(),
1870
+ },
1871
+ ];
1872
+
1873
+ const result = await chatService['processMessages']({
1874
+ messages,
1875
+ model: 'some-model-without-fc',
1876
+ provider: 'openai',
1877
+ });
1878
+
1879
+ expect(result[0].tool_calls).toBeUndefined();
1880
+ expect(result[0].content).toBe('I have a tool call.');
1881
+ });
1882
+ });
1883
+
1884
+ describe('reorderToolMessages', () => {
1885
+ it('should correctly reorder when a tool message appears before the assistant message', () => {
1886
+ const input: OpenAIChatMessage[] = [
1887
+ {
1888
+ role: 'system',
1889
+ content: 'System message',
1890
+ },
1891
+ {
1892
+ role: 'tool',
1893
+ tool_call_id: 'tool_call_1',
1894
+ name: 'test-plugin____testApi',
1895
+ content: 'Tool result',
1896
+ },
1897
+ {
1898
+ role: 'assistant',
1899
+ content: '',
1900
+ tool_calls: [
1901
+ { id: 'tool_call_1', type: 'function', function: { name: 'testApi', arguments: '{}' } },
1902
+ ],
1903
+ },
1904
+ ];
1905
+
1906
+ const output = chatService['reorderToolMessages'](input);
1907
+
1908
+ // Verify reordering logic works and covers line 688 hasPushed check
1909
+ // In this test, tool messages are duplicated but the second occurrence is skipped
1910
+ expect(output.length).toBe(4); // Original has 3, assistant will add corresponding tool message again
1911
+ expect(output[0].role).toBe('system');
1912
+ expect(output[1].role).toBe('tool');
1913
+ expect(output[2].role).toBe('assistant');
1914
+ expect(output[3].role).toBe('tool'); // Tool message added by assistant's tool_calls
1915
+ });
1916
+ });
1917
+
1918
+ describe('getChatCompletion', () => {
1919
+ it('should merge responseAnimation styles correctly', async () => {
1920
+ const { fetchSSE } = await import('@/utils/fetch');
1921
+ vi.mock('@/utils/fetch', async (importOriginal) => {
1922
+ const module = await importOriginal();
1923
+ return {
1924
+ ...(module as any),
1925
+ fetchSSE: vi.fn(),
1926
+ };
1927
+ });
1928
+
1929
+ // Mock provider config
1930
+ const { aiProviderSelectors } = await import('@/store/aiInfra');
1931
+ vi.spyOn(aiProviderSelectors, 'providerConfigById').mockReturnValue({
1932
+ id: 'openai',
1933
+ settings: {
1934
+ responseAnimation: 'slow',
1935
+ },
1936
+ } as any);
1937
+
1938
+ // Mock user preference
1939
+ const { userGeneralSettingsSelectors } = await import('@/store/user/selectors');
1940
+ vi.spyOn(userGeneralSettingsSelectors, 'transitionMode').mockReturnValue('smooth');
1941
+
1942
+ await chatService.getChatCompletion(
1943
+ { provider: 'openai', messages: [] },
1944
+ { responseAnimation: { speed: 20 } },
1945
+ );
1946
+
1947
+ expect(fetchSSE).toHaveBeenCalled();
1948
+ const fetchSSEOptions = (fetchSSE as any).mock.calls[0][1];
1949
+
1950
+ expect(fetchSSEOptions.responseAnimation).toEqual({
1951
+ speed: 20,
1952
+ text: 'fadeIn',
1953
+ toolsCalling: 'fadeIn',
1954
+ });
1955
+ });
1956
+ });
1957
+
1958
+ describe('extendParams', () => {
1959
+ it('should set enabledContextCaching to false when model supports disableContextCaching and user enables it', async () => {
1960
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
1961
+ const messages = [{ content: 'Test context caching', role: 'user' }] as ChatMessage[];
1962
+
1963
+ // Mock aiModelSelectors for extend params support
1964
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
1965
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
1966
+ 'disableContextCaching',
1967
+ ]);
1968
+
1969
+ // Mock agent chat config with context caching disabled
1970
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
1971
+ disableContextCaching: true,
1972
+ searchMode: 'off',
1973
+ } as any);
1974
+
1975
+ await chatService.createAssistantMessage({
1976
+ messages,
1977
+ model: 'test-model',
1978
+ provider: 'test-provider',
1979
+ plugins: [],
1980
+ });
1981
+
1982
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
1983
+ expect.objectContaining({
1984
+ enabledContextCaching: false,
1985
+ }),
1986
+ undefined,
1987
+ );
1988
+ });
1989
+
1990
+ it('should not set enabledContextCaching when disableContextCaching is false', async () => {
1991
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
1992
+ const messages = [{ content: 'Test context caching enabled', role: 'user' }] as ChatMessage[];
1993
+
1994
+ // Mock aiModelSelectors for extend params support
1995
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
1996
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
1997
+ 'disableContextCaching',
1998
+ ]);
1999
+
2000
+ // Mock agent chat config with context caching enabled (default)
2001
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
2002
+ disableContextCaching: false,
2003
+ searchMode: 'off',
2004
+ } as any);
2005
+
2006
+ await chatService.createAssistantMessage({
2007
+ messages,
2008
+ model: 'test-model',
2009
+ provider: 'test-provider',
2010
+ plugins: [],
2011
+ });
2012
+
2013
+ // enabledContextCaching should not be present in the call
2014
+ const callArgs = getChatCompletionSpy.mock.calls[0][0];
2015
+ expect(callArgs).not.toHaveProperty('enabledContextCaching');
2016
+ });
2017
+
2018
+ it('should set reasoning_effort when model supports reasoningEffort and user configures it', async () => {
2019
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
2020
+ const messages = [{ content: 'Test reasoning effort', role: 'user' }] as ChatMessage[];
2021
+
2022
+ // Mock aiModelSelectors for extend params support
2023
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
2024
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['reasoningEffort']);
2025
+
2026
+ // Mock agent chat config with reasoning effort set
2027
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
2028
+ reasoningEffort: 'high',
2029
+ searchMode: 'off',
2030
+ } as any);
2031
+
2032
+ await chatService.createAssistantMessage({
2033
+ messages,
2034
+ model: 'test-model',
2035
+ provider: 'test-provider',
2036
+ plugins: [],
2037
+ });
2038
+
2039
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
2040
+ expect.objectContaining({
2041
+ reasoning_effort: 'high',
2042
+ }),
2043
+ undefined,
2044
+ );
2045
+ });
2046
+
2047
+ it('should set thinkingBudget when model supports thinkingBudget and user configures it', async () => {
2048
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
2049
+ const messages = [{ content: 'Test thinking budget', role: 'user' }] as ChatMessage[];
2050
+
2051
+ // Mock aiModelSelectors for extend params support
2052
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(() => true);
2053
+ vi.spyOn(aiModelSelectors, 'modelExtendParams').mockReturnValue(() => ['thinkingBudget']);
2054
+
2055
+ // Mock agent chat config with thinking budget set
2056
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValue({
2057
+ thinkingBudget: 5000,
2058
+ searchMode: 'off',
2059
+ } as any);
2060
+
2061
+ await chatService.createAssistantMessage({
2062
+ messages,
2063
+ model: 'test-model',
2064
+ provider: 'test-provider',
2065
+ plugins: [],
2066
+ });
2067
+
2068
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
2069
+ expect.objectContaining({
2070
+ thinkingBudget: 5000,
2071
+ }),
2072
+ undefined,
2073
+ );
2074
+ });
2075
+ });
2076
+ });
2077
+
1142
2078
  describe('AgentRuntimeOnClient', () => {
1143
2079
  describe('initializeWithClientStore', () => {
1144
2080
  describe('should initialize with options correctly', () => {