@lobehub/chat 1.69.6 → 1.70.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/self-hosting/advanced/auth/clerk.zh-CN.mdx +1 -1
  4. package/docs/self-hosting/server-database/vercel.zh-CN.mdx +1 -1
  5. package/locales/ar/chat.json +7 -1
  6. package/locales/bg-BG/chat.json +7 -1
  7. package/locales/de-DE/chat.json +7 -1
  8. package/locales/en-US/chat.json +7 -1
  9. package/locales/es-ES/chat.json +7 -1
  10. package/locales/fa-IR/chat.json +7 -1
  11. package/locales/fr-FR/chat.json +7 -1
  12. package/locales/it-IT/chat.json +7 -1
  13. package/locales/ja-JP/chat.json +7 -1
  14. package/locales/ko-KR/chat.json +7 -1
  15. package/locales/nl-NL/chat.json +7 -1
  16. package/locales/pl-PL/chat.json +7 -1
  17. package/locales/pt-BR/chat.json +7 -1
  18. package/locales/ru-RU/chat.json +7 -1
  19. package/locales/tr-TR/chat.json +7 -1
  20. package/locales/vi-VN/chat.json +7 -1
  21. package/locales/zh-CN/chat.json +7 -1
  22. package/locales/zh-TW/chat.json +7 -1
  23. package/package.json +1 -1
  24. package/packages/web-crawler/src/crawler.ts +11 -1
  25. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +4 -1
  26. package/src/app/(backend)/webapi/chat/[provider]/route.ts +5 -1
  27. package/src/{features/Conversation/Messages/Assistant/Tool/Inspector/Loader.tsx → components/CircleLoader/index.tsx} +4 -3
  28. package/src/config/tools.ts +2 -0
  29. package/src/const/settings/agent.ts +6 -0
  30. package/src/features/ChatInput/ActionBar/Search/FCSearchModel.tsx +56 -0
  31. package/src/features/ChatInput/ActionBar/Search/FunctionCallingModelSelect/index.tsx +85 -0
  32. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +9 -23
  33. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +15 -23
  34. package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +26 -0
  35. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +4 -4
  36. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +15 -10
  37. package/src/features/Conversation/Messages/Assistant/IntentUnderstanding.tsx +25 -0
  38. package/src/features/Conversation/Messages/Assistant/Tool/Inspector/ToolTitle.tsx +1 -2
  39. package/src/features/Conversation/Messages/Assistant/index.tsx +18 -9
  40. package/src/features/ModelSwitchPanel/index.tsx +1 -4
  41. package/src/hooks/useAgentEnableSearch.ts +1 -9
  42. package/src/libs/agent-runtime/anthropic/index.test.ts +315 -1
  43. package/src/libs/agent-runtime/anthropic/index.ts +7 -4
  44. package/src/locales/default/chat.ts +7 -3
  45. package/src/server/routers/tools/search.ts +8 -1
  46. package/src/services/__tests__/chat.test.ts +1 -0
  47. package/src/services/chat.ts +63 -30
  48. package/src/services/session/type.ts +1 -1
  49. package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap +4 -0
  50. package/src/store/agent/slices/chat/selectors/chatConfig.ts +5 -1
  51. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +12 -4
  52. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +113 -13
  53. package/src/store/chat/slices/aiChat/initialState.ts +2 -0
  54. package/src/store/chat/slices/aiChat/selectors.ts +8 -1
  55. package/src/store/chat/slices/message/action.ts +9 -1
  56. package/src/store/chat/slices/plugin/action.ts +6 -4
  57. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +4 -0
  58. package/src/types/agent/chatConfig.ts +6 -0
  59. package/src/types/openai/chat.ts +0 -1
@@ -1,7 +1,7 @@
  // @vitest-environment node
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { ChatCompletionTool } from '@/libs/agent-runtime';
+ import { ChatCompletionTool, ChatStreamPayload } from '@/libs/agent-runtime';

  import * as anthropicHelpers from '../utils/anthropicHelpers';
  import * as debugStreamModule from '../utils/debugStream';
@@ -511,5 +511,319 @@ describe('LobeAnthropicAI', () => {
  ).resolves.toBeInstanceOf(Response);
  });
  });
+
+ describe('buildAnthropicPayload', () => {
+ it('should correctly build payload with user messages only', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.5,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 4096,
+ messages: [
+ {
+ content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.25,
+ });
+ });
+
+ it('should correctly build payload with system message', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [
+ { content: 'You are a helpful assistant', role: 'system' },
+ { content: 'Hello', role: 'user' },
+ ],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 4096,
+ messages: [
+ {
+ content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ system: [
+ {
+ cache_control: { type: 'ephemeral' },
+ text: 'You are a helpful assistant',
+ type: 'text',
+ },
+ ],
+ temperature: 0.35,
+ });
+ });
+
+ it('should correctly build payload with tools', async () => {
+ const tools: ChatCompletionTool[] = [
+ { function: { name: 'tool1', description: 'desc1' }, type: 'function' },
+ ];
+
+ const spyOn = vi.spyOn(anthropicHelpers, 'buildAnthropicTools').mockReturnValueOnce([
+ {
+ name: 'tool1',
+ description: 'desc1',
+ },
+ ] as any);
+
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Use a tool', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.8,
+ tools,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 4096,
+ messages: [
+ {
+ content: [{ cache_control: { type: 'ephemeral' }, text: 'Use a tool', type: 'text' }],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.4,
+ tools: [{ name: 'tool1', description: 'desc1' }],
+ });
+
+ expect(spyOn).toHaveBeenCalledWith(tools, {
+ enabledContextCaching: true,
+ });
+ });
+
+ it('should correctly build payload with thinking mode enabled', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Solve this problem', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.9,
+ thinking: { type: 'enabled', budget_tokens: 0 },
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 64000,
+ messages: [
+ {
+ content: [
+ { cache_control: { type: 'ephemeral' }, text: 'Solve this problem', type: 'text' },
+ ],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ thinking: { type: 'enabled', budget_tokens: 0 },
+ });
+ });
+
+ it('should respect max_tokens in thinking mode when provided', async () => {
+ const payload: ChatStreamPayload = {
+ max_tokens: 1000,
+ messages: [{ content: 'Solve this problem', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ thinking: { type: 'enabled', budget_tokens: 0 },
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 1000,
+ messages: [
+ {
+ content: [
+ { cache_control: { type: 'ephemeral' }, text: 'Solve this problem', type: 'text' },
+ ],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ thinking: { type: 'enabled', budget_tokens: 0 },
+ });
+ });
+
+ it('should use budget_tokens in thinking mode when provided', async () => {
+ const payload: ChatStreamPayload = {
+ max_tokens: 1000,
+ messages: [{ content: 'Solve this problem', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.5,
+ thinking: { type: 'enabled', budget_tokens: 2000 },
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 3000, // budget_tokens + max_tokens
+ messages: [
+ {
+ content: [
+ { cache_control: { type: 'ephemeral' }, text: 'Solve this problem', type: 'text' },
+ ],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ thinking: { type: 'enabled', budget_tokens: 2000 },
+ });
+ });
+
+ it('should cap max_tokens at 64000 in thinking mode', async () => {
+ const payload: ChatStreamPayload = {
+ max_tokens: 10000,
+ messages: [{ content: 'Solve this problem', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.6,
+ thinking: { type: 'enabled', budget_tokens: 60000 },
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result).toEqual({
+ max_tokens: 64000, // capped at 64000
+ messages: [
+ {
+ content: [
+ { cache_control: { type: 'ephemeral' }, text: 'Solve this problem', type: 'text' },
+ ],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ thinking: { type: 'enabled', budget_tokens: 60000 },
+ });
+ });
+
+ it('should set correct max_tokens based on model for claude-3 models', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result.max_tokens).toBe(4096);
+ });
+
+ it('should set correct max_tokens based on model for non claude-3 models', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-2.1',
+ temperature: 0.7,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result.max_tokens).toBe(8192);
+ });
+
+ it('should respect max_tokens when explicitly provided', async () => {
+ const payload: ChatStreamPayload = {
+ max_tokens: 2000,
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result.max_tokens).toBe(2000);
+ });
+
+ it('should correctly handle temperature scaling', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 1.0,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result.temperature).toBe(0.5); // Anthropic uses 0-1 scale, so divide by 2
+ });
+
+ it('should not include temperature when not provided in payload', async () => {
+ // We need to create a partial payload without temperature
+ // but since the type requires it, we'll use type assertion
+ const partialPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ } as ChatStreamPayload;
+
+ // Delete the temperature property to simulate it not being provided
+ delete (partialPayload as any).temperature;
+
+ const result = await instance['buildAnthropicPayload'](partialPayload);
+
+ expect(result.temperature).toBeUndefined();
+ });
+
+ it('should not include top_p when thinking is enabled', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ thinking: { type: 'enabled', budget_tokens: 0 },
+ top_p: 0.9,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result.top_p).toBeUndefined();
+ });
+
+ it('should include top_p when thinking is not enabled', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ top_p: 0.9,
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ expect(result.top_p).toBe(0.9);
+ });
+
+ it('should handle thinking with type disabled', async () => {
+ const payload: ChatStreamPayload = {
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.7,
+ thinking: { type: 'disabled', budget_tokens: 0 },
+ };
+
+ const result = await instance['buildAnthropicPayload'](payload);
+
+ // When thinking is disabled, it should be treated as if thinking wasn't provided
+ expect(result).toEqual({
+ max_tokens: 4096,
+ messages: [
+ {
+ content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+ role: 'user',
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ temperature: 0.35,
+ });
+ });
+ });
  });
  });
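
Note on the temperature expectations above: every input temperature comes back halved (0.5 → 0.25, 0.7 → 0.35, 1.0 → 0.5), matching the inline comment that Anthropic accepts a 0-1 range while the payload uses an OpenAI-style 0-2 range. A minimal sketch of that conversion (the helper name is illustrative, not part of the package):

// Illustrative only: the tests expect the payload temperature to be divided by two.
const toAnthropicTemperature = (temperature: number) => temperature / 2;

toAnthropicTemperature(0.5); // 0.25
toAnthropicTemperature(0.7); // 0.35
toAnthropicTemperature(1.0); // 0.5
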
@@ -100,10 +100,13 @@ export class LobeAnthropicAI implements LobeRuntimeAI {

  const postTools = buildAnthropicTools(tools, { enabledContextCaching });

- if (!!thinking) {
- const maxTokens =
- // claude 3.7 thinking has max output of 64000 tokens
- max_tokens ?? (thinking?.budget_tokens ? thinking?.budget_tokens + 64_000 : 8192);
+ if (!!thinking && thinking.type === 'enabled') {
+ // claude 3.7 thinking has max output of 64000 tokens
+ const maxTokens = !!max_tokens
+ ? thinking?.budget_tokens && thinking?.budget_tokens > max_tokens
+ ? Math.min(thinking?.budget_tokens + max_tokens, 64_000)
+ : max_tokens
+ : 64_000;

  // `temperature` may only be set to 1 when thinking is enabled.
  // `top_p` must be unset when thinking is enabled.
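
The rewritten branch only applies when thinking.type === 'enabled'; the nested ternary encodes the token-budget rule that the new tests exercise. A minimal equivalent sketch (the standalone helper is illustrative, not the package's code):

// Assumed equivalent of the max_tokens rule for thinking mode, based on the hunk above.
const resolveThinkingMaxTokens = (maxTokens?: number, budgetTokens?: number): number => {
  // No explicit max_tokens: fall back to the Claude 3.7 thinking ceiling of 64k.
  if (!maxTokens) return 64_000;

  // A budget larger than max_tokens widens the window, capped at 64k.
  if (budgetTokens && budgetTokens > maxTokens) return Math.min(budgetTokens + maxTokens, 64_000);

  return maxTokens;
};

resolveThinkingMaxTokens(undefined, 0); // 64000
resolveThinkingMaxTokens(1000, 0); // 1000
resolveThinkingMaxTokens(1000, 2000); // 3000 (budget_tokens + max_tokens)
resolveThinkingMaxTokens(10000, 60000); // 64000 (capped)
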
@@ -65,6 +65,9 @@ export default {
  stop: '停止',
  warp: '换行',
  },
+ intentUnderstanding: {
+ title: '正在理解并分析您的意图...',
+ },
  knowledgeBase: {
  all: '所有内容',
  allFiles: '所有文件',
@@ -142,13 +145,11 @@ export default {
  searchQueries: '搜索关键词',
  title: '已搜索到 {{count}} 个结果',
  },
-
  mode: {
  auto: {
  desc: '根据对话内容智能判断是否需要搜索',
  title: '智能联网',
  },
- disable: '当前模型不支持函数调用,因此无法使用智能联网功能',
  off: {
  desc: '仅使用模型的基础知识,不进行网络搜索',
  title: '关闭联网',
@@ -159,7 +160,10 @@ export default {
  },
  useModelBuiltin: '使用模型内置搜索引擎',
  },
-
+ searchModel: {
+ desc: '当前模型不支持函数调用,因此需要搭配支持函数调用的模型才能联网搜索',
+ title: '搜索辅助模型',
+ },
  title: '联网搜索',
  },
  searchAgentPlaceholder: '搜索助手...',
@@ -20,7 +20,14 @@ export const searchRouter = router({
  }),
  )
  .mutation(async ({ input }) => {
- const crawler = new Crawler();
+ const envString = toolsEnv.CRAWLER_IMPLS || '';
+
+ // 处理全角逗号和多余空格
+ let envValue = envString.replaceAll('，', ',').trim();
+
+ const impls = envValue.split(',').filter(Boolean);
+
+ const crawler = new Crawler({ impls });

  const results = await pMap(
  input.urls,
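
For context, the new branch parses the CRAWLER_IMPLS environment variable into a list of crawler implementations before constructing the Crawler. A minimal sketch with a hypothetical value (the implementation names are examples only):

// Hypothetical env value: full-width comma, trailing comma, stray whitespace.
const envString = '，jina，browserless, ';
const impls = envString
  .replaceAll('，', ',') // normalize full-width commas
  .trim()
  .split(',')
  .filter(Boolean); // drop empty entries
// impls === ['jina', 'browserless']
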
@@ -877,6 +877,7 @@ describe('ChatService', () => {
  // 重新模拟模块,设置 isServerMode 为 true
  vi.doMock('@/const/version', () => ({
  isServerMode: true,
+ isDeprecatedEdition: false,
  }));

  // 需要在修改模拟后重新导入相关模块
@@ -32,6 +32,7 @@ import {
  userProfileSelectors,
  } from '@/store/user/selectors';
  import { WebBrowsingManifest } from '@/tools/web-browsing';
+ import { WorkingModel } from '@/types/agent';
  import { ChatErrorType } from '@/types/fetch';
  import { ChatMessage, MessageToolCall } from '@/types/message';
  import type { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';
@@ -201,17 +202,10 @@ class ChatService {

  // ============ 2. preprocess tools ============ //

- let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
-
- // check this model can use function call
- const canUseFC = isCanUseFC(payload.model, payload.provider!);
-
- // the rule that model can use tools:
- // 1. tools is not empty
- // 2. model can use function call
- const shouldUseTools = filterTools.length > 0 && canUseFC;
-
- const tools = shouldUseTools ? filterTools : undefined;
+ const tools = this.prepareTools(pluginIds, {
+ model: payload.model,
+ provider: payload.provider!,
+ });

  // ============ 3. process extend params ============ //

@@ -433,16 +427,30 @@ class ChatService {
  onLoadingChange?.(true);

  try {
- await this.getChatCompletion(params, {
- onErrorHandle: (error) => {
- errorHandle(new Error(error.message), error);
- },
- onFinish,
- onMessageHandle,
- signal: abortController?.signal,
- trace: this.mapTrace(trace, TraceTagMap.SystemChain),
+ const oaiMessages = this.processMessages({
+ messages: params.messages as any,
+ model: params.model!,
+ provider: params.provider!,
+ tools: params.plugins,
+ });
+ const tools = this.prepareTools(params.plugins || [], {
+ model: params.model!,
+ provider: params.provider!,
  });

+ await this.getChatCompletion(
+ { ...params, messages: oaiMessages, tools },
+ {
+ onErrorHandle: (error) => {
+ errorHandle(new Error(error.message), error);
+ },
+ onFinish,
+ onMessageHandle,
+ signal: abortController?.signal,
+ trace: this.mapTrace(trace, TraceTagMap.SystemChain),
+ },
+ );
+
  onLoadingChange?.(false);
  } catch (e) {
  errorHandle(e as Error);
@@ -451,7 +459,7 @@

  private processMessages = (
  {
- messages,
+ messages = [],
  tools,
  model,
  provider,
@@ -483,6 +491,7 @@
  };

  let postMessages = messages.map((m): OpenAIChatMessage => {
+ const supportTools = isCanUseFC(model, provider);
  switch (m.role) {
  case 'user': {
  return { content: getContent(m), role: m.role };
@@ -492,17 +501,23 @@
  // signature is a signal of anthropic thinking mode
  const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;

+ const content = shouldIncludeThinking
+ ? [
+ {
+ signature: m.reasoning!.signature,
+ thinking: m.reasoning!.content,
+ type: 'thinking',
+ } as any,
+ { text: m.content, type: 'text' },
+ ]
+ : m.content;
+
+ if (!supportTools) {
+ return { content, role: m.role };
+ }
+
  return {
- content: shouldIncludeThinking
- ? [
- {
- signature: m.reasoning!.signature,
- thinking: m.reasoning!.content,
- type: 'thinking',
- } as any,
- { text: m.content, type: 'text' },
- ]
- : m.content,
+ content,
  role: m.role,
  tool_calls: m.tools?.map(
  (tool): MessageToolCall => ({
@@ -518,6 +533,10 @@
  }

  case 'tool': {
+ if (!supportTools) {
+ return { content: m.content, role: 'user' };
+ }
+
  return {
  content: m.content,
  name: genToolCallingName(m.plugin!.identifier, m.plugin!.apiName, m.plugin?.type),
@@ -669,6 +688,20 @@

  return reorderedMessages;
  };
+
+ private prepareTools = (pluginIds: string[], { model, provider }: WorkingModel) => {
+ let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
+
+ // check this model can use function call
+ const canUseFC = isCanUseFC(model, provider!);
+
+ // the rule that model can use tools:
+ // 1. tools is not empty
+ // 2. model can use function call
+ const shouldUseTools = filterTools.length > 0 && canUseFC;
+
+ return shouldUseTools ? filterTools : undefined;
+ };
  }

  export const chatService = new ChatService();
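
Net effect of prepareTools plus the supportTools guards added in processMessages: a model that cannot do function calling is sent no tools array, and any earlier tool result is replayed as a plain user message. A simplified, self-contained sketch of that downgrade (the type and names are illustrative, not the package's):

// Illustrative sketch: how a stored tool result is emitted depending on function-calling support.
type StoredToolMessage = { content: string; name: string; role: 'tool'; tool_call_id: string };

const emitToolMessage = (m: StoredToolMessage, supportTools: boolean) =>
  supportTools
    ? m // FC-capable models keep the tool role, name and call id
    : { content: m.content, role: 'user' as const }; // others see the result as a user message

emitToolMessage(
  { content: '{"results": []}', name: 'search____query', role: 'tool', tool_call_id: 'call_1' },
  false,
);
// → { content: '{"results": []}', role: 'user' }
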
@@ -53,7 +53,7 @@ export interface ISessionService {

  updateSessionChatConfig(
  id: string,
- config: DeepPartial<LobeAgentChatConfig>,
+ config: Partial<LobeAgentChatConfig>,
  signal?: AbortSignal,
  ): Promise<any>;

@@ -11,6 +11,10 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
  "enableReasoning": false,
  "historyCount": 8,
  "reasoningBudgetToken": 1024,
+ "searchFCModel": {
+ "model": "gpt-4o-mini",
+ "provider": "openai",
+ },
  "searchMode": "off",
  },
  "model": "gpt-3.5-turbo",
@@ -1,5 +1,5 @@
  import { contextCachingModels, thinkingWithToolClaudeModels } from '@/const/models';
- import { DEFAULT_AGENT_CHAT_CONFIG } from '@/const/settings';
+ import { DEFAULT_AGENT_CHAT_CONFIG, DEFAULT_AGENT_SEARCH_FC_MODEL } from '@/const/settings';
  import { AgentStoreState } from '@/store/agent/initialState';
  import { LobeAgentChatConfig } from '@/types/agent';

@@ -14,6 +14,9 @@ const isAgentEnableSearch = (s: AgentStoreState) => agentSearchMode(s) !== 'off'
  const useModelBuiltinSearch = (s: AgentStoreState) =>
  currentAgentChatConfig(s).useModelBuiltinSearch;

+ const searchFCModel = (s: AgentStoreState) =>
+ currentAgentChatConfig(s).searchFCModel || DEFAULT_AGENT_SEARCH_FC_MODEL;
+
  const enableHistoryCount = (s: AgentStoreState) => {
  const config = currentAgentConfig(s);
  const chatConfig = currentAgentChatConfig(s);
@@ -62,5 +65,6 @@ export const agentChatConfigSelectors = {
  enableHistoryDivider,
  historyCount,
  isAgentEnableSearch,
+ searchFCModel,
  useModelBuiltinSearch,
  };
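
A hedged usage sketch for the new selector; the import paths are assumptions based on the store layout, and the fallback mirrors the DEFAULT_AGENT_SEARCH_FC_MODEL value shown in the snapshot above:

// Assumed import paths; not verified against the package's barrel exports.
import { useAgentStore } from '@/store/agent';
import { agentChatConfigSelectors } from '@/store/agent/selectors';

const searchFCModel = agentChatConfigSelectors.searchFCModel(useAgentStore.getState());
// With no per-agent override: { model: 'gpt-4o-mini', provider: 'openai' }
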
@@ -765,10 +765,12 @@ describe('chatMessage actions', () => {
  (fetch as Mock).mockResolvedValueOnce(new Response(aiResponse));

  await act(async () => {
- const response = await result.current.internal_fetchAIChatMessage(
+ const response = await result.current.internal_fetchAIChatMessage({
  messages,
- assistantMessageId,
- );
+ messageId: assistantMessageId,
+ model: 'gpt-4o-mini',
+ provider: 'openai',
+ });
  expect(response.isFunctionCall).toEqual(false);
  });
  });
@@ -784,7 +786,13 @@

  await act(async () => {
  expect(
- await result.current.internal_fetchAIChatMessage(messages, assistantMessageId),
+ await result.current.internal_fetchAIChatMessage({
+ model: 'gpt-4o-mini',
+ provider: 'openai',
+
+ messages,
+ messageId: assistantMessageId,
+ }),
  ).toEqual({
  isFunctionCall: false,
  });