@lobehub/chat 1.69.5 → 1.70.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107) hide show
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/self-hosting/advanced/auth/clerk.zh-CN.mdx +1 -1
  4. package/docs/self-hosting/server-database/vercel.zh-CN.mdx +1 -1
  5. package/locales/ar/chat.json +7 -1
  6. package/locales/ar/components.json +2 -0
  7. package/locales/ar/models.json +3 -0
  8. package/locales/bg-BG/chat.json +7 -1
  9. package/locales/bg-BG/components.json +2 -0
  10. package/locales/bg-BG/models.json +3 -0
  11. package/locales/de-DE/chat.json +7 -1
  12. package/locales/de-DE/components.json +2 -0
  13. package/locales/de-DE/models.json +3 -0
  14. package/locales/en-US/chat.json +7 -1
  15. package/locales/en-US/components.json +2 -0
  16. package/locales/en-US/models.json +3 -0
  17. package/locales/es-ES/chat.json +7 -1
  18. package/locales/es-ES/components.json +2 -0
  19. package/locales/es-ES/models.json +3 -0
  20. package/locales/fa-IR/chat.json +7 -1
  21. package/locales/fa-IR/components.json +2 -0
  22. package/locales/fa-IR/models.json +3 -0
  23. package/locales/fr-FR/chat.json +7 -1
  24. package/locales/fr-FR/components.json +2 -0
  25. package/locales/fr-FR/models.json +3 -0
  26. package/locales/it-IT/chat.json +7 -1
  27. package/locales/it-IT/components.json +2 -0
  28. package/locales/it-IT/models.json +3 -0
  29. package/locales/ja-JP/chat.json +7 -1
  30. package/locales/ja-JP/components.json +2 -0
  31. package/locales/ja-JP/models.json +3 -0
  32. package/locales/ko-KR/chat.json +7 -1
  33. package/locales/ko-KR/components.json +2 -0
  34. package/locales/ko-KR/models.json +3 -0
  35. package/locales/nl-NL/chat.json +7 -1
  36. package/locales/nl-NL/components.json +2 -0
  37. package/locales/nl-NL/models.json +3 -0
  38. package/locales/pl-PL/chat.json +7 -1
  39. package/locales/pl-PL/components.json +2 -0
  40. package/locales/pl-PL/models.json +3 -0
  41. package/locales/pt-BR/chat.json +7 -1
  42. package/locales/pt-BR/components.json +2 -0
  43. package/locales/pt-BR/models.json +3 -0
  44. package/locales/ru-RU/chat.json +7 -1
  45. package/locales/ru-RU/components.json +2 -0
  46. package/locales/ru-RU/models.json +3 -0
  47. package/locales/tr-TR/chat.json +7 -1
  48. package/locales/tr-TR/components.json +2 -0
  49. package/locales/tr-TR/models.json +3 -0
  50. package/locales/vi-VN/chat.json +7 -1
  51. package/locales/vi-VN/components.json +2 -0
  52. package/locales/vi-VN/models.json +3 -0
  53. package/locales/zh-CN/chat.json +7 -1
  54. package/locales/zh-CN/components.json +3 -1
  55. package/locales/zh-CN/models.json +3 -0
  56. package/locales/zh-TW/chat.json +7 -1
  57. package/locales/zh-TW/components.json +2 -0
  58. package/locales/zh-TW/models.json +3 -0
  59. package/package.json +3 -3
  60. package/packages/web-crawler/package.json +1 -1
  61. package/packages/web-crawler/src/crawImpl/__tests__/browserless.test.ts +1 -1
  62. package/packages/web-crawler/src/crawImpl/browserless.ts +1 -1
  63. package/packages/web-crawler/src/crawImpl/naive.ts +1 -1
  64. package/packages/web-crawler/src/crawler.ts +11 -1
  65. package/packages/web-crawler/src/utils/__snapshots__/htmlToMarkdown.test.ts.snap +2 -382
  66. package/packages/web-crawler/src/utils/htmlToMarkdown.ts +32 -2
  67. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +4 -1
  68. package/src/app/(backend)/webapi/chat/[provider]/route.ts +5 -1
  69. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/InboxWelcome/AgentsSuggest.tsx +5 -1
  70. package/src/{features/Conversation/Messages/Assistant/Tool/Inspector/Loader.tsx → components/CircleLoader/index.tsx} +4 -3
  71. package/src/config/modelProviders/openai.ts +3 -0
  72. package/src/config/tools.ts +2 -0
  73. package/src/const/settings/agent.ts +6 -0
  74. package/src/database/client/db.ts +3 -3
  75. package/src/features/ChatInput/ActionBar/Search/FCSearchModel.tsx +56 -0
  76. package/src/features/ChatInput/ActionBar/Search/FunctionCallingModelSelect/index.tsx +85 -0
  77. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +9 -23
  78. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +15 -23
  79. package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +26 -0
  80. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +4 -4
  81. package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +15 -10
  82. package/src/features/Conversation/Messages/Assistant/IntentUnderstanding.tsx +25 -0
  83. package/src/features/Conversation/Messages/Assistant/Tool/Inspector/ToolTitle.tsx +1 -2
  84. package/src/features/Conversation/Messages/Assistant/index.tsx +18 -9
  85. package/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx +51 -52
  86. package/src/features/ModelSwitchPanel/index.tsx +34 -8
  87. package/src/hooks/useAgentEnableSearch.ts +1 -9
  88. package/src/libs/agent-runtime/anthropic/index.ts +5 -2
  89. package/src/locales/default/chat.ts +7 -3
  90. package/src/locales/default/components.ts +3 -1
  91. package/src/server/routers/tools/search.ts +8 -1
  92. package/src/services/__tests__/chat.test.ts +124 -0
  93. package/src/services/chat.ts +82 -49
  94. package/src/services/session/type.ts +1 -1
  95. package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap +4 -0
  96. package/src/store/agent/slices/chat/selectors/chatConfig.ts +5 -1
  97. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +12 -4
  98. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +113 -13
  99. package/src/store/chat/slices/aiChat/initialState.ts +2 -0
  100. package/src/store/chat/slices/aiChat/selectors.ts +8 -1
  101. package/src/store/chat/slices/message/action.ts +9 -1
  102. package/src/store/chat/slices/plugin/action.ts +6 -4
  103. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +4 -0
  104. package/src/types/agent/chatConfig.ts +6 -0
  105. package/src/types/openai/chat.ts +0 -1
  106. package/src/utils/fetch/__tests__/fetchSSE.test.ts +3 -2
  107. package/src/utils/fetch/fetchSSE.ts +1 -1
@@ -9,19 +9,11 @@ export const useAgentEnableSearch = () => {
9
9
  agentChatConfigSelectors.agentSearchMode(s),
10
10
  ]);
11
11
 
12
- const isModelSupportToolUse = useAiInfraStore(
13
- aiModelSelectors.isModelSupportToolUse(model, provider),
14
- );
15
12
  const searchImpl = useAiInfraStore(aiModelSelectors.modelBuiltinSearchImpl(model, provider));
16
13
 
17
14
  // 只要是内置的搜索实现,一定可以联网搜索
18
15
  if (searchImpl === 'internal') return true;
19
16
 
20
17
  // 如果是关闭状态,一定不能联网搜索
21
- if (agentSearchMode === 'off') return false;
22
-
23
- // 如果是智能模式,根据是否支持 Tool Calling 判断
24
- if (agentSearchMode === 'auto') {
25
- return isModelSupportToolUse;
26
- }
18
+ return agentSearchMode !== 'off';
27
19
  };
@@ -102,7 +102,8 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
102
102
 
103
103
  if (!!thinking) {
104
104
  const maxTokens =
105
- max_tokens ?? (thinking?.budget_tokens ? thinking?.budget_tokens + 4096 : 4096);
105
+ // claude 3.7 thinking has max output of 64000 tokens
106
+ max_tokens ?? (thinking?.budget_tokens ? thinking?.budget_tokens + 64_000 : 8192);
106
107
 
107
108
  // `temperature` may only be set to 1 when thinking is enabled.
108
109
  // `top_p` must be unset when thinking is enabled.
@@ -117,7 +118,9 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
117
118
  }
118
119
 
119
120
  return {
120
- max_tokens: max_tokens ?? 4096,
121
+ // claude 3 series models have a max output of 4096 tokens, 3.x series has 8192
122
+ // https://docs.anthropic.com/en/docs/about-claude/models/all-models#:~:text=200K-,Max%20output,-Normal%3A
123
+ max_tokens: max_tokens ?? (model.startsWith('claude-3-') ? 4096 : 8192),
121
124
  messages: postMessages,
122
125
  model,
123
126
  system: systemPrompts,
@@ -65,6 +65,9 @@ export default {
65
65
  stop: '停止',
66
66
  warp: '换行',
67
67
  },
68
+ intentUnderstanding: {
69
+ title: '正在分析并理解您的意图...',
70
+ },
68
71
  knowledgeBase: {
69
72
  all: '所有内容',
70
73
  allFiles: '所有文件',
@@ -142,13 +145,11 @@ export default {
142
145
  searchQueries: '搜索关键词',
143
146
  title: '已搜索到 {{count}} 个结果',
144
147
  },
145
-
146
148
  mode: {
147
149
  auto: {
148
150
  desc: '根据对话内容智能判断是否需要搜索',
149
151
  title: '智能联网',
150
152
  },
151
- disable: '当前模型不支持函数调用,因此无法使用智能联网功能',
152
153
  off: {
153
154
  desc: '仅使用模型的基础知识,不进行网络搜索',
154
155
  title: '关闭联网',
@@ -159,7 +160,10 @@ export default {
159
160
  },
160
161
  useModelBuiltin: '使用模型内置搜索引擎',
161
162
  },
162
-
163
+ searchModel: {
164
+ desc: '当前模型不支持函数调用,因此需要搭配支持函数调用的模型才能联网搜索',
165
+ title: '搜索辅助模型',
166
+ },
163
167
  title: '联网搜索',
164
168
  },
165
169
  searchAgentPlaceholder: '搜索助手...',
@@ -87,7 +87,9 @@ export default {
87
87
  },
88
88
  ModelSwitchPanel: {
89
89
  emptyModel: '没有启用的模型,请前往设置开启',
90
- provider: '提供商',
90
+ emptyProvider: '没有启用的服务商,请前往设置开启',
91
+ goToSettings: '前往设置',
92
+ provider: '服务商',
91
93
  },
92
94
  OllamaSetupGuide: {
93
95
  cors: {
@@ -20,7 +20,14 @@ export const searchRouter = router({
20
20
  }),
21
21
  )
22
22
  .mutation(async ({ input }) => {
23
- const crawler = new Crawler();
23
+ const envString = toolsEnv.CRAWLER_IMPLS || '';
24
+
25
+ // 处理全角逗号和多余空格
26
+ let envValue = envString.replaceAll(',', ',').trim();
27
+
28
+ const impls = envValue.split(',').filter(Boolean);
29
+
30
+ const crawler = new Crawler({ impls });
24
31
 
25
32
  const results = await pMap(
26
33
  input.urls,
@@ -26,12 +26,16 @@ import {
26
26
  ModelProvider,
27
27
  } from '@/libs/agent-runtime';
28
28
  import { AgentRuntime } from '@/libs/agent-runtime';
29
+ import { agentChatConfigSelectors } from '@/store/agent/selectors';
30
+ import { aiModelSelectors } from '@/store/aiInfra';
29
31
  import { useToolStore } from '@/store/tool';
32
+ import { toolSelectors } from '@/store/tool/selectors';
30
33
  import { UserStore } from '@/store/user';
31
34
  import { useUserStore } from '@/store/user';
32
35
  import { modelConfigSelectors } from '@/store/user/selectors';
33
36
  import { UserSettingsState, initialSettingsState } from '@/store/user/slices/settings/initialState';
34
37
  import { DalleManifest } from '@/tools/dalle';
38
+ import { WebBrowsingManifest } from '@/tools/web-browsing';
35
39
  import { ChatMessage } from '@/types/message';
36
40
  import { ChatStreamPayload, type OpenAIChatMessage } from '@/types/openai/chat';
37
41
  import { LobeTool } from '@/types/tool';
@@ -480,6 +484,125 @@ describe('ChatService', () => {
480
484
  expect(calls![1]).toBeUndefined();
481
485
  });
482
486
  });
487
+
488
+ describe('search functionality', () => {
489
+ it('should add WebBrowsingManifest when search is enabled and not using model built-in search', async () => {
490
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
491
+
492
+ const messages = [{ content: 'Search for something', role: 'user' }] as ChatMessage[];
493
+
494
+ // Mock agent store state with search enabled
495
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValueOnce({
496
+ searchMode: 'auto', // not 'off'
497
+ useModelBuiltinSearch: false,
498
+ } as any);
499
+
500
+ // Mock AI infra store state
501
+ vi.spyOn(aiModelSelectors, 'isModelHasBuiltinSearch').mockReturnValueOnce(() => false);
502
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValueOnce(() => false);
503
+
504
+ // Mock tool selectors
505
+ vi.spyOn(toolSelectors, 'enabledSchema').mockReturnValueOnce(() => [
506
+ {
507
+ type: 'function',
508
+ function: {
509
+ name: WebBrowsingManifest.identifier + '____search',
510
+ description: 'Search the web',
511
+ },
512
+ },
513
+ ]);
514
+
515
+ await chatService.createAssistantMessage({ messages, plugins: [] });
516
+
517
+ // Verify tools were passed to getChatCompletion
518
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
519
+ expect.objectContaining({
520
+ tools: expect.arrayContaining([
521
+ expect.objectContaining({
522
+ function: expect.objectContaining({
523
+ name: expect.stringContaining(WebBrowsingManifest.identifier),
524
+ }),
525
+ }),
526
+ ]),
527
+ }),
528
+ undefined,
529
+ );
530
+ });
531
+
532
+ it('should enable built-in search when model supports it and useModelBuiltinSearch is true', async () => {
533
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
534
+
535
+ const messages = [{ content: 'Search for something', role: 'user' }] as ChatMessage[];
536
+
537
+ // Mock agent store state with search enabled and useModelBuiltinSearch enabled
538
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValueOnce({
539
+ searchMode: 'auto', // not 'off'
540
+ useModelBuiltinSearch: true,
541
+ } as any);
542
+
543
+ // Mock AI infra store state - model has built-in search
544
+ vi.spyOn(aiModelSelectors, 'isModelHasBuiltinSearch').mockReturnValueOnce(() => true);
545
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValueOnce(() => false);
546
+
547
+ // Mock tool selectors
548
+ vi.spyOn(toolSelectors, 'enabledSchema').mockReturnValueOnce(() => [
549
+ {
550
+ type: 'function',
551
+ function: {
552
+ name: WebBrowsingManifest.identifier + '____search',
553
+ description: 'Search the web',
554
+ },
555
+ },
556
+ ]);
557
+
558
+ await chatService.createAssistantMessage({ messages, plugins: [] });
559
+
560
+ // Verify enabledSearch was set to true
561
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
562
+ expect.objectContaining({
563
+ enabledSearch: true,
564
+ }),
565
+ undefined,
566
+ );
567
+ });
568
+
569
+ it('should not enable search when searchMode is off', async () => {
570
+ const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
571
+
572
+ const messages = [{ content: 'Search for something', role: 'user' }] as ChatMessage[];
573
+
574
+ // Mock agent store state with search disabled
575
+ vi.spyOn(agentChatConfigSelectors, 'currentChatConfig').mockReturnValueOnce({
576
+ searchMode: 'off',
577
+ useModelBuiltinSearch: true,
578
+ } as any);
579
+
580
+ // Mock AI infra store state
581
+ vi.spyOn(aiModelSelectors, 'isModelHasBuiltinSearch').mockReturnValueOnce(() => true);
582
+ vi.spyOn(aiModelSelectors, 'isModelHasExtendParams').mockReturnValueOnce(() => false);
583
+
584
+ // Mock tool selectors
585
+ vi.spyOn(toolSelectors, 'enabledSchema').mockReturnValueOnce(() => [
586
+ {
587
+ type: 'function',
588
+ function: {
589
+ name: WebBrowsingManifest.identifier + '____search',
590
+ description: 'Search the web',
591
+ },
592
+ },
593
+ ]);
594
+
595
+ await chatService.createAssistantMessage({ messages, plugins: [] });
596
+
597
+ // Verify enabledSearch was not set
598
+ expect(getChatCompletionSpy).toHaveBeenCalledWith(
599
+ expect.objectContaining({
600
+ enabledSearch: undefined,
601
+ }),
602
+ undefined,
603
+ );
604
+ });
605
+ });
483
606
  });
484
607
 
485
608
  describe('getChatCompletion', () => {
@@ -754,6 +877,7 @@ describe('ChatService', () => {
754
877
  // 重新模拟模块,设置 isServerMode 为 true
755
878
  vi.doMock('@/const/version', () => ({
756
879
  isServerMode: true,
880
+ isDeprecatedEdition: false,
757
881
  }));
758
882
 
759
883
  // 需要在修改模拟后重新导入相关模块
@@ -19,12 +19,7 @@ import { filesPrompts } from '@/prompts/files';
19
19
  import { BuiltinSystemRolePrompts } from '@/prompts/systemRole';
20
20
  import { getAgentStoreState } from '@/store/agent';
21
21
  import { agentChatConfigSelectors } from '@/store/agent/selectors';
22
- import {
23
- aiModelSelectors,
24
- aiProviderSelectors,
25
- getAiInfraStoreState,
26
- useAiInfraStore,
27
- } from '@/store/aiInfra';
22
+ import { aiModelSelectors, aiProviderSelectors, getAiInfraStoreState } from '@/store/aiInfra';
28
23
  import { getSessionStoreState } from '@/store/session';
29
24
  import { sessionMetaSelectors } from '@/store/session/selectors';
30
25
  import { getToolStoreState } from '@/store/tool';
@@ -37,6 +32,7 @@ import {
37
32
  userProfileSelectors,
38
33
  } from '@/store/user/selectors';
39
34
  import { WebBrowsingManifest } from '@/tools/web-browsing';
35
+ import { WorkingModel } from '@/types/agent';
40
36
  import { ChatErrorType } from '@/types/fetch';
41
37
  import { ChatMessage, MessageToolCall } from '@/types/message';
42
38
  import type { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';
@@ -74,9 +70,9 @@ const findDeploymentName = (model: string, provider: string) => {
74
70
  if (deploymentName) deploymentId = deploymentName;
75
71
  } else {
76
72
  // find the model by id
77
- const modelItem = useAiInfraStore
78
- .getState()
79
- .enabledAiModels?.find((i) => i.id === model && i.providerId === provider);
73
+ const modelItem = getAiInfraStoreState().enabledAiModels?.find(
74
+ (i) => i.id === model && i.providerId === provider,
75
+ );
80
76
 
81
77
  if (modelItem && modelItem.config?.deploymentName) {
82
78
  deploymentId = modelItem.config?.deploymentName;
@@ -91,7 +87,7 @@ const isEnableFetchOnClient = (provider: string) => {
91
87
  if (isDeprecatedEdition) {
92
88
  return modelConfigSelectors.isProviderFetchOnClient(provider)(useUserStore.getState());
93
89
  } else {
94
- return aiProviderSelectors.isProviderFetchOnClient(provider)(useAiInfraStore.getState());
90
+ return aiProviderSelectors.isProviderFetchOnClient(provider)(getAiInfraStoreState());
95
91
  }
96
92
  };
97
93
 
@@ -181,10 +177,10 @@ class ChatService {
181
177
  const isModelHasBuiltinSearch = aiModelSelectors.isModelHasBuiltinSearch(
182
178
  payload.model,
183
179
  payload.provider!,
184
- )(useAiInfraStore.getState());
180
+ )(getAiInfraStoreState());
185
181
 
186
- const useApplicationBuiltinSearchTool =
187
- enabledSearch && !(isModelHasBuiltinSearch && chatConfig.useModelBuiltinSearch);
182
+ const useModelSearch = isModelHasBuiltinSearch && chatConfig.useModelBuiltinSearch;
183
+ const useApplicationBuiltinSearchTool = enabledSearch && !useModelSearch;
188
184
 
189
185
  const pluginIds = [...(enabledPlugins || [])];
190
186
 
@@ -206,17 +202,10 @@ class ChatService {
206
202
 
207
203
  // ============ 2. preprocess tools ============ //
208
204
 
209
- let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
210
-
211
- // check this model can use function call
212
- const canUseFC = isCanUseFC(payload.model, payload.provider!);
213
-
214
- // the rule that model can use tools:
215
- // 1. tools is not empty
216
- // 2. model can use function call
217
- const shouldUseTools = filterTools.length > 0 && canUseFC;
218
-
219
- const tools = shouldUseTools ? filterTools : undefined;
205
+ const tools = this.prepareTools(pluginIds, {
206
+ model: payload.model,
207
+ provider: payload.provider!,
208
+ });
220
209
 
221
210
  // ============ 3. process extend params ============ //
222
211
 
@@ -225,14 +214,14 @@ class ChatService {
225
214
  const isModelHasExtendParams = aiModelSelectors.isModelHasExtendParams(
226
215
  payload.model,
227
216
  payload.provider!,
228
- )(useAiInfraStore.getState());
217
+ )(getAiInfraStoreState());
229
218
 
230
219
  // model
231
220
  if (isModelHasExtendParams) {
232
221
  const modelExtendParams = aiModelSelectors.modelExtendParams(
233
222
  payload.model,
234
223
  payload.provider!,
235
- )(useAiInfraStore.getState());
224
+ )(getAiInfraStoreState());
236
225
  // if model has extended params, then we need to check if the model can use reasoning
237
226
 
238
227
  if (modelExtendParams!.includes('enableReasoning') && chatConfig.enableReasoning) {
@@ -241,13 +230,19 @@ class ChatService {
241
230
  type: 'enabled',
242
231
  };
243
232
  }
233
+ if (
234
+ modelExtendParams!.includes('disableContextCaching') &&
235
+ chatConfig.disableContextCaching
236
+ ) {
237
+ extendParams.enabledContextCaching = false;
238
+ }
244
239
  }
245
240
 
246
241
  return this.getChatCompletion(
247
242
  {
248
243
  ...params,
249
244
  ...extendParams,
250
- enabledSearch: enabledSearch && isModelHasBuiltinSearch ? true : undefined,
245
+ enabledSearch: enabledSearch && useModelSearch ? true : undefined,
251
246
  messages: oaiMessages,
252
247
  tools,
253
248
  },
@@ -351,9 +346,8 @@ class ChatService {
351
346
 
352
347
  // TODO: remove `!isDeprecatedEdition` condition in V2.0
353
348
  if (!isDeprecatedEdition && !isBuiltin) {
354
- const providerConfig = aiProviderSelectors.providerConfigById(provider)(
355
- useAiInfraStore.getState(),
356
- );
349
+ const providerConfig =
350
+ aiProviderSelectors.providerConfigById(provider)(getAiInfraStoreState());
357
351
 
358
352
  sdkType = providerConfig?.settings.sdkType || 'openai';
359
353
  }
@@ -433,16 +427,30 @@ class ChatService {
433
427
  onLoadingChange?.(true);
434
428
 
435
429
  try {
436
- await this.getChatCompletion(params, {
437
- onErrorHandle: (error) => {
438
- errorHandle(new Error(error.message), error);
439
- },
440
- onFinish,
441
- onMessageHandle,
442
- signal: abortController?.signal,
443
- trace: this.mapTrace(trace, TraceTagMap.SystemChain),
430
+ const oaiMessages = this.processMessages({
431
+ messages: params.messages as any,
432
+ model: params.model!,
433
+ provider: params.provider!,
434
+ tools: params.plugins,
435
+ });
436
+ const tools = this.prepareTools(params.plugins || [], {
437
+ model: params.model!,
438
+ provider: params.provider!,
444
439
  });
445
440
 
441
+ await this.getChatCompletion(
442
+ { ...params, messages: oaiMessages, tools },
443
+ {
444
+ onErrorHandle: (error) => {
445
+ errorHandle(new Error(error.message), error);
446
+ },
447
+ onFinish,
448
+ onMessageHandle,
449
+ signal: abortController?.signal,
450
+ trace: this.mapTrace(trace, TraceTagMap.SystemChain),
451
+ },
452
+ );
453
+
446
454
  onLoadingChange?.(false);
447
455
  } catch (e) {
448
456
  errorHandle(e as Error);
@@ -451,7 +459,7 @@ class ChatService {
451
459
 
452
460
  private processMessages = (
453
461
  {
454
- messages,
462
+ messages = [],
455
463
  tools,
456
464
  model,
457
465
  provider,
@@ -483,6 +491,7 @@ class ChatService {
483
491
  };
484
492
 
485
493
  let postMessages = messages.map((m): OpenAIChatMessage => {
494
+ const supportTools = isCanUseFC(model, provider);
486
495
  switch (m.role) {
487
496
  case 'user': {
488
497
  return { content: getContent(m), role: m.role };
@@ -492,17 +501,23 @@ class ChatService {
492
501
  // signature is a signal of anthropic thinking mode
493
502
  const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;
494
503
 
504
+ const content = shouldIncludeThinking
505
+ ? [
506
+ {
507
+ signature: m.reasoning!.signature,
508
+ thinking: m.reasoning!.content,
509
+ type: 'thinking',
510
+ } as any,
511
+ { text: m.content, type: 'text' },
512
+ ]
513
+ : m.content;
514
+
515
+ if (!supportTools) {
516
+ return { content, role: m.role };
517
+ }
518
+
495
519
  return {
496
- content: shouldIncludeThinking
497
- ? [
498
- {
499
- signature: m.reasoning!.signature,
500
- thinking: m.reasoning!.content,
501
- type: 'thinking',
502
- } as any,
503
- { text: m.content, type: 'text' },
504
- ]
505
- : m.content,
520
+ content,
506
521
  role: m.role,
507
522
  tool_calls: m.tools?.map(
508
523
  (tool): MessageToolCall => ({
@@ -518,6 +533,10 @@ class ChatService {
518
533
  }
519
534
 
520
535
  case 'tool': {
536
+ if (!supportTools) {
537
+ return { content: m.content, role: 'user' };
538
+ }
539
+
521
540
  return {
522
541
  content: m.content,
523
542
  name: genToolCallingName(m.plugin!.identifier, m.plugin!.apiName, m.plugin?.type),
@@ -669,6 +688,20 @@ class ChatService {
669
688
 
670
689
  return reorderedMessages;
671
690
  };
691
+
692
+ private prepareTools = (pluginIds: string[], { model, provider }: WorkingModel) => {
693
+ let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
694
+
695
+ // check this model can use function call
696
+ const canUseFC = isCanUseFC(model, provider!);
697
+
698
+ // the rule that model can use tools:
699
+ // 1. tools is not empty
700
+ // 2. model can use function call
701
+ const shouldUseTools = filterTools.length > 0 && canUseFC;
702
+
703
+ return shouldUseTools ? filterTools : undefined;
704
+ };
672
705
  }
673
706
 
674
707
  export const chatService = new ChatService();
@@ -53,7 +53,7 @@ export interface ISessionService {
53
53
 
54
54
  updateSessionChatConfig(
55
55
  id: string,
56
- config: DeepPartial<LobeAgentChatConfig>,
56
+ config: Partial<LobeAgentChatConfig>,
57
57
  signal?: AbortSignal,
58
58
  ): Promise<any>;
59
59
 
@@ -11,6 +11,10 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
11
11
  "enableReasoning": false,
12
12
  "historyCount": 8,
13
13
  "reasoningBudgetToken": 1024,
14
+ "searchFCModel": {
15
+ "model": "gpt-4o-mini",
16
+ "provider": "openai",
17
+ },
14
18
  "searchMode": "off",
15
19
  },
16
20
  "model": "gpt-3.5-turbo",
@@ -1,5 +1,5 @@
1
1
  import { contextCachingModels, thinkingWithToolClaudeModels } from '@/const/models';
2
- import { DEFAULT_AGENT_CHAT_CONFIG } from '@/const/settings';
2
+ import { DEFAULT_AGENT_CHAT_CONFIG, DEFAULT_AGENT_SEARCH_FC_MODEL } from '@/const/settings';
3
3
  import { AgentStoreState } from '@/store/agent/initialState';
4
4
  import { LobeAgentChatConfig } from '@/types/agent';
5
5
 
@@ -14,6 +14,9 @@ const isAgentEnableSearch = (s: AgentStoreState) => agentSearchMode(s) !== 'off'
14
14
  const useModelBuiltinSearch = (s: AgentStoreState) =>
15
15
  currentAgentChatConfig(s).useModelBuiltinSearch;
16
16
 
17
+ const searchFCModel = (s: AgentStoreState) =>
18
+ currentAgentChatConfig(s).searchFCModel || DEFAULT_AGENT_SEARCH_FC_MODEL;
19
+
17
20
  const enableHistoryCount = (s: AgentStoreState) => {
18
21
  const config = currentAgentConfig(s);
19
22
  const chatConfig = currentAgentChatConfig(s);
@@ -62,5 +65,6 @@ export const agentChatConfigSelectors = {
62
65
  enableHistoryDivider,
63
66
  historyCount,
64
67
  isAgentEnableSearch,
68
+ searchFCModel,
65
69
  useModelBuiltinSearch,
66
70
  };
@@ -765,10 +765,12 @@ describe('chatMessage actions', () => {
765
765
  (fetch as Mock).mockResolvedValueOnce(new Response(aiResponse));
766
766
 
767
767
  await act(async () => {
768
- const response = await result.current.internal_fetchAIChatMessage(
768
+ const response = await result.current.internal_fetchAIChatMessage({
769
769
  messages,
770
- assistantMessageId,
771
- );
770
+ messageId: assistantMessageId,
771
+ model: 'gpt-4o-mini',
772
+ provider: 'openai',
773
+ });
772
774
  expect(response.isFunctionCall).toEqual(false);
773
775
  });
774
776
  });
@@ -784,7 +786,13 @@ describe('chatMessage actions', () => {
784
786
 
785
787
  await act(async () => {
786
788
  expect(
787
- await result.current.internal_fetchAIChatMessage(messages, assistantMessageId),
789
+ await result.current.internal_fetchAIChatMessage({
790
+ model: 'gpt-4o-mini',
791
+ provider: 'openai',
792
+
793
+ messages,
794
+ messageId: assistantMessageId,
795
+ }),
788
796
  ).toEqual({
789
797
  isFunctionCall: false,
790
798
  });