@lobehub/chat 1.62.11 → 1.63.0

This diff shows the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (107)
  1. package/CHANGELOG.md +33 -0
  2. package/changelog/v1.json +12 -0
  3. package/locales/ar/chat.json +26 -0
  4. package/locales/ar/models.json +21 -0
  5. package/locales/bg-BG/chat.json +26 -0
  6. package/locales/bg-BG/models.json +21 -0
  7. package/locales/de-DE/chat.json +26 -0
  8. package/locales/de-DE/models.json +21 -0
  9. package/locales/en-US/chat.json +26 -0
  10. package/locales/en-US/models.json +21 -0
  11. package/locales/es-ES/chat.json +26 -0
  12. package/locales/es-ES/models.json +21 -0
  13. package/locales/fa-IR/chat.json +26 -0
  14. package/locales/fa-IR/models.json +21 -0
  15. package/locales/fr-FR/chat.json +26 -0
  16. package/locales/fr-FR/models.json +21 -0
  17. package/locales/it-IT/chat.json +26 -0
  18. package/locales/it-IT/models.json +21 -0
  19. package/locales/ja-JP/chat.json +26 -0
  20. package/locales/ja-JP/models.json +21 -0
  21. package/locales/ko-KR/chat.json +26 -0
  22. package/locales/ko-KR/models.json +21 -0
  23. package/locales/nl-NL/chat.json +26 -0
  24. package/locales/nl-NL/models.json +21 -0
  25. package/locales/pl-PL/chat.json +26 -0
  26. package/locales/pl-PL/models.json +21 -0
  27. package/locales/pt-BR/chat.json +26 -0
  28. package/locales/pt-BR/models.json +21 -0
  29. package/locales/ru-RU/chat.json +26 -0
  30. package/locales/ru-RU/models.json +21 -0
  31. package/locales/tr-TR/chat.json +26 -0
  32. package/locales/tr-TR/models.json +21 -0
  33. package/locales/vi-VN/chat.json +26 -0
  34. package/locales/vi-VN/models.json +21 -0
  35. package/locales/zh-CN/chat.json +27 -1
  36. package/locales/zh-CN/models.json +25 -4
  37. package/locales/zh-TW/chat.json +26 -0
  38. package/locales/zh-TW/models.json +21 -0
  39. package/package.json +1 -1
  40. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -0
  41. package/src/config/aiModels/google.ts +8 -0
  42. package/src/config/aiModels/groq.ts +111 -95
  43. package/src/config/aiModels/hunyuan.ts +36 -4
  44. package/src/config/aiModels/internlm.ts +4 -5
  45. package/src/config/aiModels/jina.ts +3 -0
  46. package/src/config/aiModels/mistral.ts +35 -21
  47. package/src/config/aiModels/novita.ts +293 -32
  48. package/src/config/aiModels/perplexity.ts +14 -2
  49. package/src/config/aiModels/qwen.ts +91 -37
  50. package/src/config/aiModels/sensenova.ts +70 -17
  51. package/src/config/aiModels/siliconcloud.ts +5 -3
  52. package/src/config/aiModels/stepfun.ts +19 -0
  53. package/src/config/aiModels/taichu.ts +4 -2
  54. package/src/config/aiModels/upstage.ts +24 -11
  55. package/src/config/modelProviders/openrouter.ts +1 -0
  56. package/src/config/modelProviders/qwen.ts +2 -1
  57. package/src/const/settings/agent.ts +1 -0
  58. package/src/database/repositories/aiInfra/index.test.ts +2 -5
  59. package/src/database/repositories/aiInfra/index.ts +6 -2
  60. package/src/database/schemas/message.ts +2 -1
  61. package/src/database/server/models/aiModel.ts +1 -1
  62. package/src/database/server/models/aiProvider.ts +6 -1
  63. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -0
  64. package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +40 -0
  65. package/src/features/ChatInput/ActionBar/Model/index.tsx +132 -0
  66. package/src/features/ChatInput/ActionBar/Params/index.tsx +2 -2
  67. package/src/features/ChatInput/ActionBar/Search/ExaIcon.tsx +15 -0
  68. package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +68 -0
  69. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +167 -0
  70. package/src/features/ChatInput/ActionBar/Search/index.tsx +76 -0
  71. package/src/features/ChatInput/ActionBar/config.ts +4 -2
  72. package/src/features/Conversation/Messages/Assistant/SearchGrounding.tsx +153 -0
  73. package/src/features/Conversation/Messages/Assistant/index.tsx +7 -1
  74. package/src/features/ModelSelect/index.tsx +1 -1
  75. package/src/features/ModelSwitchPanel/index.tsx +2 -3
  76. package/src/hooks/useEnabledChatModels.ts +1 -1
  77. package/src/libs/agent-runtime/google/index.test.ts +142 -36
  78. package/src/libs/agent-runtime/google/index.ts +26 -51
  79. package/src/libs/agent-runtime/novita/__snapshots__/index.test.ts.snap +3 -3
  80. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -3
  81. package/src/libs/agent-runtime/openrouter/index.ts +20 -20
  82. package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
  83. package/src/libs/agent-runtime/qwen/index.ts +38 -55
  84. package/src/libs/agent-runtime/types/chat.ts +6 -2
  85. package/src/libs/agent-runtime/utils/streams/google-ai.ts +29 -4
  86. package/src/libs/agent-runtime/utils/streams/openai.ts +1 -1
  87. package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
  88. package/src/locales/default/chat.ts +28 -0
  89. package/src/services/chat.ts +10 -0
  90. package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +1 -0
  91. package/src/store/agent/slices/chat/selectors.ts +6 -0
  92. package/src/store/aiInfra/slices/aiModel/selectors.ts +36 -0
  93. package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -2
  94. package/src/store/aiInfra/slices/aiProvider/selectors.ts +14 -0
  95. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +15 -5
  96. package/src/store/chat/slices/message/action.ts +1 -1
  97. package/src/store/user/slices/modelList/selectors/modelProvider.ts +1 -1
  98. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +1 -0
  99. package/src/types/agent/index.ts +4 -0
  100. package/src/types/aiModel.ts +35 -8
  101. package/src/types/aiProvider.ts +7 -10
  102. package/src/types/message/base.ts +2 -5
  103. package/src/types/message/chat.ts +5 -3
  104. package/src/types/openai/chat.ts +5 -0
  105. package/src/types/search.ts +29 -0
  106. package/src/utils/fetch/fetchSSE.ts +11 -11
  107. package/src/features/ChatInput/ActionBar/ModelSwitch.tsx +0 -20
package/src/libs/agent-runtime/openrouter/index.ts

@@ -1,9 +1,9 @@
+ import type { ChatModelCard } from '@/types/llm';
+
  import { ModelProvider } from '../types';
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
  import { OpenRouterModelCard } from './type';

- import type { ChatModelCard } from '@/types/llm';
-
  export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
    baseURL: 'https://openrouter.ai/api/v1',
    chatCompletion: {
@@ -11,6 +11,7 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
        return {
          ...payload,
          include_reasoning: true,
+         model: payload.enabledSearch ? `${payload.model}:online` : payload.model,
          stream: payload.stream ?? true,
        } as any;
      },
@@ -27,10 +28,7 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
    models: async ({ client }) => {
      const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

-     const visionKeywords = [
-       'qwen/qvq',
-       'vision'
-     ];
+     const visionKeywords = ['qwen/qvq', 'vision'];

      const reasoningKeywords = [
        'deepseek/deepseek-r1',
@@ -41,12 +39,14 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
        'thinking',
      ];

-     const modelsPage = await client.models.list() as any;
+     const modelsPage = (await client.models.list()) as any;
      const modelList: OpenRouterModelCard[] = modelsPage.data;

      return modelList
        .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+         );

          return {
            contextWindowTokens: model.context_length,
@@ -54,25 +54,25 @@
            displayName: model.name,
            enabled: knownModel?.enabled || false,
            functionCall:
-             model.description.includes('function calling')
-             || model.description.includes('tools')
-             || knownModel?.abilities?.functionCall
-             || false,
+             model.description.includes('function calling') ||
+             model.description.includes('tools') ||
+             knownModel?.abilities?.functionCall ||
+             false,
            id: model.id,
            maxTokens:
              typeof model.top_provider.max_completion_tokens === 'number'
                ? model.top_provider.max_completion_tokens
                : undefined,
            reasoning:
-             reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-             || knownModel?.abilities?.reasoning
-             || false,
+             reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+             knownModel?.abilities?.reasoning ||
+             false,
            vision:
-             model.description.includes('vision')
-             || model.description.includes('multimodal')
-             || visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-             || knownModel?.abilities?.vision
-             || false,
+             model.description.includes('vision') ||
+             model.description.includes('multimodal') ||
+             visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+             knownModel?.abilities?.vision ||
+             false,
          };
        })
        .filter(Boolean) as ChatModelCard[];
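
Reviewer note: the one functional change in this file is the model rewrite. OpenRouter enables its web-search plugin when the model slug carries the :online suffix, so handlePayload only needs to rewrite the id. A minimal sketch of the resulting request, assuming the official openai SDK pointed at OpenRouter (client setup and model id are illustrative, not part of this diff):

import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.OPENROUTER_API_KEY,
  baseURL: 'https://openrouter.ai/api/v1',
});

const model = 'anthropic/claude-3.5-sonnet'; // illustrative slug
const enabledSearch = true;

const completion = await client.chat.completions.create({
  messages: [{ content: 'What changed in the latest TypeScript release?', role: 'user' }],
  // the only change this diff makes to the request: append `:online` when search is on
  model: enabledSearch ? `${model}:online` : model,
});

console.log(completion.choices[0].message.content);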
package/src/libs/agent-runtime/perplexity/index.test.ts

@@ -203,8 +203,8 @@ describe('LobePerplexityAI', () => {
        expect(stream).toEqual(
          [
            'id: 506d64fb-e7f2-4d94-b80f-158369e9446d',
-           'event: citations',
-           'data: [{"title":"https://www.weather.com.cn/weather/101210101.shtml","url":"https://www.weather.com.cn/weather/101210101.shtml"},{"title":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou","url":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou"},{"title":"https://weather.cma.cn/web/weather/58457.html","url":"https://weather.cma.cn/web/weather/58457.html"},{"title":"https://tianqi.so.com/weather/101210101","url":"https://tianqi.so.com/weather/101210101"},{"title":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832","url":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832"},{"title":"https://www.hzqx.com","url":"https://www.hzqx.com"},{"title":"https://www.hzqx.com/pc/hztq/","url":"https://www.hzqx.com/pc/hztq/"}]\n',
+           'event: grounding',
+           'data: {"citations":[{"title":"https://www.weather.com.cn/weather/101210101.shtml","url":"https://www.weather.com.cn/weather/101210101.shtml"},{"title":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou","url":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou"},{"title":"https://weather.cma.cn/web/weather/58457.html","url":"https://weather.cma.cn/web/weather/58457.html"},{"title":"https://tianqi.so.com/weather/101210101","url":"https://tianqi.so.com/weather/101210101"},{"title":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832","url":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832"},{"title":"https://www.hzqx.com","url":"https://www.hzqx.com"},{"title":"https://www.hzqx.com/pc/hztq/","url":"https://www.hzqx.com/pc/hztq/"}]}\n',
            'id: 506d64fb-e7f2-4d94-b80f-158369e9446d',
            'event: text',
            'data: "杭州今"\n',
package/src/libs/agent-runtime/qwen/index.ts

@@ -1,24 +1,13 @@
+ import type { ChatModelCard } from '@/types/llm';
+
  import { ModelProvider } from '../types';
  import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
-
  import { QwenAIStream } from '../utils/streams';

- import type { ChatModelCard } from '@/types/llm';
-
  export interface QwenModelCard {
    id: string;
  }

- /*
-   QwenEnableSearchModelSeries: An array of Qwen model series that support the enable_search parameter.
-   Currently, enable_search is only supported on Qwen commercial series, excluding Qwen-VL and Qwen-Long series.
- */
- export const QwenEnableSearchModelSeries = [
-   'qwen-max',
-   'qwen-plus',
-   'qwen-turbo',
- ];
-
  /*
    QwenLegacyModels: A set of legacy Qwen models that do not support presence_penalty.
    Currently, presence_penalty is only supported on Qwen commercial models and open-source models starting from Qwen 1.5 and later.
@@ -35,30 +24,34 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
    baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
    chatCompletion: {
      handlePayload: (payload) => {
-       const { model, presence_penalty, temperature, top_p, ...rest } = payload;
+       const { model, presence_penalty, temperature, top_p, enabledSearch, ...rest } = payload;

        return {
          ...rest,
          frequency_penalty: undefined,
          model,
-         presence_penalty:
-           QwenLegacyModels.has(model)
-             ? undefined
-             : (presence_penalty !== undefined && presence_penalty >= -2 && presence_penalty <= 2)
-               ? presence_penalty
-               : undefined,
+         presence_penalty: QwenLegacyModels.has(model)
+           ? undefined
+           : presence_penalty !== undefined && presence_penalty >= -2 && presence_penalty <= 2
+             ? presence_penalty
+             : undefined,
          stream: !payload.tools,
-         temperature: (temperature !== undefined && temperature >= 0 && temperature < 2) ? temperature : undefined,
-         ...(model.startsWith('qvq') || model.startsWith('qwen-vl') ? {
-           top_p: (top_p !== undefined && top_p > 0 && top_p <= 1) ? top_p : undefined,
-         } : {
-           top_p: (top_p !== undefined && top_p > 0 && top_p < 1) ? top_p : undefined,
-         }),
-         ...(process.env.QWEN_ENABLE_SEARCH === '1' && QwenEnableSearchModelSeries.some(prefix => model.startsWith(prefix)) && {
-           enable_search: true,
+         temperature:
+           temperature !== undefined && temperature >= 0 && temperature < 2
+             ? temperature
+             : undefined,
+         ...(model.startsWith('qvq') || model.startsWith('qwen-vl')
+           ? {
+               top_p: top_p !== undefined && top_p > 0 && top_p <= 1 ? top_p : undefined,
+             }
+           : {
+               top_p: top_p !== undefined && top_p > 0 && top_p < 1 ? top_p : undefined,
+             }),
+         ...(enabledSearch && {
+           enable_search: enabledSearch,
            search_options: {
              search_strategy: process.env.QWEN_SEARCH_STRATEGY || 'standard', // standard or pro
-           }
+           },
          }),
          ...(payload.tools && {
            parallel_tool_calls: true,
@@ -73,48 +66,38 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
    models: async ({ client }) => {
      const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');

-     const functionCallKeywords = [
-       'qwen-max',
-       'qwen-plus',
-       'qwen-turbo',
-       'qwen2.5',
-     ];
+     const functionCallKeywords = ['qwen-max', 'qwen-plus', 'qwen-turbo', 'qwen2.5'];

-     const visionKeywords = [
-       'qvq',
-       'vl',
-     ];
+     const visionKeywords = ['qvq', 'vl'];

-     const reasoningKeywords = [
-       'qvq',
-       'qwq',
-       'deepseek-r1'
-     ];
+     const reasoningKeywords = ['qvq', 'qwq', 'deepseek-r1'];

-     const modelsPage = await client.models.list() as any;
+     const modelsPage = (await client.models.list()) as any;
      const modelList: QwenModelCard[] = modelsPage.data;

      return modelList
        .map((model) => {
-         const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+         const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+           (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+         );

          return {
            contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
            displayName: knownModel?.displayName ?? undefined,
            enabled: knownModel?.enabled || false,
            functionCall:
-             functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-             || knownModel?.abilities?.functionCall
-             || false,
+             functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+             knownModel?.abilities?.functionCall ||
+             false,
            id: model.id,
            reasoning:
-             reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-             || knownModel?.abilities?.reasoning
-             || false,
+             reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+             knownModel?.abilities?.reasoning ||
+             false,
            vision:
-             visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-             || knownModel?.abilities?.vision
-             || false,
+             visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+             knownModel?.abilities?.vision ||
+             false,
          };
        })
        .filter(Boolean) as ChatModelCard[];
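
Net effect for Qwen: search is no longer gated behind QWEN_ENABLE_SEARCH=1 plus the hardcoded QwenEnableSearchModelSeries allowlist; the per-request enabledSearch flag now drives DashScope's enable_search parameter directly, and only the search strategy still comes from the environment. A sketch of the body that reaches the DashScope-compatible endpoint when search is on (message and model are illustrative):

const body = {
  enable_search: true,
  messages: [{ content: 'What is the weather in Hangzhou today?', role: 'user' }],
  model: 'qwen-max',
  search_options: {
    // 'standard' by default; 'pro' is the other value the inline comment mentions
    search_strategy: process.env.QWEN_SEARCH_STRATEGY || 'standard',
  },
  stream: true,
};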
package/src/libs/agent-runtime/types/chat.ts

@@ -38,6 +38,10 @@ export interface OpenAIChatMessage {
   * @title Chat Stream Payload
   */
  export interface ChatStreamPayload {
+   /**
+    * 是否开启搜索
+    */
+   enabledSearch?: boolean;
    /**
     * @title 控制生成文本中的惩罚系数,用于减少重复性
     * @default 0
@@ -68,11 +72,11 @@ export interface ChatStreamPayload {
     * @default 0
     */
    presence_penalty?: number;
+
    /**
     * @default openai
     */
    provider?: string;
-
    responseMode?: 'streamText' | 'json';
    /**
     * @title 是否开启流式请求
@@ -85,8 +89,8 @@
     */
    temperature: number;
    tool_choice?: string;
-   tools?: ChatCompletionTool[];

+   tools?: ChatCompletionTool[];
    /**
     * @title 控制生成文本中最高概率的单个令牌
     * @default 1
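
The new enabledSearch flag (its JSDoc reads "whether to enable search") is the provider-agnostic switch that the runtimes above translate. A usage sketch; the partial payload and import path are assumptions, since the full interface has more required fields:

import type { ChatStreamPayload } from '@/libs/agent-runtime/types/chat'; // path assumed

const payload: Partial<ChatStreamPayload> = {
  enabledSearch: true, // OpenRouter maps this to `:online`; Qwen maps it to enable_search
  model: 'qwen-max',
  stream: true,
  temperature: 0.7,
};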
package/src/libs/agent-runtime/utils/streams/google-ai.ts

@@ -1,5 +1,6 @@
  import { EnhancedGenerateContentResponse } from '@google/generative-ai';

+ import { GroundingSearch } from '@/types/search';
  import { nanoid } from '@/utils/uuid';

  import { ChatStreamCallbacks } from '../../types';
@@ -14,8 +15,8 @@ import {

  const transformGoogleGenerativeAIStream = (
    chunk: EnhancedGenerateContentResponse,
-   stack: StreamContext,
- ): StreamProtocolChunk => {
+   context: StreamContext,
+ ): StreamProtocolChunk | StreamProtocolChunk[] => {
    // maybe need another structure to add support for multiple choices
    const functionCalls = chunk.functionCalls();

@@ -32,15 +33,39 @@ const transformGoogleGenerativeAIStream = (
          type: 'function',
        }),
      ),
-     id: stack.id,
+     id: context.id,
      type: 'tool_calls',
    };
  }
  const text = chunk.text();

+ if (chunk.candidates && chunk.candidates[0].groundingMetadata) {
+   const { webSearchQueries, groundingSupports, groundingChunks } =
+     chunk.candidates[0].groundingMetadata;
+   console.log({ groundingChunks, groundingSupports, webSearchQueries });
+
+   return [
+     { data: text, id: context.id, type: 'text' },
+     {
+       data: {
+         citations: groundingChunks?.map((chunk) => ({
+           // google 返回的 uri 是经过 google 自己处理过的 url,因此无法展现真实的 favicon
+           // 需要使用 title 作为替换
+           favicon: chunk.web?.title,
+           title: chunk.web?.title,
+           url: chunk.web?.uri,
+         })),
+         searchQueries: webSearchQueries,
+       } as GroundingSearch,
+       id: context.id,
+       type: 'grounding',
+     },
+   ];
+ }
+
  return {
    data: text,
-   id: stack?.id,
+   id: context?.id,
    type: 'text',
  };
};
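
Two notes here. The inline Chinese comment says that Google returns its own processed URLs rather than the original source URL, so the real favicon cannot be shown and the page title is used as a stand-in. And the transformer may now return either one chunk or an array of chunks; a sketch of how a caller could flatten that (hypothetical helper, not from this diff):

import type { StreamProtocolChunk } from './protocol'; // path as used elsewhere in utils/streams

// Normalize the one-or-many return into a flat emit loop.
const emitAll = (
  result: StreamProtocolChunk | StreamProtocolChunk[],
  send: (chunk: StreamProtocolChunk) => void,
) => {
  for (const chunk of Array.isArray(result) ? result : [result]) send(chunk);
};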
package/src/libs/agent-runtime/utils/streams/openai.ts

@@ -127,7 +127,7 @@ export const transformOpenAIStream = (
      );

      return [
-       { data: citations, id: chunk.id, type: 'citations' },
+       { data: { citations }, id: chunk.id, type: 'grounding' },
        { data: content, id: chunk.id, type: 'text' },
      ];
    }
package/src/libs/agent-runtime/utils/streams/protocol.ts

@@ -30,7 +30,7 @@ export interface StreamProtocolChunk {
      // Model Thinking
      | 'reasoning'
      // Search or Grounding
-     | 'citations'
+     | 'grounding'
      // stop signal
      | 'stop'
      // Error
package/src/locales/default/chat.ts

@@ -32,6 +32,9 @@ export default {
    },
    duplicateTitle: '{{title}} 副本',
    emptyAgent: '暂无助手',
+   extendControls: {
+     title: '模型扩展功能',
+   },
    historyRange: '历史范围',
    historySummary: '历史消息总结',
    inbox: {
@@ -86,6 +89,31 @@
    },
    regenerate: '重新生成',
    roleAndArchive: '角色与记录',
+   search: {
+     grounding: {
+       searchQueries: '搜索关键词',
+       title: '已搜索到 {{count}} 个结果',
+     },
+
+     mode: {
+       auto: {
+         desc: '根据对话内容智能判断是否需要搜索',
+         title: '智能联网',
+       },
+       disable: '当前模型不支持函数调用,因此无法使用智能联网功能',
+       off: {
+         desc: '仅使用模型的基础知识,不进行网络搜索',
+         title: '关闭联网',
+       },
+       on: {
+         desc: '持续进行网络搜索,获取最新信息',
+         title: '始终联网',
+       },
+       useModelBuiltin: '使用模型内置搜索引擎',
+     },
+
+     title: '联网搜索',
+   },
    searchAgentPlaceholder: '搜索助手...',
    sendPlaceholder: '输入聊天内容...',
    sessionGroup: {
package/src/services/chat.ts

@@ -18,6 +18,7 @@ import {
  import { filesPrompts } from '@/prompts/files';
  import { BuiltinSystemRolePrompts } from '@/prompts/systemRole';
  import { aiModelSelectors, aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
+ import { getAgentChatConfig } from '@/store/chat/slices/aiChat/actions/helpers';
  import { useSessionStore } from '@/store/session';
  import { sessionMetaSelectors } from '@/store/session/selectors';
  import { useToolStore } from '@/store/tool';
@@ -224,6 +225,8 @@

    const { provider = ModelProvider.OpenAI, ...res } = params;

+   // =================== process model =================== //
+   // ===================================================== //
    let model = res.model || DEFAULT_AGENT_CONFIG.model;

    // if the provider is Azure, get the deployment name as the request model
@@ -238,6 +241,13 @@
      model = findDeploymentName(model, provider);
    }

+   // =================== process search =================== //
+   // ===================================================== //
+   const chatConfig = getAgentChatConfig();
+   if (chatConfig.searchMode !== 'off') {
+     res.enabledSearch = true;
+   }
+
    const payload = merge(
      { model: DEFAULT_AGENT_CONFIG.model, stream: true, ...DEFAULT_AGENT_CONFIG.params },
      { ...res, model },
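
So the wiring is: the agent's chatConfig.searchMode is read at request time, and any value other than 'off' turns on enabledSearch for the outgoing payload. A condensed sketch; the union type is an inference from the locale entries above, where off disables search, auto lets the model decide, and on always searches:

type SearchMode = 'off' | 'auto' | 'on'; // inferred; only 'off' is named in this diff

// Mirrors the new block in ChatService above.
const applySearchMode = (searchMode: SearchMode, res: { enabledSearch?: boolean }) => {
  if (searchMode !== 'off') res.enabledSearch = true;
};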
package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap

@@ -9,6 +9,7 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
    "enableCompressHistory": true,
    "enableHistoryCount": true,
    "historyCount": 8,
+   "searchMode": "off",
  },
  "model": "gpt-3.5-turbo",
  "params": {
package/src/store/agent/slices/chat/selectors.ts

@@ -107,6 +107,8 @@ const currentEnabledKnowledge = (s: AgentStore) => {
    ] as KnowledgeItem[];
  };

+ const agentSearchMode = (s: AgentStore) => currentAgentChatConfig(s).searchMode || 'off';
+
  const hasSystemRole = (s: AgentStore) => {
    const config = currentAgentConfig(s);

@@ -140,7 +142,10 @@ const currentKnowledgeIds = (s: AgentStore) => {

  const isAgentConfigLoading = (s: AgentStore) => !s.agentConfigInitMap[s.activeId];

+ const isAgentEnableSearch = (s: AgentStore) => agentSearchMode(s) !== 'off';
+
  export const agentSelectors = {
+   agentSearchMode,
    currentAgentChatConfig,
    currentAgentConfig,
    currentAgentFiles,
@@ -160,5 +165,6 @@ export const agentSelectors = {
    inboxAgentConfig,
    inboxAgentModel,
    isAgentConfigLoading,
+   isAgentEnableSearch,
    isInboxSession,
  };
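
A usage sketch of the two new selectors inside a component; the store import paths are assumed to match the rest of the codebase:

import { useAgentStore } from '@/store/agent';
import { agentSelectors } from '@/store/agent/selectors';

// Shows a globe whenever the active agent has any search mode other than 'off'.
const SearchIndicator = () => {
  const enableSearch = useAgentStore(agentSelectors.isAgentEnableSearch);
  const mode = useAgentStore(agentSelectors.agentSearchMode);

  return enableSearch ? <span title={`search: ${mode}`}>🌐</span> : null;
};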
package/src/store/aiInfra/slices/aiModel/selectors.ts

@@ -1,5 +1,6 @@
  import { AIProviderStoreState } from '@/store/aiInfra/initialState';
  import { AiModelSourceEnum } from '@/types/aiModel';
+ import { ModelSearchImplement } from '@/types/search';

  const aiProviderChatModelListIds = (s: AIProviderStoreState) =>
    s.aiProviderModelList.filter((item) => item.type === 'chat').map((item) => item.id);
@@ -69,20 +70,55 @@ const modelContextWindowTokens = (id: string, provider: string) => (s: AIProvide
    return model?.contextWindowTokens;
  };

+ const modelExtendControls = (id: string, provider: string) => (s: AIProviderStoreState) => {
+   const model = getEnabledModelById(id, provider)(s);
+
+   return model?.settings?.extendControls;
+ };
+
+ const isModelHasExtendControls = (id: string, provider: string) => (s: AIProviderStoreState) => {
+   const controls = modelExtendControls(id, provider)(s);
+
+   return !!controls && controls.length > 0;
+ };
+
+ const isModelHasBuiltinSearch = (id: string, provider: string) => (s: AIProviderStoreState) => {
+   const model = getEnabledModelById(id, provider)(s);
+
+   return !!model?.settings?.searchImpl;
+ };
+
+ const isModelHasBuiltinSearchConfig =
+   (id: string, provider: string) => (s: AIProviderStoreState) => {
+     const model = getEnabledModelById(id, provider)(s);
+
+     return (
+       !!model?.settings?.searchImpl &&
+       [ModelSearchImplement.Tool, ModelSearchImplement.Params].includes(
+         model?.settings?.searchImpl as ModelSearchImplement,
+       )
+     );
+   };
+
  export const aiModelSelectors = {
    aiProviderChatModelListIds,
    disabledAiProviderModelList,
    enabledAiProviderModelList,
    filteredAiProviderModelList,
    getAiModelById,
+   getEnabledModelById,
    hasRemoteModels,
    isEmptyAiProviderModelList,
    isModelEnabled,
+   isModelHasBuiltinSearch,
+   isModelHasBuiltinSearchConfig,
    isModelHasContextWindowToken,
+   isModelHasExtendControls,
    isModelLoading,
    isModelSupportReasoning,
    isModelSupportToolUse,
    isModelSupportVision,
    modelContextWindowTokens,
+   modelExtendControls,
    totalAiProviderModelList,
  };
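
These selectors separate models that merely have built-in search (searchImpl set) from models whose built-in search is user-configurable (searchImpl of Tool or Params, per the new ModelSearchImplement enum in src/types/search.ts). A sketch of how the new Search UI might consume them, reusing the @/store/aiInfra exports seen in services/chat.ts above; the hook itself is hypothetical:

import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';

// Decide which search UI to offer for the active model/provider pair.
const useSearchCapability = (model: string, provider: string) => {
  const hasBuiltin = useAiInfraStore(aiModelSelectors.isModelHasBuiltinSearch(model, provider));
  const configurable = useAiInfraStore(
    aiModelSelectors.isModelHasBuiltinSearchConfig(model, provider),
  );

  return { configurable, hasBuiltin };
};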
package/src/store/aiInfra/slices/aiProvider/initialState.ts

@@ -1,10 +1,10 @@
- import { EnabledProviderWithModels } from '@/types/aiModel';
+ import { EnabledAiModel } from '@/types/aiModel';
  import {
    AiProviderDetailItem,
    AiProviderListItem,
    AiProviderRuntimeConfig,
-   EnabledAiModel,
    EnabledProvider,
+   EnabledProviderWithModels,
  } from '@/types/aiProvider';

  export interface AIProviderState {
package/src/store/aiInfra/slices/aiProvider/selectors.ts

@@ -87,6 +87,18 @@ const providerKeyVaults = (provider: string | undefined) => (s: AIProviderStoreS
    return s.aiProviderRuntimeConfig?.[provider]?.keyVaults;
  };

+ const isProviderHasBuiltinSearch = (provider: string) => (s: AIProviderStoreState) => {
+   const config = providerConfigById(provider)(s);
+
+   return !!config?.settings.searchMode;
+ };
+
+ const isProviderHasBuiltinSearchConfig = (id: string) => (s: AIProviderStoreState) => {
+   const providerCfg = providerConfigById(id)(s);
+
+   return !!providerCfg?.settings.searchMode && providerCfg?.settings.searchMode !== 'internal';
+ };
+
  export const aiProviderSelectors = {
    activeProviderConfig,
    disabledAiProviderList,
@@ -97,6 +109,8 @@ export const aiProviderSelectors = {
    isProviderConfigUpdating,
    isProviderEnabled,
    isProviderFetchOnClient,
+   isProviderHasBuiltinSearch,
+   isProviderHasBuiltinSearchConfig,
    isProviderLoading,
    providerConfigById,
    providerKeyVaults,
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts

@@ -455,7 +455,7 @@ export const generateAIChat: StateCreator<
        await messageService.updateMessageError(messageId, error);
        await refreshMessages();
      },
-     onFinish: async (content, { traceId, observationId, toolCalls, reasoning, citations }) => {
+     onFinish: async (content, { traceId, observationId, toolCalls, reasoning, grounding }) => {
        // if there is traceId, update it
        if (traceId) {
          msgTraceId = traceId;
@@ -473,19 +473,29 @@
        await internal_updateMessageContent(messageId, content, {
          toolCalls,
          reasoning: !!reasoning ? { content: reasoning, duration } : undefined,
-         search: !!citations ? { citations } : undefined,
+         search: !!grounding?.citations ? grounding : undefined,
        });
      },
      onMessageHandle: async (chunk) => {
        switch (chunk.type) {
-         case 'citations': {
+         case 'grounding': {
            // if there is no citations, then stop
-           if (!chunk.citations || chunk.citations.length <= 0) return;
+           if (
+             !chunk.grounding ||
+             !chunk.grounding.citations ||
+             chunk.grounding.citations.length <= 0
+           )
+             return;

            internal_dispatchMessage({
              id: messageId,
              type: 'updateMessage',
-             value: { search: { citations: chunk.citations } },
+             value: {
+               search: {
+                 citations: chunk.grounding.citations,
+                 searchQueries: chunk.grounding.searchQueries,
+               },
+             },
            });
            break;
          }
package/src/store/chat/slices/message/action.ts

@@ -16,10 +16,10 @@ import {
    ChatMessage,
    ChatMessageError,
    CreateMessageParams,
-   GroundingSearch,
    MessageToolCall,
    ModelReasoning,
  } from '@/types/message';
+ import { GroundingSearch } from '@/types/search';
  import { TraceEventPayloads } from '@/types/trace';
  import { setNamespace } from '@/utils/storeDebug';
  import { nanoid } from '@/utils/uuid';
package/src/store/user/slices/modelList/selectors/modelProvider.ts

@@ -1,7 +1,7 @@
  import { uniqBy } from 'lodash-es';

  import { filterEnabledModels } from '@/config/modelProviders';
- import { EnabledProviderWithModels } from '@/types/aiModel';
+ import { EnabledProviderWithModels } from '@/types/aiProvider';
  import { ChatModelCard, ModelProviderCard } from '@/types/llm';
  import { ServerModelProviderConfig } from '@/types/serverConfig';
  import { GlobalLLMProviderKey } from '@/types/user/settings';
package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap

@@ -76,6 +76,7 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
    "enableCompressHistory": true,
    "enableHistoryCount": true,
    "historyCount": 8,
+   "searchMode": "off",
  },
  "model": "gpt-3.5-turbo",
  "params": {