@lobehub/chat 1.62.10 → 1.63.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/CHANGELOG.md +66 -0
  2. package/changelog/v1.json +24 -0
  3. package/docs/self-hosting/environment-variables/model-provider.mdx +18 -0
  4. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +18 -0
  5. package/docs/self-hosting/server-database/sealos.mdx +5 -1
  6. package/locales/ar/chat.json +26 -0
  7. package/locales/ar/models.json +21 -0
  8. package/locales/bg-BG/chat.json +26 -0
  9. package/locales/bg-BG/models.json +21 -0
  10. package/locales/de-DE/chat.json +26 -0
  11. package/locales/de-DE/models.json +21 -0
  12. package/locales/en-US/chat.json +26 -0
  13. package/locales/en-US/models.json +21 -0
  14. package/locales/es-ES/chat.json +26 -0
  15. package/locales/es-ES/models.json +21 -0
  16. package/locales/fa-IR/chat.json +26 -0
  17. package/locales/fa-IR/models.json +21 -0
  18. package/locales/fr-FR/chat.json +26 -0
  19. package/locales/fr-FR/models.json +21 -0
  20. package/locales/it-IT/chat.json +26 -0
  21. package/locales/it-IT/models.json +21 -0
  22. package/locales/ja-JP/chat.json +26 -0
  23. package/locales/ja-JP/models.json +21 -0
  24. package/locales/ko-KR/chat.json +26 -0
  25. package/locales/ko-KR/models.json +21 -0
  26. package/locales/nl-NL/chat.json +26 -0
  27. package/locales/nl-NL/models.json +21 -0
  28. package/locales/pl-PL/chat.json +26 -0
  29. package/locales/pl-PL/models.json +21 -0
  30. package/locales/pt-BR/chat.json +26 -0
  31. package/locales/pt-BR/models.json +21 -0
  32. package/locales/ru-RU/chat.json +26 -0
  33. package/locales/ru-RU/models.json +21 -0
  34. package/locales/tr-TR/chat.json +26 -0
  35. package/locales/tr-TR/models.json +21 -0
  36. package/locales/vi-VN/chat.json +26 -0
  37. package/locales/vi-VN/models.json +21 -0
  38. package/locales/zh-CN/chat.json +27 -1
  39. package/locales/zh-CN/models.json +25 -4
  40. package/locales/zh-TW/chat.json +26 -0
  41. package/locales/zh-TW/models.json +21 -0
  42. package/package.json +3 -3
  43. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -0
  44. package/src/config/aiModels/google.ts +8 -0
  45. package/src/config/aiModels/groq.ts +111 -95
  46. package/src/config/aiModels/hunyuan.ts +36 -4
  47. package/src/config/aiModels/internlm.ts +4 -5
  48. package/src/config/aiModels/jina.ts +3 -0
  49. package/src/config/aiModels/mistral.ts +35 -21
  50. package/src/config/aiModels/novita.ts +293 -32
  51. package/src/config/aiModels/perplexity.ts +14 -2
  52. package/src/config/aiModels/qwen.ts +91 -37
  53. package/src/config/aiModels/sensenova.ts +70 -17
  54. package/src/config/aiModels/siliconcloud.ts +5 -3
  55. package/src/config/aiModels/stepfun.ts +19 -0
  56. package/src/config/aiModels/taichu.ts +4 -2
  57. package/src/config/aiModels/upstage.ts +24 -11
  58. package/src/config/modelProviders/openrouter.ts +1 -0
  59. package/src/config/modelProviders/qwen.ts +2 -1
  60. package/src/config/modelProviders/volcengine.ts +4 -1
  61. package/src/const/settings/agent.ts +1 -0
  62. package/src/database/repositories/aiInfra/index.test.ts +2 -5
  63. package/src/database/repositories/aiInfra/index.ts +6 -2
  64. package/src/database/schemas/message.ts +2 -1
  65. package/src/database/server/models/aiModel.ts +1 -1
  66. package/src/database/server/models/aiProvider.ts +6 -1
  67. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -0
  68. package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +40 -0
  69. package/src/features/ChatInput/ActionBar/Model/index.tsx +132 -0
  70. package/src/features/ChatInput/ActionBar/Params/index.tsx +2 -2
  71. package/src/features/ChatInput/ActionBar/Search/ExaIcon.tsx +15 -0
  72. package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +68 -0
  73. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +167 -0
  74. package/src/features/ChatInput/ActionBar/Search/index.tsx +76 -0
  75. package/src/features/ChatInput/ActionBar/config.ts +4 -2
  76. package/src/features/Conversation/Messages/Assistant/SearchGrounding.tsx +153 -0
  77. package/src/features/Conversation/Messages/Assistant/index.tsx +7 -1
  78. package/src/features/ModelSelect/index.tsx +1 -1
  79. package/src/features/ModelSwitchPanel/index.tsx +2 -3
  80. package/src/hooks/useEnabledChatModels.ts +1 -1
  81. package/src/libs/agent-runtime/azureai/index.ts +21 -2
  82. package/src/libs/agent-runtime/google/index.test.ts +142 -36
  83. package/src/libs/agent-runtime/google/index.ts +26 -51
  84. package/src/libs/agent-runtime/novita/__snapshots__/index.test.ts.snap +3 -3
  85. package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -3
  86. package/src/libs/agent-runtime/openrouter/index.ts +20 -20
  87. package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
  88. package/src/libs/agent-runtime/qwen/index.ts +38 -55
  89. package/src/libs/agent-runtime/types/chat.ts +6 -2
  90. package/src/libs/agent-runtime/utils/streams/google-ai.ts +29 -4
  91. package/src/libs/agent-runtime/utils/streams/openai.ts +1 -1
  92. package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
  93. package/src/locales/default/chat.ts +28 -0
  94. package/src/services/chat.ts +10 -0
  95. package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +1 -0
  96. package/src/store/agent/slices/chat/selectors.ts +6 -0
  97. package/src/store/aiInfra/slices/aiModel/selectors.ts +36 -0
  98. package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -2
  99. package/src/store/aiInfra/slices/aiProvider/selectors.ts +14 -0
  100. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +15 -5
  101. package/src/store/chat/slices/message/action.ts +1 -1
  102. package/src/store/user/slices/modelList/selectors/modelProvider.ts +1 -1
  103. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +1 -0
  104. package/src/types/agent/index.ts +4 -0
  105. package/src/types/aiModel.ts +35 -8
  106. package/src/types/aiProvider.ts +7 -10
  107. package/src/types/message/base.ts +2 -5
  108. package/src/types/message/chat.ts +5 -3
  109. package/src/types/openai/chat.ts +5 -0
  110. package/src/types/search.ts +29 -0
  111. package/src/utils/fetch/fetchSSE.ts +11 -11
  112. package/src/features/ChatInput/ActionBar/ModelSwitch.tsx +0 -20
@@ -1,5 +1,6 @@
1
1
  import { AIProviderStoreState } from '@/store/aiInfra/initialState';
2
2
  import { AiModelSourceEnum } from '@/types/aiModel';
3
+ import { ModelSearchImplement } from '@/types/search';
3
4
 
4
5
  const aiProviderChatModelListIds = (s: AIProviderStoreState) =>
5
6
  s.aiProviderModelList.filter((item) => item.type === 'chat').map((item) => item.id);
@@ -69,20 +70,55 @@ const modelContextWindowTokens = (id: string, provider: string) => (s: AIProvide
69
70
  return model?.contextWindowTokens;
70
71
  };
71
72
 
73
+ const modelExtendControls = (id: string, provider: string) => (s: AIProviderStoreState) => {
74
+ const model = getEnabledModelById(id, provider)(s);
75
+
76
+ return model?.settings?.extendControls;
77
+ };
78
+
79
+ const isModelHasExtendControls = (id: string, provider: string) => (s: AIProviderStoreState) => {
80
+ const controls = modelExtendControls(id, provider)(s);
81
+
82
+ return !!controls && controls.length > 0;
83
+ };
84
+
85
+ const isModelHasBuiltinSearch = (id: string, provider: string) => (s: AIProviderStoreState) => {
86
+ const model = getEnabledModelById(id, provider)(s);
87
+
88
+ return !!model?.settings?.searchImpl;
89
+ };
90
+
91
+ const isModelHasBuiltinSearchConfig =
92
+ (id: string, provider: string) => (s: AIProviderStoreState) => {
93
+ const model = getEnabledModelById(id, provider)(s);
94
+
95
+ return (
96
+ !!model?.settings?.searchImpl &&
97
+ [ModelSearchImplement.Tool, ModelSearchImplement.Params].includes(
98
+ model?.settings?.searchImpl as ModelSearchImplement,
99
+ )
100
+ );
101
+ };
102
+
72
103
  export const aiModelSelectors = {
73
104
  aiProviderChatModelListIds,
74
105
  disabledAiProviderModelList,
75
106
  enabledAiProviderModelList,
76
107
  filteredAiProviderModelList,
77
108
  getAiModelById,
109
+ getEnabledModelById,
78
110
  hasRemoteModels,
79
111
  isEmptyAiProviderModelList,
80
112
  isModelEnabled,
113
+ isModelHasBuiltinSearch,
114
+ isModelHasBuiltinSearchConfig,
81
115
  isModelHasContextWindowToken,
116
+ isModelHasExtendControls,
82
117
  isModelLoading,
83
118
  isModelSupportReasoning,
84
119
  isModelSupportToolUse,
85
120
  isModelSupportVision,
86
121
  modelContextWindowTokens,
122
+ modelExtendControls,
87
123
  totalAiProviderModelList,
88
124
  };
@@ -1,10 +1,10 @@
1
- import { EnabledProviderWithModels } from '@/types/aiModel';
1
+ import { EnabledAiModel } from '@/types/aiModel';
2
2
  import {
3
3
  AiProviderDetailItem,
4
4
  AiProviderListItem,
5
5
  AiProviderRuntimeConfig,
6
- EnabledAiModel,
7
6
  EnabledProvider,
7
+ EnabledProviderWithModels,
8
8
  } from '@/types/aiProvider';
9
9
 
10
10
  export interface AIProviderState {
@@ -87,6 +87,18 @@ const providerKeyVaults = (provider: string | undefined) => (s: AIProviderStoreS
87
87
  return s.aiProviderRuntimeConfig?.[provider]?.keyVaults;
88
88
  };
89
89
 
90
+ const isProviderHasBuiltinSearch = (provider: string) => (s: AIProviderStoreState) => {
91
+ const config = providerConfigById(provider)(s);
92
+
93
+ return !!config?.settings.searchMode;
94
+ };
95
+
96
+ const isProviderHasBuiltinSearchConfig = (id: string) => (s: AIProviderStoreState) => {
97
+ const providerCfg = providerConfigById(id)(s);
98
+
99
+ return !!providerCfg?.settings.searchMode && providerCfg?.settings.searchMode !== 'internal';
100
+ };
101
+
90
102
  export const aiProviderSelectors = {
91
103
  activeProviderConfig,
92
104
  disabledAiProviderList,
@@ -97,6 +109,8 @@ export const aiProviderSelectors = {
97
109
  isProviderConfigUpdating,
98
110
  isProviderEnabled,
99
111
  isProviderFetchOnClient,
112
+ isProviderHasBuiltinSearch,
113
+ isProviderHasBuiltinSearchConfig,
100
114
  isProviderLoading,
101
115
  providerConfigById,
102
116
  providerKeyVaults,
@@ -455,7 +455,7 @@ export const generateAIChat: StateCreator<
455
455
  await messageService.updateMessageError(messageId, error);
456
456
  await refreshMessages();
457
457
  },
458
- onFinish: async (content, { traceId, observationId, toolCalls, reasoning, citations }) => {
458
+ onFinish: async (content, { traceId, observationId, toolCalls, reasoning, grounding }) => {
459
459
  // if there is traceId, update it
460
460
  if (traceId) {
461
461
  msgTraceId = traceId;
@@ -473,19 +473,29 @@ export const generateAIChat: StateCreator<
473
473
  await internal_updateMessageContent(messageId, content, {
474
474
  toolCalls,
475
475
  reasoning: !!reasoning ? { content: reasoning, duration } : undefined,
476
- search: !!citations ? { citations } : undefined,
476
+ search: !!grounding?.citations ? grounding : undefined,
477
477
  });
478
478
  },
479
479
  onMessageHandle: async (chunk) => {
480
480
  switch (chunk.type) {
481
- case 'citations': {
481
+ case 'grounding': {
482
482
  // if there is no citations, then stop
483
- if (!chunk.citations || chunk.citations.length <= 0) return;
483
+ if (
484
+ !chunk.grounding ||
485
+ !chunk.grounding.citations ||
486
+ chunk.grounding.citations.length <= 0
487
+ )
488
+ return;
484
489
 
485
490
  internal_dispatchMessage({
486
491
  id: messageId,
487
492
  type: 'updateMessage',
488
- value: { search: { citations: chunk.citations } },
493
+ value: {
494
+ search: {
495
+ citations: chunk.grounding.citations,
496
+ searchQueries: chunk.grounding.searchQueries,
497
+ },
498
+ },
489
499
  });
490
500
  break;
491
501
  }
@@ -16,10 +16,10 @@ import {
16
16
  ChatMessage,
17
17
  ChatMessageError,
18
18
  CreateMessageParams,
19
- GroundingSearch,
20
19
  MessageToolCall,
21
20
  ModelReasoning,
22
21
  } from '@/types/message';
22
+ import { GroundingSearch } from '@/types/search';
23
23
  import { TraceEventPayloads } from '@/types/trace';
24
24
  import { setNamespace } from '@/utils/storeDebug';
25
25
  import { nanoid } from '@/utils/uuid';
@@ -1,7 +1,7 @@
1
1
  import { uniqBy } from 'lodash-es';
2
2
 
3
3
  import { filterEnabledModels } from '@/config/modelProviders';
4
- import { EnabledProviderWithModels } from '@/types/aiModel';
4
+ import { EnabledProviderWithModels } from '@/types/aiProvider';
5
5
  import { ChatModelCard, ModelProviderCard } from '@/types/llm';
6
6
  import { ServerModelProviderConfig } from '@/types/serverConfig';
7
7
  import { GlobalLLMProviderKey } from '@/types/user/settings';
@@ -76,6 +76,7 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
76
76
  "enableCompressHistory": true,
77
77
  "enableHistoryCount": true,
78
78
  "historyCount": 8,
79
+ "searchMode": "off",
79
80
  },
80
81
  "model": "gpt-3.5-turbo",
81
82
  "params": {
@@ -3,6 +3,7 @@ import { z } from 'zod';
3
3
  import { FileItem } from '@/types/files';
4
4
  import { KnowledgeBaseItem } from '@/types/knowledgeBase';
5
5
  import { FewShots, LLMParams } from '@/types/llm';
6
+ import { SearchMode } from '@/types/search';
6
7
 
7
8
  export type TTSServer = 'openai' | 'edge' | 'microsoft';
8
9
 
@@ -78,6 +79,8 @@ export interface LobeAgentChatConfig {
78
79
  */
79
80
  historyCount?: number;
80
81
  inputTemplate?: string;
82
+ searchMode?: SearchMode;
83
+ useModelBuiltinSearch?: boolean;
81
84
  }
82
85
 
83
86
  export const AgentChatConfigSchema = z.object({
@@ -89,6 +92,7 @@ export const AgentChatConfigSchema = z.object({
89
92
  enableMaxTokens: z.boolean().optional(),
90
93
  enableReasoningEffort: z.boolean().optional(),
91
94
  historyCount: z.number().optional(),
95
+ searchMode: z.enum(['off', 'on', 'auto']).optional(),
92
96
  });
93
97
 
94
98
  export type LobeAgentConfigKeys =
@@ -1,7 +1,5 @@
1
1
  import { z } from 'zod';
2
2
 
3
- import { AiProviderSourceType } from '@/types/aiProvider';
4
-
5
3
  export type ModelPriceCurrency = 'CNY' | 'USD';
6
4
 
7
5
  export const AiModelSourceEnum = {
@@ -133,6 +131,29 @@ export interface AiModelConfig {
133
131
  * used in azure and doubao
134
132
  */
135
133
  deploymentName?: string;
134
+
135
+ /**
136
+ * qwen series model enabled search
137
+ */
138
+ enabledSearch?: boolean;
139
+ }
140
+
141
+ export interface ExtendedControl {
142
+ key: string;
143
+ requestParams: string | string[];
144
+ type: 'params' | 'tool';
145
+ valueType: 'boolean';
146
+ }
147
+
148
+ export type ModelSearchImplementType = 'tool' | 'params' | 'internal';
149
+
150
+ export interface AiModelSettings {
151
+ extendControls?: ExtendedControl[];
152
+ /**
153
+ * 模型层实现搜索的方式
154
+ */
155
+ searchImpl?: ModelSearchImplementType;
156
+ searchProvider?: string;
136
157
  }
137
158
 
138
159
  export interface AIChatModelCard extends AIBaseModelCard {
@@ -140,6 +161,7 @@ export interface AIChatModelCard extends AIBaseModelCard {
140
161
  config?: AiModelConfig;
141
162
  maxOutput?: number;
142
163
  pricing?: ChatModelPricing;
164
+ settings?: AiModelSettings;
143
165
  type: 'chat';
144
166
  }
145
167
 
@@ -306,17 +328,22 @@ export type ToggleAiModelEnableParams = z.infer<typeof ToggleAiModelEnableSchema
306
328
 
307
329
  //
308
330
 
309
- interface AiModelForSelect {
331
+ export interface AiModelForSelect {
310
332
  abilities: ModelAbilities;
311
333
  contextWindowTokens?: number;
312
334
  displayName?: string;
313
335
  id: string;
314
336
  }
315
337
 
316
- export interface EnabledProviderWithModels {
317
- children: AiModelForSelect[];
338
+ export interface EnabledAiModel {
339
+ abilities: ModelAbilities;
340
+ config?: AiModelConfig;
341
+ contextWindowTokens?: number;
342
+ displayName?: string;
343
+ enabled?: boolean;
318
344
  id: string;
319
- logo?: string;
320
- name: string;
321
- source: AiProviderSourceType;
345
+ providerId: string;
346
+ settings?: AiModelSettings;
347
+ sort?: number;
348
+ type: AiModelType;
322
349
  }
@@ -1,6 +1,6 @@
1
1
  import { z } from 'zod';
2
2
 
3
- import { AiModelConfig, AiModelType, ModelAbilities } from '@/types/aiModel';
3
+ import { AiModelForSelect, EnabledAiModel, ModelSearchImplementType } from '@/types/aiModel';
4
4
  import { SmoothingParams } from '@/types/llm';
5
5
 
6
6
  export const AiProviderSourceEnum = {
@@ -96,6 +96,7 @@ export interface AiProviderSettings {
96
96
  * default openai
97
97
  */
98
98
  sdkType?: AiProviderSDKType;
99
+ searchMode?: ModelSearchImplementType;
99
100
  showAddNewModel?: boolean;
100
101
  /**
101
102
  * whether show api key in the provider config
@@ -199,16 +200,12 @@ export interface EnabledProvider {
199
200
  source: AiProviderSourceType;
200
201
  }
201
202
 
202
- export interface EnabledAiModel {
203
- abilities: ModelAbilities;
204
- config?: AiModelConfig;
205
- contextWindowTokens?: number;
206
- displayName?: string;
207
- enabled?: boolean;
203
+ export interface EnabledProviderWithModels {
204
+ children: AiModelForSelect[];
208
205
  id: string;
209
- providerId: string;
210
- sort?: number;
211
- type: AiModelType;
206
+ logo?: string;
207
+ name: string;
208
+ source: AiProviderSourceType;
212
209
  }
213
210
 
214
211
  export interface AiProviderRuntimeConfig {
@@ -1,3 +1,5 @@
1
+ import { GroundingSearch } from '@/types/search';
2
+
1
3
  export interface CitationItem {
2
4
  id?: string;
3
5
  onlyUrl?: boolean;
@@ -5,11 +7,6 @@ export interface CitationItem {
5
7
  url: string;
6
8
  }
7
9
 
8
- export interface GroundingSearch {
9
- citations?: CitationItem[];
10
- searchQueries?: string[];
11
- }
12
-
13
10
  export interface ModelReasoning {
14
11
  content?: string;
15
12
  duration?: number;
@@ -2,11 +2,13 @@ import { IPluginErrorType } from '@lobehub/chat-plugin-sdk';
2
2
 
3
3
  import { ILobeAgentRuntimeErrorType } from '@/libs/agent-runtime';
4
4
  import { ErrorType } from '@/types/fetch';
5
- import { GroundingSearch, MessageRoleType, ModelReasoning } from '@/types/message/base';
6
- import { ChatPluginPayload, ChatToolPayload } from '@/types/message/tools';
7
- import { Translate } from '@/types/message/translate';
8
5
  import { MetaData } from '@/types/meta';
9
6
  import { MessageSemanticSearchChunk } from '@/types/rag';
7
+ import { GroundingSearch } from '@/types/search';
8
+
9
+ import { MessageRoleType, ModelReasoning } from './base';
10
+ import { ChatPluginPayload, ChatToolPayload } from './tools';
11
+ import { Translate } from './translate';
10
12
 
11
13
  /**
12
14
  * 聊天消息错误对象
@@ -42,6 +42,10 @@ export interface OpenAIChatMessage {
42
42
  * @title Chat Stream Payload
43
43
  */
44
44
  export interface ChatStreamPayload {
45
+ /**
46
+ * 是否开启搜索
47
+ */
48
+ enabledSearch?: boolean;
45
49
  /**
46
50
  * @title 控制生成文本中的惩罚系数,用于减少重复性
47
51
  * @default 0
@@ -64,6 +68,7 @@ export interface ChatStreamPayload {
64
68
  */
65
69
  n?: number;
66
70
  /**
71
+ * @deprecated
67
72
  * 开启的插件列表
68
73
  */
69
74
  plugins?: string[];
@@ -0,0 +1,29 @@
1
+ export type SearchMode = 'off' | 'auto' | 'on';
2
+
3
+ export enum ModelSearchImplement {
4
+ /**
5
+ * 模型内置了搜索功能
6
+ * 类似 Jina 、PPLX 等模型的搜索模式,让调用方无感知
7
+ */
8
+ Internal = 'internal',
9
+ /**
10
+ * 使用参数开关的方式,例如 Qwen、Google、OpenRouter,搜索结果在
11
+ */
12
+ Params = 'params',
13
+ /**
14
+ * 使用工具调用的方式
15
+ */
16
+ Tool = 'tool',
17
+ }
18
+
19
+ export interface CitationItem {
20
+ favicon?: string;
21
+ id?: string;
22
+ title?: string;
23
+ url: string;
24
+ }
25
+
26
+ export interface GroundingSearch {
27
+ citations?: CitationItem[];
28
+ searchQueries?: string[];
29
+ }
@@ -6,11 +6,11 @@ import { ChatErrorType } from '@/types/fetch';
6
6
  import { SmoothingParams } from '@/types/llm';
7
7
  import {
8
8
  ChatMessageError,
9
- CitationItem,
10
9
  MessageToolCall,
11
10
  MessageToolCallChunk,
12
11
  MessageToolCallSchema,
13
12
  } from '@/types/message';
13
+ import { GroundingSearch } from '@/types/search';
14
14
 
15
15
  import { fetchEventSource } from './fetchEventSource';
16
16
  import { getMessageError } from './parseError';
@@ -21,7 +21,7 @@ type SSEFinishType = 'done' | 'error' | 'abort';
21
21
  export type OnFinishHandler = (
22
22
  text: string,
23
23
  context: {
24
- citations?: CitationItem[];
24
+ grounding?: GroundingSearch;
25
25
  observationId?: string | null;
26
26
  reasoning?: string;
27
27
  toolCalls?: MessageToolCall[];
@@ -40,9 +40,9 @@ export interface MessageReasoningChunk {
40
40
  type: 'reasoning';
41
41
  }
42
42
 
43
- export interface MessageCitationsChunk {
44
- citations: CitationItem[];
45
- type: 'citations';
43
+ export interface MessageGroundingChunk {
44
+ grounding: GroundingSearch;
45
+ type: 'grounding';
46
46
  }
47
47
 
48
48
  interface MessageToolCallsChunk {
@@ -57,7 +57,7 @@ export interface FetchSSEOptions {
57
57
  onErrorHandle?: (error: ChatMessageError) => void;
58
58
  onFinish?: OnFinishHandler;
59
59
  onMessageHandle?: (
60
- chunk: MessageTextChunk | MessageToolCallsChunk | MessageReasoningChunk | MessageCitationsChunk,
60
+ chunk: MessageTextChunk | MessageToolCallsChunk | MessageReasoningChunk | MessageGroundingChunk,
61
61
  ) => void;
62
62
  smoothing?: SmoothingParams | boolean;
63
63
  }
@@ -286,7 +286,7 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
286
286
  startSpeed: smoothingSpeed,
287
287
  });
288
288
 
289
- let citations: CitationItem[] | undefined = undefined;
289
+ let grounding: GroundingSearch | undefined = undefined;
290
290
  await fetchEventSource(url, {
291
291
  body: options.body,
292
292
  fetch: options?.fetcher,
@@ -359,9 +359,9 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
359
359
  break;
360
360
  }
361
361
 
362
- case 'citations': {
363
- citations = data;
364
- options.onMessageHandle?.({ citations: data, type: 'citations' });
362
+ case 'grounding': {
363
+ grounding = data;
364
+ options.onMessageHandle?.({ grounding: data, type: 'grounding' });
365
365
  break;
366
366
  }
367
367
 
@@ -434,7 +434,7 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
434
434
  }
435
435
 
436
436
  await options?.onFinish?.(output, {
437
- citations,
437
+ grounding,
438
438
  observationId,
439
439
  reasoning: !!thinking ? thinking : undefined,
440
440
  toolCalls,
@@ -1,20 +0,0 @@
1
- import { ActionIcon } from '@lobehub/ui';
2
- import { Brain } from 'lucide-react';
3
- import { memo } from 'react';
4
- import { useTranslation } from 'react-i18next';
5
-
6
- import ModelSwitchPanel from '@/features/ModelSwitchPanel';
7
-
8
- const ModelSwitch = memo(() => {
9
- const { t } = useTranslation('chat');
10
-
11
- return (
12
- <ModelSwitchPanel>
13
- <ActionIcon icon={Brain} placement={'bottom'} title={t('ModelSwitch.title')} />
14
- </ModelSwitchPanel>
15
- );
16
- });
17
-
18
- ModelSwitch.displayName = 'ModelSwitch';
19
-
20
- export default ModelSwitch;