@lobehub/chat 1.62.10 → 1.63.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +66 -0
- package/changelog/v1.json +24 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +18 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +18 -0
- package/docs/self-hosting/server-database/sealos.mdx +5 -1
- package/locales/ar/chat.json +26 -0
- package/locales/ar/models.json +21 -0
- package/locales/bg-BG/chat.json +26 -0
- package/locales/bg-BG/models.json +21 -0
- package/locales/de-DE/chat.json +26 -0
- package/locales/de-DE/models.json +21 -0
- package/locales/en-US/chat.json +26 -0
- package/locales/en-US/models.json +21 -0
- package/locales/es-ES/chat.json +26 -0
- package/locales/es-ES/models.json +21 -0
- package/locales/fa-IR/chat.json +26 -0
- package/locales/fa-IR/models.json +21 -0
- package/locales/fr-FR/chat.json +26 -0
- package/locales/fr-FR/models.json +21 -0
- package/locales/it-IT/chat.json +26 -0
- package/locales/it-IT/models.json +21 -0
- package/locales/ja-JP/chat.json +26 -0
- package/locales/ja-JP/models.json +21 -0
- package/locales/ko-KR/chat.json +26 -0
- package/locales/ko-KR/models.json +21 -0
- package/locales/nl-NL/chat.json +26 -0
- package/locales/nl-NL/models.json +21 -0
- package/locales/pl-PL/chat.json +26 -0
- package/locales/pl-PL/models.json +21 -0
- package/locales/pt-BR/chat.json +26 -0
- package/locales/pt-BR/models.json +21 -0
- package/locales/ru-RU/chat.json +26 -0
- package/locales/ru-RU/models.json +21 -0
- package/locales/tr-TR/chat.json +26 -0
- package/locales/tr-TR/models.json +21 -0
- package/locales/vi-VN/chat.json +26 -0
- package/locales/vi-VN/models.json +21 -0
- package/locales/zh-CN/chat.json +27 -1
- package/locales/zh-CN/models.json +25 -4
- package/locales/zh-TW/chat.json +26 -0
- package/locales/zh-TW/models.json +21 -0
- package/package.json +3 -3
- package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatInput/Desktop/index.tsx +1 -0
- package/src/config/aiModels/google.ts +8 -0
- package/src/config/aiModels/groq.ts +111 -95
- package/src/config/aiModels/hunyuan.ts +36 -4
- package/src/config/aiModels/internlm.ts +4 -5
- package/src/config/aiModels/jina.ts +3 -0
- package/src/config/aiModels/mistral.ts +35 -21
- package/src/config/aiModels/novita.ts +293 -32
- package/src/config/aiModels/perplexity.ts +14 -2
- package/src/config/aiModels/qwen.ts +91 -37
- package/src/config/aiModels/sensenova.ts +70 -17
- package/src/config/aiModels/siliconcloud.ts +5 -3
- package/src/config/aiModels/stepfun.ts +19 -0
- package/src/config/aiModels/taichu.ts +4 -2
- package/src/config/aiModels/upstage.ts +24 -11
- package/src/config/modelProviders/openrouter.ts +1 -0
- package/src/config/modelProviders/qwen.ts +2 -1
- package/src/config/modelProviders/volcengine.ts +4 -1
- package/src/const/settings/agent.ts +1 -0
- package/src/database/repositories/aiInfra/index.test.ts +2 -5
- package/src/database/repositories/aiInfra/index.ts +6 -2
- package/src/database/schemas/message.ts +2 -1
- package/src/database/server/models/aiModel.ts +1 -1
- package/src/database/server/models/aiProvider.ts +6 -1
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -0
- package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +40 -0
- package/src/features/ChatInput/ActionBar/Model/index.tsx +132 -0
- package/src/features/ChatInput/ActionBar/Params/index.tsx +2 -2
- package/src/features/ChatInput/ActionBar/Search/ExaIcon.tsx +15 -0
- package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +68 -0
- package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +167 -0
- package/src/features/ChatInput/ActionBar/Search/index.tsx +76 -0
- package/src/features/ChatInput/ActionBar/config.ts +4 -2
- package/src/features/Conversation/Messages/Assistant/SearchGrounding.tsx +153 -0
- package/src/features/Conversation/Messages/Assistant/index.tsx +7 -1
- package/src/features/ModelSelect/index.tsx +1 -1
- package/src/features/ModelSwitchPanel/index.tsx +2 -3
- package/src/hooks/useEnabledChatModels.ts +1 -1
- package/src/libs/agent-runtime/azureai/index.ts +21 -2
- package/src/libs/agent-runtime/google/index.test.ts +142 -36
- package/src/libs/agent-runtime/google/index.ts +26 -51
- package/src/libs/agent-runtime/novita/__snapshots__/index.test.ts.snap +3 -3
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -3
- package/src/libs/agent-runtime/openrouter/index.ts +20 -20
- package/src/libs/agent-runtime/perplexity/index.test.ts +2 -2
- package/src/libs/agent-runtime/qwen/index.ts +38 -55
- package/src/libs/agent-runtime/types/chat.ts +6 -2
- package/src/libs/agent-runtime/utils/streams/google-ai.ts +29 -4
- package/src/libs/agent-runtime/utils/streams/openai.ts +1 -1
- package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
- package/src/locales/default/chat.ts +28 -0
- package/src/services/chat.ts +10 -0
- package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +1 -0
- package/src/store/agent/slices/chat/selectors.ts +6 -0
- package/src/store/aiInfra/slices/aiModel/selectors.ts +36 -0
- package/src/store/aiInfra/slices/aiProvider/initialState.ts +2 -2
- package/src/store/aiInfra/slices/aiProvider/selectors.ts +14 -0
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +15 -5
- package/src/store/chat/slices/message/action.ts +1 -1
- package/src/store/user/slices/modelList/selectors/modelProvider.ts +1 -1
- package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +1 -0
- package/src/types/agent/index.ts +4 -0
- package/src/types/aiModel.ts +35 -8
- package/src/types/aiProvider.ts +7 -10
- package/src/types/message/base.ts +2 -5
- package/src/types/message/chat.ts +5 -3
- package/src/types/openai/chat.ts +5 -0
- package/src/types/search.ts +29 -0
- package/src/utils/fetch/fetchSSE.ts +11 -11
- package/src/features/ChatInput/ActionBar/ModelSwitch.tsx +0 -20
package/src/libs/agent-runtime/google/index.ts
CHANGED
@@ -5,12 +5,13 @@ import {
   FunctionDeclaration,
   Tool as GoogleFunctionCallTool,
   GoogleGenerativeAI,
+  GoogleSearchRetrievalTool,
   Part,
   SchemaType,
 } from '@google/generative-ai';
 
-import type { ChatModelCard } from '@/types/llm';
 import { VertexAIStream } from '@/libs/agent-runtime/utils/streams/vertex-ai';
+import type { ChatModelCard } from '@/types/llm';
 import { imageUrlToBase64 } from '@/utils/imageToBase64';
 import { safeParseJSON } from '@/utils/safeParseJSON';
 
@@ -86,7 +87,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     const payload = this.buildPayload(rawPayload);
     const model = payload.model;
 
-    const contents = await this.buildGoogleMessages(payload.messages, model);
+    const contents = await this.buildGoogleMessages(payload.messages);
 
     const geminiStreamResult = await this.client
       .getGenerativeModel(
@@ -123,7 +124,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       .generateContentStream({
        contents,
        systemInstruction: payload.system as string,
-        tools: this.buildGoogleTools(payload.tools),
+        tools: this.buildGoogleTools(payload.tools, payload),
      });
 
     const googleStream = convertIterableToStream(geminiStreamResult.stream);
@@ -168,26 +169,30 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       .map((model) => {
         const modelName = model.name.replace(/^models\//, '');
 
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => modelName.toLowerCase() === m.id.toLowerCase());
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+          (m) => modelName.toLowerCase() === m.id.toLowerCase(),
+        );
 
         return {
           contextWindowTokens: model.inputTokenLimit + model.outputTokenLimit,
           displayName: model.displayName,
           enabled: knownModel?.enabled || false,
           functionCall:
-            modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('thinking')
-            || knownModel?.abilities?.functionCall
-            || false,
+            (modelName.toLowerCase().includes('gemini') &&
+              !modelName.toLowerCase().includes('thinking')) ||
+            knownModel?.abilities?.functionCall ||
+            false,
           id: modelName,
           reasoning:
-            modelName.toLowerCase().includes('thinking')
-            || knownModel?.abilities?.reasoning
-            || false,
+            modelName.toLowerCase().includes('thinking') ||
+            knownModel?.abilities?.reasoning ||
+            false,
           vision:
-            modelName.toLowerCase().includes('vision')
-            || (modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('gemini-1.0'))
-            || knownModel?.abilities?.vision
-            || false,
+            modelName.toLowerCase().includes('vision') ||
+            (modelName.toLowerCase().includes('gemini') &&
+              !modelName.toLowerCase().includes('gemini-1.0')) ||
+            knownModel?.abilities?.vision ||
+            false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
@@ -266,43 +271,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   };
 
   // convert messages from the OpenAI format to Google GenAI SDK
-  private buildGoogleMessages = async (
-    messages: OpenAIChatMessage[],
-    model: string,
-  ): Promise<Content[]> => {
-    // if the model is gemini-1.0 we need to pair messages
-    if (model.startsWith('gemini-1.0')) {
-      const contents: Content[] = [];
-      let lastRole = 'model';
-
-      for (const message of messages) {
-        // current to filter function message
-        if (message.role === 'function') {
-          continue;
-        }
-        const googleMessage = await this.convertOAIMessagesToGoogleMessage(message);
-
-        // if the last message is a model message and the current message is a model message
-        // then we need to add a user message to separate them
-        if (lastRole === googleMessage.role) {
-          contents.push({ parts: [{ text: '' }], role: lastRole === 'user' ? 'model' : 'user' });
-        }
-
-        // add the current message to the contents
-        contents.push(googleMessage);
-
-        // update the last role
-        lastRole = googleMessage.role;
-      }
-
-      // if the last message is a user message, then we need to add a model message to separate them
-      if (lastRole === 'model') {
-        contents.push({ parts: [{ text: '' }], role: 'user' });
-      }
-
-      return contents;
-    }
-
+  private buildGoogleMessages = async (messages: OpenAIChatMessage[]): Promise<Content[]> => {
     const pools = messages
       .filter((message) => message.role !== 'function')
       .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg));
@@ -353,7 +322,13 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
   private buildGoogleTools(
     tools: ChatCompletionTool[] | undefined,
+    payload?: ChatStreamPayload,
   ): GoogleFunctionCallTool[] | undefined {
+    // 目前 Tools (例如 googleSearch) 无法与其他 FunctionCall 同时使用
+    if (payload?.enabledSearch) {
+      return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
+    }
+
     if (!tools || tools.length === 0) return;
 
     return [
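Note on the `buildGoogleTools` change: the Chinese comment says that built-in tools such as `googleSearch` currently cannot be used together with other function calls, which is why an enabled search short-circuits the method and drops any user-defined tools. A minimal standalone sketch of that branch (the helper name and free-function shape are illustrative; the real logic is a private method on `LobeGoogleAI`):

```ts
import type {
  GoogleSearchRetrievalTool,
  Tool as GoogleFunctionCallTool,
} from '@google/generative-ai';

// When search is enabled, the built-in googleSearch tool replaces all
// function-call tools, since the API rejects mixing grounding with custom
// function declarations.
const pickGoogleTools = (
  enabledSearch: boolean | undefined,
  functionCallTools: GoogleFunctionCallTool[] | undefined,
): GoogleFunctionCallTool[] | undefined => {
  if (enabledSearch) return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
  if (!functionCallTools || functionCallTools.length === 0) return;
  return functionCallTools;
};
```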
package/src/libs/agent-runtime/novita/__snapshots__/index.test.ts.snap
CHANGED
@@ -26,7 +26,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "contextWindowTokens": 8192,
     "description": "Meta's latest class of models, Llama 3.1, launched with a variety of sizes and configurations. The 8B instruct-tuned version is particularly fast and efficient. It has demonstrated strong performance in human evaluations, outperforming several leading closed-source models.",
     "displayName": "meta-llama/llama-3.1-8b-instruct",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "meta-llama/llama-3.1-8b-instruct",
     "reasoning": false,
@@ -36,7 +36,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "contextWindowTokens": 8192,
     "description": "Meta's latest class of models, Llama 3.1, has launched with a variety of sizes and configurations. The 70B instruct-tuned version is optimized for high-quality dialogue use cases. It has demonstrated strong performance in human evaluations compared to leading closed-source models.",
     "displayName": "meta-llama/llama-3.1-70b-instruct",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "meta-llama/llama-3.1-70b-instruct",
     "reasoning": false,
@@ -46,7 +46,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "contextWindowTokens": 32768,
     "description": "Meta's latest class of models, Llama 3.1, launched with a variety of sizes and configurations. This 405B instruct-tuned version is optimized for high-quality dialogue use cases. It has demonstrated strong performance compared to leading closed-source models, including GPT-4o and Claude 3.5 Sonnet, in evaluations.",
     "displayName": "meta-llama/llama-3.1-405b-instruct",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "meta-llama/llama-3.1-405b-instruct",
     "reasoning": false,
package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap
CHANGED
@@ -407,7 +407,7 @@ It has demonstrated strong performance compared to leading closed-source models
 
 To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
     "displayName": "Meta: Llama 3.1 70B Instruct",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "meta-llama/llama-3.1-70b-instruct",
     "maxTokens": undefined,
@@ -439,7 +439,7 @@ It has demonstrated strong performance compared to leading closed-source models
 
 To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
     "displayName": "Meta: Llama 3.1 8B Instruct",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "meta-llama/llama-3.1-8b-instruct",
     "maxTokens": undefined,
@@ -456,7 +456,7 @@ It has demonstrated strong performance compared to leading closed-source models
 
 To read more about the model release, [click here](https://ai.meta.com/blog/meta-llama-3/). Usage of this model is subject to [Meta's Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).",
     "displayName": "Meta: Llama 3.1 405B Instruct",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "meta-llama/llama-3.1-405b-instruct",
     "maxTokens": undefined,
package/src/libs/agent-runtime/openrouter/index.ts
CHANGED
@@ -1,9 +1,9 @@
+import type { ChatModelCard } from '@/types/llm';
+
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { OpenRouterModelCard } from './type';
 
-import type { ChatModelCard } from '@/types/llm';
-
 export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://openrouter.ai/api/v1',
   chatCompletion: {
@@ -11,6 +11,7 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
       return {
         ...payload,
         include_reasoning: true,
+        model: payload.enabledSearch ? `${payload.model}:online` : payload.model,
        stream: payload.stream ?? true,
      } as any;
    },
@@ -27,10 +28,7 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
   models: async ({ client }) => {
     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-    const visionKeywords = [
-      'qwen/qvq',
-      'vision'
-    ];
+    const visionKeywords = ['qwen/qvq', 'vision'];
 
     const reasoningKeywords = [
       'deepseek/deepseek-r1',
@@ -41,12 +39,14 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
       'thinking',
     ];
 
-    const modelsPage = await client.models.list() as any;
+    const modelsPage = (await client.models.list()) as any;
     const modelList: OpenRouterModelCard[] = modelsPage.data;
 
     return modelList
       .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+        );
 
         return {
           contextWindowTokens: model.context_length,
@@ -54,25 +54,25 @@ export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
           displayName: model.name,
           enabled: knownModel?.enabled || false,
           functionCall:
-            model.description.includes('function calling')
-            || model.description.includes('tools')
-            || knownModel?.abilities?.functionCall
-            || false,
+            model.description.includes('function calling') ||
+            model.description.includes('tools') ||
+            knownModel?.abilities?.functionCall ||
+            false,
           id: model.id,
           maxTokens:
             typeof model.top_provider.max_completion_tokens === 'number'
               ? model.top_provider.max_completion_tokens
               : undefined,
           reasoning:
-            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-            || knownModel?.abilities?.reasoning
-            || false,
+            reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+            knownModel?.abilities?.reasoning ||
+            false,
           vision:
-            model.description.includes('vision')
-            || model.description.includes('multimodal')
-            || visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-            || knownModel?.abilities?.vision
-            || false,
+            model.description.includes('vision') ||
+            model.description.includes('multimodal') ||
+            visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+            knownModel?.abilities?.vision ||
+            false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
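The one functional change here is the `model` mapping in `handlePayload`: OpenRouter exposes web search by appending the `:online` suffix to a model slug, so enabling search only rewrites the model id. A sketch of that mapping in isolation (the function name is illustrative):

```ts
// OpenRouter addresses the web-search variant of a model by suffixing the
// slug with ':online'; no other payload changes are needed.
const resolveOpenRouterModel = (model: string, enabledSearch?: boolean): string =>
  enabledSearch ? `${model}:online` : model;

// e.g. resolveOpenRouterModel('meta-llama/llama-3.1-70b-instruct', true)
//   -> 'meta-llama/llama-3.1-70b-instruct:online'
```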
package/src/libs/agent-runtime/perplexity/index.test.ts
CHANGED
@@ -203,8 +203,8 @@ describe('LobePerplexityAI', () => {
       expect(stream).toEqual(
         [
           'id: 506d64fb-e7f2-4d94-b80f-158369e9446d',
-          'event: citations',
-          'data: [{"title":"https://www.weather.com.cn/weather/101210101.shtml","url":"https://www.weather.com.cn/weather/101210101.shtml"},{"title":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou","url":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou"},{"title":"https://weather.cma.cn/web/weather/58457.html","url":"https://weather.cma.cn/web/weather/58457.html"},{"title":"https://tianqi.so.com/weather/101210101","url":"https://tianqi.so.com/weather/101210101"},{"title":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832","url":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832"},{"title":"https://www.hzqx.com","url":"https://www.hzqx.com"},{"title":"https://www.hzqx.com/pc/hztq/","url":"https://www.hzqx.com/pc/hztq/"}]\n',
+          'event: grounding',
+          'data: {"citations":[{"title":"https://www.weather.com.cn/weather/101210101.shtml","url":"https://www.weather.com.cn/weather/101210101.shtml"},{"title":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou","url":"https://tianqi.moji.com/weather/china/zhejiang/hangzhou"},{"title":"https://weather.cma.cn/web/weather/58457.html","url":"https://weather.cma.cn/web/weather/58457.html"},{"title":"https://tianqi.so.com/weather/101210101","url":"https://tianqi.so.com/weather/101210101"},{"title":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832","url":"https://www.accuweather.com/zh/cn/hangzhou/106832/weather-forecast/106832"},{"title":"https://www.hzqx.com","url":"https://www.hzqx.com"},{"title":"https://www.hzqx.com/pc/hztq/","url":"https://www.hzqx.com/pc/hztq/"}]}\n',
           'id: 506d64fb-e7f2-4d94-b80f-158369e9446d',
           'event: text',
           'data: "杭州今"\n',
package/src/libs/agent-runtime/qwen/index.ts
CHANGED
@@ -1,24 +1,13 @@
+import type { ChatModelCard } from '@/types/llm';
+
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
-
 import { QwenAIStream } from '../utils/streams';
 
-import type { ChatModelCard } from '@/types/llm';
-
 export interface QwenModelCard {
   id: string;
 }
 
-/*
-  QwenEnableSearchModelSeries: An array of Qwen model series that support the enable_search parameter.
-  Currently, enable_search is only supported on Qwen commercial series, excluding Qwen-VL and Qwen-Long series.
-*/
-export const QwenEnableSearchModelSeries = [
-  'qwen-max',
-  'qwen-plus',
-  'qwen-turbo',
-];
-
 /*
   QwenLegacyModels: A set of legacy Qwen models that do not support presence_penalty.
   Currently, presence_penalty is only supported on Qwen commercial models and open-source models starting from Qwen 1.5 and later.
@@ -35,30 +24,34 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
   chatCompletion: {
     handlePayload: (payload) => {
-      const { model, presence_penalty, temperature, top_p, ...rest } = payload;
+      const { model, presence_penalty, temperature, top_p, enabledSearch, ...rest } = payload;
 
       return {
         ...rest,
         frequency_penalty: undefined,
         model,
-        presence_penalty:
-          QwenLegacyModels.has(model)
-            ? undefined
-            : presence_penalty !== undefined && presence_penalty >= -2 && presence_penalty <= 2
-              ? presence_penalty
-              : undefined,
+        presence_penalty: QwenLegacyModels.has(model)
+          ? undefined
+          : presence_penalty !== undefined && presence_penalty >= -2 && presence_penalty <= 2
+            ? presence_penalty
+            : undefined,
         stream: !payload.tools,
-        temperature:
-          temperature !== undefined && temperature >= 0 && temperature < 2 ? temperature : undefined,
-        ...(model.startsWith('qvq') || model.startsWith('qwen-vl')
-          ? { top_p: top_p !== undefined && top_p > 0 && top_p <= 1 ? top_p : undefined }
-          : { top_p: top_p !== undefined && top_p > 0 && top_p < 1 ? top_p : undefined }),
-
-        ...(QwenEnableSearchModelSeries.some((series) => model.startsWith(series)) && {
-          enable_search: true,
+        temperature:
+          temperature !== undefined && temperature >= 0 && temperature < 2
+            ? temperature
+            : undefined,
+        ...(model.startsWith('qvq') || model.startsWith('qwen-vl')
+          ? {
+              top_p: top_p !== undefined && top_p > 0 && top_p <= 1 ? top_p : undefined,
+            }
+          : {
+              top_p: top_p !== undefined && top_p > 0 && top_p < 1 ? top_p : undefined,
+            }),
+        ...(enabledSearch && {
+          enable_search: enabledSearch,
           search_options: {
             search_strategy: process.env.QWEN_SEARCH_STRATEGY || 'standard', // standard or pro
-          }
+          },
         }),
         ...(payload.tools && {
           parallel_tool_calls: true,
@@ -73,48 +66,38 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
   models: async ({ client }) => {
     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-    const functionCallKeywords = [
-      'qwen-max',
-      'qwen-plus',
-      'qwen-turbo',
-      'qwen2.5',
-    ];
+    const functionCallKeywords = ['qwen-max', 'qwen-plus', 'qwen-turbo', 'qwen2.5'];
 
-    const visionKeywords = [
-      'qvq',
-      'vl',
-    ];
+    const visionKeywords = ['qvq', 'vl'];
 
-    const reasoningKeywords = [
-      'qvq',
-      'qwq',
-      'deepseek-r1'
-    ];
+    const reasoningKeywords = ['qvq', 'qwq', 'deepseek-r1'];
 
-    const modelsPage = await client.models.list() as any;
+    const modelsPage = (await client.models.list()) as any;
     const modelList: QwenModelCard[] = modelsPage.data;
 
     return modelList
       .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.toLowerCase() === m.id.toLowerCase());
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+        );
 
         return {
           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
           displayName: knownModel?.displayName ?? undefined,
           enabled: knownModel?.enabled || false,
           functionCall:
-            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-            || knownModel?.abilities?.functionCall
-            || false,
+            functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+            knownModel?.abilities?.functionCall ||
+            false,
           id: model.id,
           reasoning:
-            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-            || knownModel?.abilities?.reasoning
-            || false,
+            reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+            knownModel?.abilities?.reasoning ||
+            false,
           vision:
-            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-            || knownModel?.abilities?.vision
-            || false,
+            visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+            knownModel?.abilities?.vision ||
+            false,
         };
       })
       .filter(Boolean) as ChatModelCard[];
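Two things change in the Qwen runtime: the hard-coded `QwenEnableSearchModelSeries` allowlist is removed in favor of the caller-driven `enabledSearch` flag, while the DashScope-specific request fields stay the same: `enable_search` plus `search_options.search_strategy` ('standard' or 'pro', driven by the `QWEN_SEARCH_STRATEGY` environment variable). A minimal sketch of just the search-related payload extension (the helper name is illustrative):

```ts
// The extra fields the Qwen (DashScope) OpenAI-compatible endpoint expects
// when web search is on; search_strategy is 'standard' or 'pro'.
const withQwenSearch = <T extends object>(body: T, enabledSearch?: boolean) => ({
  ...body,
  ...(enabledSearch && {
    enable_search: true,
    search_options: {
      search_strategy: process.env.QWEN_SEARCH_STRATEGY || 'standard',
    },
  }),
});
```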
package/src/libs/agent-runtime/types/chat.ts
CHANGED
@@ -38,6 +38,10 @@ export interface OpenAIChatMessage {
  * @title Chat Stream Payload
  */
 export interface ChatStreamPayload {
+  /**
+   * 是否开启搜索
+   */
+  enabledSearch?: boolean;
   /**
    * @title 控制生成文本中的惩罚系数,用于减少重复性
    * @default 0
@@ -68,11 +72,11 @@ export interface ChatStreamPayload {
    * @default 0
    */
   presence_penalty?: number;
+
   /**
    * @default openai
    */
   provider?: string;
-
   responseMode?: 'streamText' | 'json';
   /**
    * @title 是否开启流式请求
@@ -85,8 +89,8 @@ export interface ChatStreamPayload {
    */
   temperature: number;
   tool_choice?: string;
-  tools?: ChatCompletionTool[];
 
+  tools?: ChatCompletionTool[];
   /**
    * @title 控制生成文本中最高概率的单个令牌
    * @default 1
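`enabledSearch` (the JSDoc reads "whether to enable search") is the provider-agnostic switch added to `ChatStreamPayload`; each runtime translates it into its vendor mechanism: Google swaps in the `googleSearch` tool, OpenRouter appends `:online`, Qwen sets `enable_search`. An illustrative payload, assuming the field set shown above (model id and message are made up):

```ts
import type { ChatStreamPayload } from '@/libs/agent-runtime/types/chat';

// enabledSearch rides alongside the usual fields and is translated or
// stripped per provider before the request goes out.
const payload: ChatStreamPayload = {
  enabledSearch: true,
  messages: [{ content: 'What is the weather in Hangzhou today?', role: 'user' }],
  model: 'gemini-2.0-flash',
  temperature: 1,
};
```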
package/src/libs/agent-runtime/utils/streams/google-ai.ts
CHANGED
@@ -1,5 +1,6 @@
 import { EnhancedGenerateContentResponse } from '@google/generative-ai';
 
+import { GroundingSearch } from '@/types/search';
 import { nanoid } from '@/utils/uuid';
 
 import { ChatStreamCallbacks } from '../../types';
@@ -14,8 +15,8 @@ import {
 
 const transformGoogleGenerativeAIStream = (
   chunk: EnhancedGenerateContentResponse,
-  stack?: StreamContext,
-): StreamProtocolChunk => {
+  context: StreamContext,
+): StreamProtocolChunk | StreamProtocolChunk[] => {
   // maybe need another structure to add support for multiple choices
   const functionCalls = chunk.functionCalls();
 
@@ -32,15 +33,39 @@ const transformGoogleGenerativeAIStream = (
           type: 'function',
         }),
       ),
-      id: stack?.id,
+      id: context.id,
       type: 'tool_calls',
     };
   }
   const text = chunk.text();
 
+  if (chunk.candidates && chunk.candidates[0].groundingMetadata) {
+    const { webSearchQueries, groundingSupports, groundingChunks } =
+      chunk.candidates[0].groundingMetadata;
+    console.log({ groundingChunks, groundingSupports, webSearchQueries });
+
+    return [
+      { data: text, id: context.id, type: 'text' },
+      {
+        data: {
+          citations: groundingChunks?.map((chunk) => ({
+            // google 返回的 uri 是经过 google 自己处理过的 url,因此无法展现真实的 favicon
+            // 需要使用 title 作为替换
+            favicon: chunk.web?.title,
+            title: chunk.web?.title,
+            url: chunk.web?.uri,
+          })),
+          searchQueries: webSearchQueries,
+        } as GroundingSearch,
+        id: context.id,
+        type: 'grounding',
+      },
+    ];
+  }
+
   return {
     data: text,
-    id: stack?.id,
+    id: context?.id,
     type: 'text',
   };
 };
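A gloss on the Chinese comments in the grounding branch: the `uri` Google returns is a Google-processed redirect URL, so the real favicon cannot be derived from it and the `title` is used as a stand-in. Note also that the transformer can now return an array, letting one Gemini chunk fan out into a `text` frame plus a `grounding` frame under the same message id, which is exactly the SSE sequence asserted in the Perplexity test above. A sketch of the frame layout (the interface mirrors how the transformer fills the payload; the canonical definition lives in package/src/types/search.ts, added in this release):

```ts
// Assumed shape of the grounding payload.
interface GroundingSearchLike {
  citations?: { favicon?: string; title?: string; url?: string }[];
  searchQueries?: string[];
}

// One model chunk can fan out into two protocol frames sharing an id:
const frames = (id: string, text: string, grounding: GroundingSearchLike) => [
  `id: ${id}`,
  'event: text',
  `data: ${JSON.stringify(text)}\n`,
  `id: ${id}`,
  'event: grounding',
  `data: ${JSON.stringify(grounding)}\n`,
];
```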
package/src/locales/default/chat.ts
CHANGED
@@ -32,6 +32,9 @@ export default {
   },
   duplicateTitle: '{{title}} 副本',
   emptyAgent: '暂无助手',
+  extendControls: {
+    title: '模型扩展功能',
+  },
   historyRange: '历史范围',
   historySummary: '历史消息总结',
   inbox: {
@@ -86,6 +89,31 @@ export default {
   },
   regenerate: '重新生成',
   roleAndArchive: '角色与记录',
+  search: {
+    grounding: {
+      searchQueries: '搜索关键词',
+      title: '已搜索到 {{count}} 个结果',
+    },
+
+    mode: {
+      auto: {
+        desc: '根据对话内容智能判断是否需要搜索',
+        title: '智能联网',
+      },
+      disable: '当前模型不支持函数调用,因此无法使用智能联网功能',
+      off: {
+        desc: '仅使用模型的基础知识,不进行网络搜索',
+        title: '关闭联网',
+      },
+      on: {
+        desc: '持续进行网络搜索,获取最新信息',
+        title: '始终联网',
+      },
+      useModelBuiltin: '使用模型内置搜索引擎',
+    },
+
+    title: '联网搜索',
+  },
   searchAgentPlaceholder: '搜索助手...',
   sendPlaceholder: '输入聊天内容...',
   sessionGroup: {
package/src/services/chat.ts
CHANGED
@@ -18,6 +18,7 @@ import {
 import { filesPrompts } from '@/prompts/files';
 import { BuiltinSystemRolePrompts } from '@/prompts/systemRole';
 import { aiModelSelectors, aiProviderSelectors, useAiInfraStore } from '@/store/aiInfra';
+import { getAgentChatConfig } from '@/store/chat/slices/aiChat/actions/helpers';
 import { useSessionStore } from '@/store/session';
 import { sessionMetaSelectors } from '@/store/session/selectors';
 import { useToolStore } from '@/store/tool';
@@ -224,6 +225,8 @@ class ChatService {
 
     const { provider = ModelProvider.OpenAI, ...res } = params;
 
+    // =================== process model =================== //
+    // ===================================================== //
     let model = res.model || DEFAULT_AGENT_CONFIG.model;
 
     // if the provider is Azure, get the deployment name as the request model
@@ -238,6 +241,13 @@ class ChatService {
       model = findDeploymentName(model, provider);
     }
 
+    // =================== process search =================== //
+    // ===================================================== //
+    const chatConfig = getAgentChatConfig();
+    if (chatConfig.searchMode !== 'off') {
+      res.enabledSearch = true;
+    }
+
     const payload = merge(
       { model: DEFAULT_AGENT_CONFIG.model, stream: true, ...DEFAULT_AGENT_CONFIG.params },
       { ...res, model },
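The service layer only checks for 'off': both 'auto' and 'on' set `enabledSearch` on the outgoing payload, and the auto/always distinction is resolved elsewhere (per the locale copy above, 智能联网 "smart search" lets the model decide, 始终联网 means always search, 关闭联网 means never). A sketch of the mapping, assuming the three-valued `searchMode` implied by those strings:

```ts
type SearchMode = 'off' | 'auto' | 'on';

// Anything but an explicit 'off' turns the wire-level flag on.
const toEnabledSearch = (searchMode?: SearchMode): true | undefined =>
  searchMode && searchMode !== 'off' ? true : undefined;
```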
package/src/store/agent/slices/chat/selectors.ts
CHANGED
@@ -107,6 +107,8 @@ const currentEnabledKnowledge = (s: AgentStore) => {
   ] as KnowledgeItem[];
 };
 
+const agentSearchMode = (s: AgentStore) => currentAgentChatConfig(s).searchMode || 'off';
+
 const hasSystemRole = (s: AgentStore) => {
   const config = currentAgentConfig(s);
 
@@ -140,7 +142,10 @@ const currentKnowledgeIds = (s: AgentStore) => {
 
 const isAgentConfigLoading = (s: AgentStore) => !s.agentConfigInitMap[s.activeId];
 
+const isAgentEnableSearch = (s: AgentStore) => agentSearchMode(s) !== 'off';
+
 export const agentSelectors = {
+  agentSearchMode,
   currentAgentChatConfig,
   currentAgentConfig,
   currentAgentFiles,
@@ -160,5 +165,6 @@ export const agentSelectors = {
   inboxAgentConfig,
   inboxAgentModel,
   isAgentConfigLoading,
+  isAgentEnableSearch,
   isInboxSession,
 };
|