@lobehub/chat 1.69.6 → 1.70.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/docs/self-hosting/advanced/auth/clerk.zh-CN.mdx +1 -1
- package/docs/self-hosting/server-database/vercel.zh-CN.mdx +1 -1
- package/locales/ar/chat.json +7 -1
- package/locales/bg-BG/chat.json +7 -1
- package/locales/de-DE/chat.json +7 -1
- package/locales/en-US/chat.json +7 -1
- package/locales/es-ES/chat.json +7 -1
- package/locales/fa-IR/chat.json +7 -1
- package/locales/fr-FR/chat.json +7 -1
- package/locales/it-IT/chat.json +7 -1
- package/locales/ja-JP/chat.json +7 -1
- package/locales/ko-KR/chat.json +7 -1
- package/locales/nl-NL/chat.json +7 -1
- package/locales/pl-PL/chat.json +7 -1
- package/locales/pt-BR/chat.json +7 -1
- package/locales/ru-RU/chat.json +7 -1
- package/locales/tr-TR/chat.json +7 -1
- package/locales/vi-VN/chat.json +7 -1
- package/locales/zh-CN/chat.json +7 -1
- package/locales/zh-TW/chat.json +7 -1
- package/package.json +1 -1
- package/packages/web-crawler/src/crawler.ts +11 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +4 -1
- package/src/app/(backend)/webapi/chat/[provider]/route.ts +5 -1
- package/src/{features/Conversation/Messages/Assistant/Tool/Inspector/Loader.tsx → components/CircleLoader/index.tsx} +4 -3
- package/src/config/tools.ts +2 -0
- package/src/const/settings/agent.ts +6 -0
- package/src/features/ChatInput/ActionBar/Search/FCSearchModel.tsx +56 -0
- package/src/features/ChatInput/ActionBar/Search/FunctionCallingModelSelect/index.tsx +85 -0
- package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +9 -23
- package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +15 -23
- package/src/features/Conversation/Extras/Usage/UsageDetail/pricing.ts +26 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +4 -4
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +15 -10
- package/src/features/Conversation/Messages/Assistant/IntentUnderstanding.tsx +25 -0
- package/src/features/Conversation/Messages/Assistant/Tool/Inspector/ToolTitle.tsx +1 -2
- package/src/features/Conversation/Messages/Assistant/index.tsx +18 -9
- package/src/features/ModelSwitchPanel/index.tsx +1 -4
- package/src/hooks/useAgentEnableSearch.ts +1 -9
- package/src/locales/default/chat.ts +7 -3
- package/src/server/routers/tools/search.ts +8 -1
- package/src/services/__tests__/chat.test.ts +1 -0
- package/src/services/chat.ts +63 -30
- package/src/services/session/type.ts +1 -1
- package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap +4 -0
- package/src/store/agent/slices/chat/selectors/chatConfig.ts +5 -1
- package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +12 -4
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +113 -13
- package/src/store/chat/slices/aiChat/initialState.ts +2 -0
- package/src/store/chat/slices/aiChat/selectors.ts +8 -1
- package/src/store/chat/slices/message/action.ts +9 -1
- package/src/store/chat/slices/plugin/action.ts +6 -4
- package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +4 -0
- package/src/types/agent/chatConfig.ts +6 -0
- package/src/types/openai/chat.ts +0 -1
package/src/server/routers/tools/search.ts
CHANGED
@@ -20,7 +20,14 @@ export const searchRouter = router({
       }),
     )
     .mutation(async ({ input }) => {
-      const crawler = new Crawler();
+      const envString = toolsEnv.CRAWLER_IMPLS || '';
+
+      // Handle full-width commas and extra spaces
+      let envValue = envString.replaceAll('，', ',').trim();
+
+      const impls = envValue.split(',').filter(Boolean);
+
+      const crawler = new Crawler({ impls });

       const results = await pMap(
         input.urls,
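A note on the parsing above: it reads as one standalone helper. A minimal sketch under the same normalization rules (the helper name is illustrative, not part of the package):

// Sketch: normalize a CRAWLER_IMPLS-style env value the way the mutation does.
// '，' is the full-width comma, so values pasted from a Chinese input method,
// e.g. 'jina，naive', still split into two implementations.
const parseCrawlerImpls = (raw: string | undefined): string[] =>
  (raw || '').replaceAll('，', ',').trim().split(',').filter(Boolean);

parseCrawlerImpls('jina，naive'); // ['jina', 'naive']
parseCrawlerImpls(''); // [] — ''.split(',') gives [''], which filter(Boolean) drops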
package/src/services/chat.ts
CHANGED
@@ -32,6 +32,7 @@ import {
   userProfileSelectors,
 } from '@/store/user/selectors';
 import { WebBrowsingManifest } from '@/tools/web-browsing';
+import { WorkingModel } from '@/types/agent';
 import { ChatErrorType } from '@/types/fetch';
 import { ChatMessage, MessageToolCall } from '@/types/message';
 import type { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';
@@ -201,17 +202,10 @@ class ChatService {

     // ============ 2. preprocess tools ============ //

-    let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
-
-    // check this model can use function call
-    const canUseFC = isCanUseFC(payload.model, payload.provider!);
-
-    // the rule that model can use tools:
-    // 1. tools is not empty
-    // 2. model can use function call
-    const shouldUseTools = filterTools.length > 0 && canUseFC;
-
-    const tools = shouldUseTools ? filterTools : undefined;
+    const tools = this.prepareTools(pluginIds, {
+      model: payload.model,
+      provider: payload.provider!,
+    });

     // ============ 3. process extend params ============ //

@@ -433,16 +427,30 @@ class ChatService {
     onLoadingChange?.(true);

     try {
-      await this.getChatCompletion(params, {
-        onErrorHandle: (error) => {
-          errorHandle(new Error(error.message), error);
-        },
-        onFinish,
-        onMessageHandle,
-        signal: abortController?.signal,
-        trace: this.mapTrace(trace, TraceTagMap.SystemChain),
+      const oaiMessages = this.processMessages({
+        messages: params.messages as any,
+        model: params.model!,
+        provider: params.provider!,
+        tools: params.plugins,
+      });
+      const tools = this.prepareTools(params.plugins || [], {
+        model: params.model!,
+        provider: params.provider!,
       });

+      await this.getChatCompletion(
+        { ...params, messages: oaiMessages, tools },
+        {
+          onErrorHandle: (error) => {
+            errorHandle(new Error(error.message), error);
+          },
+          onFinish,
+          onMessageHandle,
+          signal: abortController?.signal,
+          trace: this.mapTrace(trace, TraceTagMap.SystemChain),
+        },
+      );
+
       onLoadingChange?.(false);
     } catch (e) {
       errorHandle(e as Error);
@@ -451,7 +459,7 @@ class ChatService {

   private processMessages = (
     {
-      messages,
+      messages = [],
       tools,
       model,
       provider,
@@ -483,6 +491,7 @@ class ChatService {
     };

     let postMessages = messages.map((m): OpenAIChatMessage => {
+      const supportTools = isCanUseFC(model, provider);
       switch (m.role) {
         case 'user': {
           return { content: getContent(m), role: m.role };
@@ -492,17 +501,23 @@ class ChatService {
           // signature is a signal of anthropic thinking mode
           const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;

+          const content = shouldIncludeThinking
+            ? [
+                {
+                  signature: m.reasoning!.signature,
+                  thinking: m.reasoning!.content,
+                  type: 'thinking',
+                } as any,
+                { text: m.content, type: 'text' },
+              ]
+            : m.content;
+
+          if (!supportTools) {
+            return { content, role: m.role };
+          }
+
           return {
-            content: shouldIncludeThinking
-              ? [
-                  {
-                    signature: m.reasoning!.signature,
-                    thinking: m.reasoning!.content,
-                    type: 'thinking',
-                  } as any,
-                  { text: m.content, type: 'text' },
-                ]
-              : m.content,
+            content,
             role: m.role,
             tool_calls: m.tools?.map(
               (tool): MessageToolCall => ({
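For clarity, the two content shapes the extracted `content` variable can take (placeholder values, mirroring the diff above):

// With an Anthropic thinking signature, content becomes a block array:
const withThinking = [
  { signature: 'sig_abc', thinking: 'model reasoning…', type: 'thinking' },
  { text: 'final answer', type: 'text' },
];
// Without one, it stays a plain string:
const withoutThinking = 'final answer';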
@@ -518,6 +533,10 @@ class ChatService {
         }

         case 'tool': {
+          if (!supportTools) {
+            return { content: m.content, role: 'user' };
+          }
+
           return {
             content: m.content,
             name: genToolCallingName(m.plugin!.identifier, m.plugin!.apiName, m.plugin?.type),
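The same `supportTools` flag downgrades stored tool results for models without function calling. A rough standalone sketch of the rule (simplified types, not the package's real ones):

// Models without function calling reject role:'tool' messages, so the tool
// result is replayed as an ordinary user message instead.
type SimpleMsg = { role: 'user' | 'assistant' | 'tool'; content: string };

const adaptToolMessage = (m: SimpleMsg, supportTools: boolean): SimpleMsg =>
  m.role === 'tool' && !supportTools ? { content: m.content, role: 'user' } : m;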
@@ -669,6 +688,20 @@ class ChatService {

     return reorderedMessages;
   };
+
+  private prepareTools = (pluginIds: string[], { model, provider }: WorkingModel) => {
+    let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
+
+    // check this model can use function call
+    const canUseFC = isCanUseFC(model, provider!);
+
+    // the rule that model can use tools:
+    // 1. tools is not empty
+    // 2. model can use function call
+    const shouldUseTools = filterTools.length > 0 && canUseFC;
+
+    return shouldUseTools ? filterTools : undefined;
+  };
 }

 export const chatService = new ChatService();
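prepareTools states the gating rule once. A self-contained restatement with illustrative types (not the real tool-selector API):

// Tools are attached only when both conditions hold: at least one enabled
// tool schema AND a function-calling-capable model; otherwise the value is
// undefined so the field is omitted from the request payload.
type ToolSchema = { name: string };

const gateTools = (enabled: ToolSchema[], canUseFC: boolean): ToolSchema[] | undefined =>
  enabled.length > 0 && canUseFC ? enabled : undefined;

gateTools([{ name: 'lobe-web-browsing' }], false); // undefined — model lacks FC
gateTools([], true); // undefined — nothing enabled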
package/src/store/agent/slices/chat/selectors/__snapshots__/agent.test.ts.snap
CHANGED
@@ -11,6 +11,10 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
     "enableReasoning": false,
     "historyCount": 8,
     "reasoningBudgetToken": 1024,
+    "searchFCModel": {
+      "model": "gpt-4o-mini",
+      "provider": "openai",
+    },
     "searchMode": "off",
   },
   "model": "gpt-3.5-turbo",
package/src/store/agent/slices/chat/selectors/chatConfig.ts
CHANGED
@@ -1,5 +1,5 @@
 import { contextCachingModels, thinkingWithToolClaudeModels } from '@/const/models';
-import { DEFAULT_AGENT_CHAT_CONFIG } from '@/const/settings';
+import { DEFAULT_AGENT_CHAT_CONFIG, DEFAULT_AGENT_SEARCH_FC_MODEL } from '@/const/settings';
 import { AgentStoreState } from '@/store/agent/initialState';
 import { LobeAgentChatConfig } from '@/types/agent';

@@ -14,6 +14,9 @@ const isAgentEnableSearch = (s: AgentStoreState) => agentSearchMode(s) !== 'off'
 const useModelBuiltinSearch = (s: AgentStoreState) =>
   currentAgentChatConfig(s).useModelBuiltinSearch;

+const searchFCModel = (s: AgentStoreState) =>
+  currentAgentChatConfig(s).searchFCModel || DEFAULT_AGENT_SEARCH_FC_MODEL;
+
 const enableHistoryCount = (s: AgentStoreState) => {
   const config = currentAgentConfig(s);
   const chatConfig = currentAgentChatConfig(s);
@@ -62,5 +65,6 @@ export const agentChatConfigSelectors = {
   enableHistoryDivider,
   historyCount,
   isAgentEnableSearch,
+  searchFCModel,
   useModelBuiltinSearch,
 };
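The selector applies a plain fallback. A minimal sketch, with the default's value taken from the snapshot diffs in this release (gpt-4o-mini on openai):

type WorkingModel = { model: string; provider: string };

const DEFAULT_AGENT_SEARCH_FC_MODEL: WorkingModel = { model: 'gpt-4o-mini', provider: 'openai' };

// Agents without an explicit search function-calling model use the default.
const resolveSearchFCModel = (configured?: WorkingModel): WorkingModel =>
  configured || DEFAULT_AGENT_SEARCH_FC_MODEL;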
package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts
CHANGED
@@ -765,10 +765,12 @@ describe('chatMessage actions', () => {
     (fetch as Mock).mockResolvedValueOnce(new Response(aiResponse));

     await act(async () => {
-      const response = await result.current.internal_fetchAIChatMessage(
+      const response = await result.current.internal_fetchAIChatMessage({
         messages,
-        assistantMessageId,
-      );
+        messageId: assistantMessageId,
+        model: 'gpt-4o-mini',
+        provider: 'openai',
+      });
       expect(response.isFunctionCall).toEqual(false);
     });
   });
@@ -784,7 +786,13 @@ describe('chatMessage actions', () => {

     await act(async () => {
       expect(
-        await result.current.internal_fetchAIChatMessage(messages, assistantMessageId),
+        await result.current.internal_fetchAIChatMessage({
+          model: 'gpt-4o-mini',
+          provider: 'openai',
+
+          messages,
+          messageId: assistantMessageId,
+        }),
       ).toEqual({
         isFunctionCall: false,
       });
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts
CHANGED
@@ -13,10 +13,13 @@ import { messageService } from '@/services/message';
 import { useAgentStore } from '@/store/agent';
 import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selectors';
 import { getAgentStoreState } from '@/store/agent/store';
+import { aiModelSelectors } from '@/store/aiInfra';
+import { getAiInfraStoreState } from '@/store/aiInfra/store';
 import { chatHelpers } from '@/store/chat/helpers';
 import { ChatStore } from '@/store/chat/store';
 import { messageMapKey } from '@/store/chat/utils/messageMapKey';
 import { useSessionStore } from '@/store/session';
+import { WebBrowsingManifest } from '@/tools/web-browsing';
 import { ChatMessage, CreateMessageParams, SendMessageParams } from '@/types/message';
 import { MessageSemanticSearchChunk } from '@/types/rag';
 import { setNamespace } from '@/utils/storeDebug';
@@ -28,6 +31,7 @@ const n = setNamespace('ai');
 interface ProcessMessageParams {
   traceId?: string;
   isWelcomeQuestion?: boolean;
+  inSearchWorkflow?: boolean;
   /**
    * the RAG query content, should be embedding and used in the semantic search
    */
@@ -70,11 +74,13 @@ export interface AIGenerateAction {
   /**
    * Retrieves an AI-generated chat message from the backend service
    */
-  internal_fetchAIChatMessage: (
-    messages: ChatMessage[],
-    messageId: string,
-    params?: ProcessMessageParams,
-  ) => Promise<{
+  internal_fetchAIChatMessage: (input: {
+    messages: ChatMessage[];
+    messageId: string;
+    params?: ProcessMessageParams;
+    model: string;
+    provider: string;
+  }) => Promise<{
     isFunctionCall: boolean;
     traceId?: string;
   }>;
@@ -110,6 +116,8 @@ export interface AIGenerateAction {
     id?: string,
     action?: string,
   ) => AbortController | undefined;
+
+  internal_toggleSearchWorkflow: (loading: boolean, id?: string) => void;
 }

 export const generateAIChat: StateCreator<
@@ -336,10 +344,97 @@ export const generateAIChat: StateCreator<

     const assistantId = await get().internal_createMessage(assistantMessage);

-    // 3. fetch the AI response
-    const { isFunctionCall } = await internal_fetchAIChatMessage(messages, assistantId, params);
+    // 3. place a search with the search working model if this model does not support tool use
+    const isModelSupportToolUse = aiModelSelectors.isModelSupportToolUse(
+      model,
+      provider!,
+    )(getAiInfraStoreState());
+    const isAgentEnableSearch = agentChatConfigSelectors.isAgentEnableSearch(getAgentStoreState());
+
+    if (isAgentEnableSearch && !isModelSupportToolUse) {
+      const { model, provider } = agentChatConfigSelectors.searchFCModel(getAgentStoreState());
+
+      let isToolsCalling = false;
+      let isError = false;
+
+      const abortController = get().internal_toggleChatLoading(
+        true,
+        assistantId,
+        n('generateMessage(start)', { messageId: assistantId, messages }) as string,
+      );
+
+      get().internal_toggleSearchWorkflow(true, assistantId);
+      await chatService.fetchPresetTaskResult({
+        params: { messages, model, provider, plugins: [WebBrowsingManifest.identifier] },
+        onFinish: async (_, { toolCalls, usage }) => {
+          if (toolCalls && toolCalls.length > 0) {
+            get().internal_toggleToolCallingStreaming(assistantId, undefined);
+            // update tools calling
+            await get().internal_updateMessageContent(assistantId, '', {
+              toolCalls,
+              metadata: usage,
+              model,
+              provider,
+            });
+          }
+        },
+        abortController,
+        onMessageHandle: async (chunk) => {
+          if (chunk.type === 'tool_calls') {
+            get().internal_toggleSearchWorkflow(false, assistantId);
+            get().internal_toggleToolCallingStreaming(assistantId, chunk.isAnimationActives);
+            get().internal_dispatchMessage({
+              id: assistantId,
+              type: 'updateMessage',
+              value: { tools: get().internal_transformToolCalls(chunk.tool_calls) },
+            });
+            isToolsCalling = true;
+          }
+
+          if (chunk.type === 'text') {
+            abortController!.abort('not fc');
+          }
+        },
+        onErrorHandle: async (error) => {
+          isError = true;
+          await messageService.updateMessageError(assistantId, error);
+          await refreshMessages();
+        },
+      });
+
+      get().internal_toggleChatLoading(
+        false,
+        assistantId,
+        n('generateMessage(start)', { messageId: assistantId, messages }) as string,
+      );
+      get().internal_toggleSearchWorkflow(false, assistantId);
+
+      // if there is an error, then stop
+      if (isError) return;
+
+      // if it's the function call message, trigger the function method
+      if (isToolsCalling) {
+        await refreshMessages();
+        await triggerToolCalls(assistantId, {
+          threadId: params?.threadId,
+          inPortalThread: params?.inPortalThread,
+        });
+
+        // then stop the workflow
+        return;
+      }
+    }
+
+    // 4. fetch the AI response
+    const { isFunctionCall } = await internal_fetchAIChatMessage({
+      messages,
+      messageId: assistantId,
+      params,
+      model,
+      provider: provider!,
+    });

-    // 4. if it's the function call message, trigger the function method
+    // 5. if it's the function call message, trigger the function method
     if (isFunctionCall) {
       await refreshMessages();
       await triggerToolCalls(assistantId, {
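In short: when search is enabled but the chat model cannot call tools, a dedicated function-calling model runs one preflight pass to decide whether to search. An illustrative sketch of that decision loop (the chunk shape and function are assumptions, not the real streaming API):

type PreflightChunk =
  | { type: 'tool_calls'; toolCalls: unknown[] }
  | { type: 'text'; text: string };

// tool_calls means 'run the search tools'; plain text means the model chose
// to answer directly, so the preflight is aborted (cf. abort('not fc')).
async function decideSearch(
  stream: AsyncIterable<PreflightChunk>,
  abortController: AbortController,
): Promise<boolean> {
  let isToolsCalling = false;
  for await (const chunk of stream) {
    if (chunk.type === 'tool_calls') isToolsCalling = true;
    if (chunk.type === 'text') {
      abortController.abort('not fc');
      break;
    }
  }
  return isToolsCalling;
}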
@@ -348,7 +443,7 @@ export const generateAIChat: StateCreator<
       });
     }

-    // 5. summary history if context messages is larger than historyCount
+    // 6. summary history if context messages is larger than historyCount
     const historyCount = agentChatConfigSelectors.historyCount(getAgentStoreState());

     if (
@@ -365,7 +460,7 @@ export const generateAIChat: StateCreator<
       await get().internal_summaryHistory(historyMessages);
     }
   },
-  internal_fetchAIChatMessage: async (messages, messageId, params) => {
+  internal_fetchAIChatMessage: async ({ messages, messageId, params, provider, model }) => {
     const {
       internal_toggleChatLoading,
       refreshMessages,
@@ -382,7 +477,7 @@ export const generateAIChat: StateCreator<
     );

     const agentConfig = agentSelectors.currentAgentConfig(getAgentStoreState());
-    const chatConfig =
+    const chatConfig = agentChatConfigSelectors.currentChatConfig(getAgentStoreState());

     const compiler = template(chatConfig.inputTemplate, { interpolate: /{{([\S\s]+?)}}/g });

@@ -444,8 +539,8 @@ export const generateAIChat: StateCreator<
       abortController,
       params: {
         messages: preprocessMsgs,
-        model: agentConfig.model,
-        provider: agentConfig.provider,
+        model,
+        provider,
         ...agentConfig.params,
         plugins: agentConfig.plugins,
       },
@@ -529,6 +624,7 @@ export const generateAIChat: StateCreator<
           });
           break;
         }
+
         case 'reasoning': {
           // if there is no thinkingStartAt, it means the start of reasoning
           if (!thinkingStartAt) {
@@ -639,4 +735,8 @@ export const generateAIChat: StateCreator<
       `toggleToolCallingStreaming/${!!streaming ? 'start' : 'end'}`,
     );
   },
+
+  internal_toggleSearchWorkflow: (loading, id) => {
+    return get().internal_toggleLoadingArrays('searchWorkflowLoadingIds', loading, id);
+  },
 });
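internal_toggleSearchWorkflow reuses the store's loading-array convention: an id array doubles as a per-message boolean flag. A rough sketch of the likely semantics (the real internal_toggleLoadingArrays may differ):

// Add the id while loading, remove it when done; array membership is what
// selectors such as isMessageInSearchWorkflow test.
const toggleLoadingId = (ids: string[], loading: boolean, id?: string): string[] => {
  if (!id) return ids;
  return loading ? [...new Set([...ids, id])] : ids.filter((x) => x !== id);
};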
package/src/store/chat/slices/aiChat/initialState.ts
CHANGED
@@ -15,6 +15,7 @@ export interface ChatAIChatState {
    * is the AI message is reasoning
    */
   reasoningLoadingIds: string[];
+  searchWorkflowLoadingIds: string[];
   /**
    * the tool calling stream ids
    */
@@ -28,5 +29,6 @@ export const initialAiChatState: ChatAIChatState = {
   messageRAGLoadingIds: [],
   pluginApiLoadingIds: [],
   reasoningLoadingIds: [],
+  searchWorkflowLoadingIds: [],
   toolCallingStreamIds: {},
 };
package/src/store/chat/slices/aiChat/selectors.ts
CHANGED
@@ -1,9 +1,16 @@
-
 import type { ChatStoreState } from '../../initialState';

 const isMessageInReasoning = (id: string) => (s: ChatStoreState) =>
   s.reasoningLoadingIds.includes(id);

+const isMessageInSearchWorkflow = (id: string) => (s: ChatStoreState) =>
+  s.searchWorkflowLoadingIds.includes(id);
+
+const isIntentUnderstanding = (id: string) => (s: ChatStoreState) =>
+  isMessageInSearchWorkflow(id)(s);
+
 export const aiChatSelectors = {
+  isIntentUnderstanding,
   isMessageInReasoning,
+  isMessageInSearchWorkflow,
 };
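isIntentUnderstanding currently just aliases the workflow check, leaving room for extra conditions later. A hedged usage sketch from a component (hook and aggregate-export path assumed from the repo's conventions):

import { useChatStore } from '@/store/chat';
import { aiChatSelectors } from '@/store/chat/selectors'; // export path assumed

// e.g. in the Assistant message renderer, to show the new IntentUnderstanding
// placeholder while the search preflight runs:
const useShowIntentUnderstanding = (messageId: string) =>
  useChatStore(aiChatSelectors.isIntentUnderstanding(messageId));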
package/src/store/chat/slices/message/action.ts
CHANGED
@@ -81,6 +81,8 @@ export interface ChatMessageAction {
       reasoning?: ModelReasoning;
       search?: GroundingSearch;
       metadata?: MessageMetadata;
+      model?: string;
+      provider?: string;
     },
   ) => Promise<void>;
   /**
@@ -302,7 +304,11 @@ export const chatMessage: StateCreator<
         value: { tools: internal_transformToolCalls(extra?.toolCalls) },
       });
     } else {
-      internal_dispatchMessage({ id, type: 'updateMessage', value: { content } });
+      internal_dispatchMessage({
+        id,
+        type: 'updateMessage',
+        value: { content },
+      });
     }

     await messageService.updateMessage(id, {
@@ -311,6 +317,8 @@ export const chatMessage: StateCreator<
       reasoning: extra?.reasoning,
       search: extra?.search,
       metadata: extra?.metadata,
+      model: extra?.model,
+      provider: extra?.provider,
     });
     await refreshMessages();
   },
package/src/store/chat/slices/plugin/action.ts
CHANGED
@@ -50,12 +50,13 @@ export interface ChatPluginAction {
     traceId?: string;
     threadId?: string;
     inPortalThread?: boolean;
+    inSearchWorkflow?: boolean;
   }) => Promise<void>;
   summaryPluginContent: (id: string) => Promise<void>;

   triggerToolCalls: (
     id: string,
-    params?: { threadId?: string; inPortalThread?: boolean },
+    params?: { threadId?: string; inPortalThread?: boolean; inSearchWorkflow?: boolean },
   ) => Promise<void>;
   updatePluginState: (id: string, value: any) => Promise<void>;
   updatePluginArguments: <T = any>(id: string, value: T) => Promise<void>;
@@ -209,7 +210,7 @@ export const chatPlugin: StateCreator<
     await get().internal_invokeDifferentTypePlugin(id, payload);
   },

-  triggerAIMessage: async ({ parentId, traceId, threadId, inPortalThread }) => {
+  triggerAIMessage: async ({ parentId, traceId, threadId, inPortalThread, inSearchWorkflow }) => {
     const { internal_coreProcessMessage } = get();

     const chats = inPortalThread
@@ -220,6 +221,7 @@ export const chatPlugin: StateCreator<
       traceId,
       threadId,
       inPortalThread,
+      inSearchWorkflow,
     });
   },

@@ -245,7 +247,7 @@ export const chatPlugin: StateCreator<
     );
   },

-  triggerToolCalls: async (assistantId, { threadId, inPortalThread } = {}) => {
+  triggerToolCalls: async (assistantId, { threadId, inPortalThread, inSearchWorkflow } = {}) => {
     const message = chatSelectors.getMessageById(assistantId)(get());
     if (!message || !message.tools) return;

@@ -281,7 +283,7 @@ export const chatPlugin: StateCreator<

     const traceId = chatSelectors.getTraceIdByMessageId(latestToolId)(get());

-    await get().triggerAIMessage({ traceId, threadId, inPortalThread });
+    await get().triggerAIMessage({ traceId, threadId, inPortalThread, inSearchWorkflow });
   },
   updatePluginState: async (id, value) => {
     const { refreshMessages } = get();
package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap
CHANGED
@@ -78,6 +78,10 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
     "enableReasoning": false,
     "historyCount": 8,
     "reasoningBudgetToken": 1024,
+    "searchFCModel": {
+      "model": "gpt-4o-mini",
+      "provider": "openai",
+    },
     "searchMode": "off",
   },
   "model": "gpt-3.5-turbo",
package/src/types/agent/chatConfig.ts
CHANGED
@@ -62,5 +62,11 @@ export const AgentChatConfigSchema = z.object({
   enableReasoningEffort: z.boolean().optional(),
   historyCount: z.number().optional(),
   reasoningBudgetToken: z.number().optional(),
+  searchFCModel: z
+    .object({
+      model: z.string(),
+      provider: z.string(),
+    })
+    .optional(),
   searchMode: z.enum(['off', 'on', 'auto']).optional(),
 });
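Because the field is `.optional()`, existing agent configs keep validating. A quick standalone check of the new fragment with zod (mirrors the schema above):

import { z } from 'zod';

const searchFCModel = z.object({ model: z.string(), provider: z.string() }).optional();

searchFCModel.safeParse(undefined).success; // true — omitted is fine for old configs
searchFCModel.safeParse({ model: 'gpt-4o-mini', provider: 'openai' }).success; // true
searchFCModel.safeParse({ model: 'gpt-4o-mini' }).success; // false — provider required once present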