@lobehub/chat 1.128.0 → 1.128.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/.github/workflows/test.yml +8 -1
  2. package/CHANGELOG.md +51 -0
  3. package/changelog/v1.json +18 -0
  4. package/locales/ar/models.json +6 -0
  5. package/locales/bg-BG/models.json +6 -0
  6. package/locales/de-DE/models.json +6 -0
  7. package/locales/en-US/models.json +6 -0
  8. package/locales/es-ES/models.json +6 -0
  9. package/locales/fa-IR/models.json +6 -0
  10. package/locales/fr-FR/models.json +6 -0
  11. package/locales/it-IT/models.json +6 -0
  12. package/locales/ja-JP/models.json +6 -0
  13. package/locales/ko-KR/models.json +6 -0
  14. package/locales/nl-NL/models.json +6 -0
  15. package/locales/pl-PL/models.json +6 -0
  16. package/locales/pt-BR/models.json +6 -0
  17. package/locales/ru-RU/models.json +6 -0
  18. package/locales/tr-TR/models.json +6 -0
  19. package/locales/vi-VN/models.json +6 -0
  20. package/locales/zh-CN/models.json +6 -0
  21. package/locales/zh-TW/models.json +6 -0
  22. package/next.config.ts +8 -1
  23. package/package.json +71 -69
  24. package/packages/context-engine/ARCHITECTURE.md +425 -0
  25. package/packages/context-engine/package.json +40 -0
  26. package/packages/context-engine/src/base/BaseProcessor.ts +87 -0
  27. package/packages/context-engine/src/base/BaseProvider.ts +22 -0
  28. package/packages/context-engine/src/index.ts +32 -0
  29. package/packages/context-engine/src/pipeline.ts +219 -0
  30. package/packages/context-engine/src/processors/HistoryTruncate.ts +76 -0
  31. package/packages/context-engine/src/processors/InputTemplate.ts +83 -0
  32. package/packages/context-engine/src/processors/MessageCleanup.ts +87 -0
  33. package/packages/context-engine/src/processors/MessageContent.ts +298 -0
  34. package/packages/context-engine/src/processors/PlaceholderVariables.ts +196 -0
  35. package/packages/context-engine/src/processors/ToolCall.ts +186 -0
  36. package/packages/context-engine/src/processors/ToolMessageReorder.ts +113 -0
  37. package/packages/context-engine/src/processors/__tests__/HistoryTruncate.test.ts +175 -0
  38. package/packages/context-engine/src/processors/__tests__/InputTemplate.test.ts +243 -0
  39. package/packages/context-engine/src/processors/__tests__/MessageContent.test.ts +394 -0
  40. package/packages/context-engine/src/processors/__tests__/PlaceholderVariables.test.ts +334 -0
  41. package/packages/context-engine/src/processors/__tests__/ToolMessageReorder.test.ts +186 -0
  42. package/packages/context-engine/src/processors/index.ts +15 -0
  43. package/packages/context-engine/src/providers/HistorySummary.ts +102 -0
  44. package/packages/context-engine/src/providers/InboxGuide.ts +102 -0
  45. package/packages/context-engine/src/providers/SystemRoleInjector.ts +64 -0
  46. package/packages/context-engine/src/providers/ToolSystemRole.ts +118 -0
  47. package/packages/context-engine/src/providers/__tests__/HistorySummaryProvider.test.ts +112 -0
  48. package/packages/context-engine/src/providers/__tests__/InboxGuideProvider.test.ts +121 -0
  49. package/packages/context-engine/src/providers/__tests__/SystemRoleInjector.test.ts +200 -0
  50. package/packages/context-engine/src/providers/__tests__/ToolSystemRoleProvider.test.ts +140 -0
  51. package/packages/context-engine/src/providers/index.ts +11 -0
  52. package/packages/context-engine/src/types.ts +201 -0
  53. package/packages/context-engine/vitest.config.mts +10 -0
  54. package/packages/database/package.json +1 -1
  55. package/packages/model-bank/src/aiModels/deepseek.ts +4 -4
  56. package/packages/model-bank/src/aiModels/google.ts +30 -6
  57. package/packages/model-bank/src/aiModels/groq.ts +1 -19
  58. package/packages/model-bank/src/aiModels/modelscope.ts +24 -18
  59. package/packages/model-bank/src/aiModels/novita.ts +71 -5
  60. package/packages/model-bank/src/aiModels/qwen.ts +87 -2
  61. package/packages/model-bank/src/aiModels/siliconcloud.ts +65 -2
  62. package/packages/model-bank/src/aiModels/volcengine.ts +4 -3
  63. package/packages/model-runtime/src/utils/modelParse.ts +4 -4
  64. package/packages/prompts/src/prompts/systemRole/index.ts +1 -1
  65. package/packages/utils/src/index.ts +2 -0
  66. package/packages/utils/src/uriParser.test.ts +29 -0
  67. package/packages/utils/src/uriParser.ts +24 -0
  68. package/src/services/{__tests__ → chat}/chat.test.ts +22 -1032
  69. package/src/services/chat/clientModelRuntime.test.ts +385 -0
  70. package/src/services/chat/clientModelRuntime.ts +34 -0
  71. package/src/services/chat/contextEngineering.test.ts +848 -0
  72. package/src/services/chat/contextEngineering.ts +123 -0
  73. package/src/services/chat/helper.ts +61 -0
  74. package/src/services/{chat.ts → chat/index.ts} +24 -366
  75. package/src/services/chat/types.ts +9 -0
  76. package/src/services/models.ts +1 -1
  77. package/src/store/aiInfra/slices/aiModel/selectors.ts +2 -2
  78. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +1 -40
  79. /package/src/services/{__tests__ → chat}/__snapshots__/chat.test.ts.snap +0 -0
@@ -0,0 +1,123 @@
1
+ import { INBOX_SESSION_ID, isDesktop, isServerMode } from '@lobechat/const';
2
+ import {
3
+ type AgentState,
4
+ ContextEngine,
5
+ HistorySummaryProvider,
6
+ HistoryTruncateProcessor,
7
+ InboxGuideProvider,
8
+ InputTemplateProcessor,
9
+ MessageCleanupProcessor,
10
+ MessageContentProcessor,
11
+ PlaceholderVariablesProcessor,
12
+ SystemRoleInjector,
13
+ ToolCallProcessor,
14
+ ToolMessageReorder,
15
+ ToolSystemRoleProvider,
16
+ } from '@lobechat/context-engine';
17
+ import { historySummaryPrompt } from '@lobechat/prompts';
18
+ import { ChatMessage, OpenAIChatMessage } from '@lobechat/types';
19
+
20
+ import { INBOX_GUIDE_SYSTEMROLE } from '@/const/guide';
21
+ import { getToolStoreState } from '@/store/tool';
22
+ import { toolSelectors } from '@/store/tool/selectors';
23
+ import { VARIABLE_GENERATORS } from '@/utils/client/parserPlaceholder';
24
+ import { genToolCallingName } from '@/utils/toolCall';
25
+
26
+ import { isCanUseFC, isCanUseVision } from './helper';
27
+ import { FetchOptions } from './types';
28
+
29
+ export const contextEngineering = async (
30
+ {
31
+ messages = [],
32
+ tools,
33
+ model,
34
+ provider,
35
+ systemRole,
36
+ inputTemplate,
37
+ enableHistoryCount,
38
+ historyCount,
39
+ }: {
40
+ enableHistoryCount?: boolean;
41
+ historyCount?: number;
42
+ inputTemplate?: string;
43
+ messages: ChatMessage[];
44
+ model: string;
45
+ provider: string;
46
+ systemRole?: string;
47
+ tools?: string[];
48
+ },
49
+ options?: FetchOptions,
50
+ ): Promise<OpenAIChatMessage[]> => {
51
+ const pipeline = new ContextEngine({
52
+ pipeline: [
53
+ // 1. History truncation (MUST be first, before any message injection)
54
+ new HistoryTruncateProcessor({ enableHistoryCount, historyCount }),
55
+
56
+ // --------- Create system role injection providers
57
+
58
+ // 2. System role injection (agent's system role)
59
+ new SystemRoleInjector({ systemRole }),
60
+
61
+ // 3. Inbox guide system role injection
62
+ new InboxGuideProvider({
63
+ inboxGuideSystemRole: INBOX_GUIDE_SYSTEMROLE,
64
+ inboxSessionId: INBOX_SESSION_ID,
65
+ isWelcomeQuestion: options?.isWelcomeQuestion,
66
+ sessionId: options?.trace?.sessionId,
67
+ }),
68
+
69
+ // 4. Tool system role injection
70
+ new ToolSystemRoleProvider({
71
+ getToolSystemRoles: (tools) => toolSelectors.enabledSystemRoles(tools)(getToolStoreState()),
72
+ isCanUseFC,
73
+ model,
74
+ provider,
75
+ tools,
76
+ }),
77
+
78
+ // 5. History summary injection
79
+ new HistorySummaryProvider({
80
+ formatHistorySummary: historySummaryPrompt,
81
+ historySummary: options?.historySummary,
82
+ }),
83
+
84
+ // Create message processing processors
85
+
86
+ // 6. Input template processing
87
+ new InputTemplateProcessor({
88
+ inputTemplate,
89
+ }),
90
+
91
+ // 7. Placeholder variables processing
92
+ new PlaceholderVariablesProcessor({ variableGenerators: VARIABLE_GENERATORS }),
93
+
94
+ // 8. Message content processing
95
+ new MessageContentProcessor({
96
+ fileContext: { enabled: isServerMode, includeFileUrl: !isDesktop },
97
+ isCanUseVision,
98
+ model,
99
+ provider,
100
+ }),
101
+
102
+ // 9. Tool call processing
103
+ new ToolCallProcessor({ genToolCallingName, isCanUseFC, model, provider }),
104
+
105
+ // 10. Tool message reordering
106
+ new ToolMessageReorder(),
107
+
108
+ // 11. Message cleanup (final step, keep only necessary fields)
109
+ new MessageCleanupProcessor(),
110
+ ],
111
+ });
112
+
113
+ const initialState: AgentState = { messages, model, provider, systemRole, tools };
114
+
115
+ const result = await pipeline.process({
116
+ initialState,
117
+ maxTokens: 10_000_000,
118
+ messages,
119
+ model,
120
+ });
121
+
122
+ return result.messages;
123
+ };
@@ -0,0 +1,61 @@
1
+ import { isDeprecatedEdition } from '@lobechat/const';
2
+ import { ModelProvider } from '@lobechat/model-runtime';
3
+
4
+ import { getAiInfraStoreState } from '@/store/aiInfra';
5
+ import { aiModelSelectors, aiProviderSelectors } from '@/store/aiInfra/selectors';
6
+ import { getUserStoreState, useUserStore } from '@/store/user';
7
+ import { modelConfigSelectors, modelProviderSelectors } from '@/store/user/selectors';
8
+
9
+ export const isCanUseFC = (model: string, provider: string): boolean => {
10
+ // TODO: remove isDeprecatedEdition condition in V2.0
11
+ if (isDeprecatedEdition) {
12
+ return modelProviderSelectors.isModelEnabledFunctionCall(model)(getUserStoreState());
13
+ }
14
+
15
+ return aiModelSelectors.isModelSupportToolUse(model, provider)(getAiInfraStoreState()) || false;
16
+ };
17
+
18
+ export const isCanUseVision = (model: string, provider: string): boolean => {
19
+ // TODO: remove isDeprecatedEdition condition in V2.0
20
+ if (isDeprecatedEdition) {
21
+ return modelProviderSelectors.isModelEnabledVision(model)(getUserStoreState());
22
+ }
23
+ return aiModelSelectors.isModelSupportVision(model, provider)(getAiInfraStoreState());
24
+ };
25
+
26
+ /**
27
+ * TODO: we need to update this function to auto find deploymentName with provider setting config
28
+ */
29
+ export const findDeploymentName = (model: string, provider: string) => {
30
+ let deploymentId = model;
31
+
32
+ // TODO: remove isDeprecatedEdition condition in V2.0
33
+ if (isDeprecatedEdition) {
34
+ const chatModelCards = modelProviderSelectors.getModelCardsById(ModelProvider.Azure)(
35
+ useUserStore.getState(),
36
+ );
37
+
38
+ const deploymentName = chatModelCards.find((i) => i.id === model)?.deploymentName;
39
+ if (deploymentName) deploymentId = deploymentName;
40
+ } else {
41
+ // find the model by id
42
+ const modelItem = getAiInfraStoreState().enabledAiModels?.find(
43
+ (i) => i.id === model && i.providerId === provider,
44
+ );
45
+
46
+ if (modelItem && modelItem.config?.deploymentName) {
47
+ deploymentId = modelItem.config?.deploymentName;
48
+ }
49
+ }
50
+
51
+ return deploymentId;
52
+ };
53
+
54
+ export const isEnableFetchOnClient = (provider: string) => {
55
+ // TODO: remove this condition in V2.0
56
+ if (isDeprecatedEdition) {
57
+ return modelConfigSelectors.isProviderFetchOnClient(provider)(useUserStore.getState());
58
+ } else {
59
+ return aiProviderSelectors.isProviderFetchOnClient(provider)(getAiInfraStoreState());
60
+ }
61
+ };
@@ -2,22 +2,16 @@ import {
2
2
  AgentRuntimeError,
3
3
  ChatCompletionErrorPayload,
4
4
  ModelProvider,
5
- ModelRuntime,
6
- parseDataUri,
7
5
  } from '@lobechat/model-runtime';
8
- import { BuiltinSystemRolePrompts, filesPrompts } from '@lobechat/prompts';
9
6
  import { ChatErrorType, TracePayload, TraceTagMap } from '@lobechat/types';
10
7
  import { PluginRequestPayload, createHeadersWithPluginSettings } from '@lobehub/chat-plugin-sdk';
11
- import { produce } from 'immer';
12
8
  import { merge } from 'lodash-es';
13
9
 
14
10
  import { enableAuth } from '@/const/auth';
15
- import { INBOX_GUIDE_SYSTEMROLE } from '@/const/guide';
16
- import { INBOX_SESSION_ID } from '@/const/session';
17
11
  import { DEFAULT_AGENT_CONFIG } from '@/const/settings';
18
- import { isDeprecatedEdition, isDesktop, isServerMode } from '@/const/version';
12
+ import { isDeprecatedEdition, isDesktop } from '@/const/version';
19
13
  import { getAgentStoreState } from '@/store/agent';
20
- import { agentChatConfigSelectors } from '@/store/agent/selectors';
14
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selectors';
21
15
  import { aiModelSelectors, aiProviderSelectors, getAiInfraStoreState } from '@/store/aiInfra';
22
16
  import { getSessionStoreState } from '@/store/session';
23
17
  import { sessionMetaSelectors } from '@/store/session/selectors';
@@ -25,18 +19,14 @@ import { getToolStoreState } from '@/store/tool';
25
19
  import { pluginSelectors, toolSelectors } from '@/store/tool/selectors';
26
20
  import { getUserStoreState, useUserStore } from '@/store/user';
27
21
  import {
28
- modelConfigSelectors,
29
- modelProviderSelectors,
30
22
  preferenceSelectors,
31
23
  userGeneralSettingsSelectors,
32
24
  userProfileSelectors,
33
25
  } from '@/store/user/selectors';
34
26
  import { WebBrowsingManifest } from '@/tools/web-browsing';
35
27
  import { WorkingModel } from '@/types/agent';
36
- import { ChatImageItem, ChatMessage, MessageToolCall } from '@/types/message';
37
- import type { ChatStreamPayload, OpenAIChatMessage } from '@/types/openai/chat';
38
- import { UserMessageContentPart } from '@/types/openai/chat';
39
- import { parsePlaceholderVariablesMessages } from '@/utils/client/parserPlaceholder';
28
+ import { ChatMessage } from '@/types/message';
29
+ import type { ChatStreamPayload } from '@/types/openai/chat';
40
30
  import { fetchWithInvokeStream } from '@/utils/electron/desktopRemoteRPCFetch';
41
31
  import { createErrorResponse } from '@/utils/errorResponse';
42
32
  import {
@@ -45,74 +35,14 @@ import {
45
35
  getMessageError,
46
36
  standardizeAnimationStyle,
47
37
  } from '@/utils/fetch';
48
- import { imageUrlToBase64 } from '@/utils/imageToBase64';
49
- import { genToolCallingName } from '@/utils/toolCall';
50
38
  import { createTraceHeader, getTraceId } from '@/utils/trace';
51
- import { isLocalUrl } from '@/utils/url';
52
-
53
- import { createHeaderWithAuth, createPayloadWithKeyVaults } from './_auth';
54
- import { API_ENDPOINTS } from './_url';
55
-
56
- const isCanUseFC = (model: string, provider: string) => {
57
- // TODO: remove isDeprecatedEdition condition in V2.0
58
- if (isDeprecatedEdition) {
59
- return modelProviderSelectors.isModelEnabledFunctionCall(model)(getUserStoreState());
60
- }
61
-
62
- return aiModelSelectors.isModelSupportToolUse(model, provider)(getAiInfraStoreState());
63
- };
64
-
65
- const isCanUseVision = (model: string, provider: string) => {
66
- // TODO: remove isDeprecatedEdition condition in V2.0
67
- if (isDeprecatedEdition) {
68
- return modelProviderSelectors.isModelEnabledVision(model)(getUserStoreState());
69
- }
70
- return aiModelSelectors.isModelSupportVision(model, provider)(getAiInfraStoreState());
71
- };
72
-
73
- /**
74
- * TODO: we need to update this function to auto find deploymentName with provider setting config
75
- */
76
- const findDeploymentName = (model: string, provider: string) => {
77
- let deploymentId = model;
78
-
79
- // TODO: remove isDeprecatedEdition condition in V2.0
80
- if (isDeprecatedEdition) {
81
- const chatModelCards = modelProviderSelectors.getModelCardsById(ModelProvider.Azure)(
82
- useUserStore.getState(),
83
- );
84
-
85
- const deploymentName = chatModelCards.find((i) => i.id === model)?.deploymentName;
86
- if (deploymentName) deploymentId = deploymentName;
87
- } else {
88
- // find the model by id
89
- const modelItem = getAiInfraStoreState().enabledAiModels?.find(
90
- (i) => i.id === model && i.providerId === provider,
91
- );
92
-
93
- if (modelItem && modelItem.config?.deploymentName) {
94
- deploymentId = modelItem.config?.deploymentName;
95
- }
96
- }
97
39
 
98
- return deploymentId;
99
- };
100
-
101
- const isEnableFetchOnClient = (provider: string) => {
102
- // TODO: remove this condition in V2.0
103
- if (isDeprecatedEdition) {
104
- return modelConfigSelectors.isProviderFetchOnClient(provider)(useUserStore.getState());
105
- } else {
106
- return aiProviderSelectors.isProviderFetchOnClient(provider)(getAiInfraStoreState());
107
- }
108
- };
109
-
110
- interface FetchOptions extends FetchSSEOptions {
111
- historySummary?: string;
112
- isWelcomeQuestion?: boolean;
113
- signal?: AbortSignal | undefined;
114
- trace?: TracePayload;
115
- }
40
+ import { createHeaderWithAuth } from '../_auth';
41
+ import { API_ENDPOINTS } from '../_url';
42
+ import { initializeWithClientStore } from './clientModelRuntime';
43
+ import { contextEngineering } from './contextEngineering';
44
+ import { findDeploymentName, isCanUseFC, isEnableFetchOnClient } from './helper';
45
+ import { FetchOptions } from './types';
116
46
 
117
47
  interface GetChatCompletionPayload extends Partial<Omit<ChatStreamPayload, 'messages'>> {
118
48
  messages: ChatMessage[];
@@ -141,37 +71,6 @@ interface CreateAssistantMessageStream extends FetchSSEOptions {
141
71
  trace?: TracePayload;
142
72
  }
143
73
 
144
- /**
145
- * Initializes the AgentRuntime with the client store.
146
- * @param provider - The provider name.
147
- * @param payload - Init options
148
- * @returns The initialized AgentRuntime instance
149
- *
150
- * **Note**: if you try to fetch directly, use `fetchOnClient` instead.
151
- */
152
- export function initializeWithClientStore(provider: string, payload?: any) {
153
- /**
154
- * Since #5267, we map parameters for client-fetch in function `getProviderAuthPayload`
155
- * which called by `createPayloadWithKeyVaults` below.
156
- * @see https://github.com/lobehub/lobe-chat/pull/5267
157
- * @file src/services/_auth.ts
158
- */
159
- const providerAuthPayload = { ...payload, ...createPayloadWithKeyVaults(provider) };
160
- const commonOptions = {
161
- // Allow OpenAI SDK and Anthropic SDK run on browser
162
- dangerouslyAllowBrowser: true,
163
- };
164
- /**
165
- * Configuration override order:
166
- * payload -> providerAuthPayload -> commonOptions
167
- */
168
- return ModelRuntime.initializeWithProvider(provider, {
169
- ...commonOptions,
170
- ...providerAuthPayload,
171
- ...payload,
172
- });
173
- }
174
-
175
74
  class ChatService {
176
75
  createAssistantMessage = async (
177
76
  { plugins: enabledPlugins, messages, ...params }: GetChatCompletionPayload,
@@ -209,29 +108,35 @@ class ChatService {
209
108
  pluginIds.push(WebBrowsingManifest.identifier);
210
109
  }
211
110
 
212
- // ============ 1. preprocess placeholder variables ============ //
213
- const parsedMessages = parsePlaceholderVariablesMessages(messages);
111
+ // ============ 1. preprocess messages ============ //
214
112
 
215
- // ============ 2. preprocess messages ============ //
113
+ const agentStoreState = getAgentStoreState();
114
+ const agentConfig = agentSelectors.currentAgentConfig(agentStoreState);
216
115
 
217
- const oaiMessages = await this.processMessages(
116
+ // Apply context engineering with preprocessing configuration
117
+ const oaiMessages = await contextEngineering(
218
118
  {
219
- messages: parsedMessages,
119
+ enableHistoryCount: agentChatConfigSelectors.enableHistoryCount(agentStoreState),
120
+ // include user messages
121
+ historyCount: agentChatConfigSelectors.historyCount(agentStoreState) + 2,
122
+ inputTemplate: chatConfig.inputTemplate,
123
+ messages,
220
124
  model: payload.model,
221
125
  provider: payload.provider!,
126
+ systemRole: agentConfig.systemRole,
222
127
  tools: pluginIds,
223
128
  },
224
129
  options,
225
130
  );
226
131
 
227
- // ============ 3. preprocess tools ============ //
132
+ // ============ 2. preprocess tools ============ //
228
133
 
229
134
  const tools = this.prepareTools(pluginIds, {
230
135
  model: payload.model,
231
136
  provider: payload.provider!,
232
137
  });
233
138
 
234
- // ============ 4. process extend params ============ //
139
+ // ============ 3. process extend params ============ //
235
140
 
236
141
  let extendParams: Record<string, any> = {};
237
142
 
@@ -513,7 +418,7 @@ class ChatService {
513
418
  onLoadingChange?.(true);
514
419
 
515
420
  try {
516
- const oaiMessages = await this.processMessages({
421
+ const oaiMessages = await contextEngineering({
517
422
  messages: params.messages as any,
518
423
  model: params.model!,
519
424
  provider: params.provider!,
@@ -545,191 +450,6 @@ class ChatService {
545
450
  }
546
451
  };
547
452
 
548
- private processMessages = async (
549
- {
550
- messages = [],
551
- tools,
552
- model,
553
- provider,
554
- }: {
555
- messages: ChatMessage[];
556
- model: string;
557
- provider: string;
558
- tools?: string[];
559
- },
560
- options?: FetchOptions,
561
- ): Promise<OpenAIChatMessage[]> => {
562
- // handle content type for vision model
563
- // for the models with visual ability, add image url to content
564
- // refs: https://platform.openai.com/docs/guides/vision/quick-start
565
- const getUserContent = async (m: ChatMessage) => {
566
- // only if message doesn't have images and files, then return the plain content
567
- if ((!m.imageList || m.imageList.length === 0) && (!m.fileList || m.fileList.length === 0))
568
- return m.content;
569
-
570
- const imageList = m.imageList || [];
571
- const imageContentParts = await this.processImageList({ imageList, model, provider });
572
-
573
- const filesContext = isServerMode
574
- ? filesPrompts({ addUrl: !isDesktop, fileList: m.fileList, imageList })
575
- : '';
576
- return [
577
- { text: (m.content + '\n\n' + filesContext).trim(), type: 'text' },
578
- ...imageContentParts,
579
- ] as UserMessageContentPart[];
580
- };
581
-
582
- const getAssistantContent = async (m: ChatMessage) => {
583
- // signature is a signal of anthropic thinking mode
584
- const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;
585
-
586
- if (shouldIncludeThinking) {
587
- return [
588
- {
589
- signature: m.reasoning!.signature,
590
- thinking: m.reasoning!.content,
591
- type: 'thinking',
592
- },
593
- { text: m.content, type: 'text' },
594
- ] as UserMessageContentPart[];
595
- }
596
- // only if message doesn't have images and files, then return the plain content
597
-
598
- if (m.imageList && m.imageList.length > 0) {
599
- const imageContentParts = await this.processImageList({
600
- imageList: m.imageList,
601
- model,
602
- provider,
603
- });
604
- return [
605
- !!m.content ? { text: m.content, type: 'text' } : undefined,
606
- ...imageContentParts,
607
- ].filter(Boolean) as UserMessageContentPart[];
608
- }
609
-
610
- return m.content;
611
- };
612
-
613
- let postMessages = await Promise.all(
614
- messages.map(async (m): Promise<OpenAIChatMessage> => {
615
- const supportTools = isCanUseFC(model, provider);
616
- switch (m.role) {
617
- case 'user': {
618
- return { content: await getUserContent(m), role: m.role };
619
- }
620
-
621
- case 'assistant': {
622
- const content = await getAssistantContent(m);
623
-
624
- if (!supportTools) {
625
- return { content, role: m.role };
626
- }
627
-
628
- return {
629
- content,
630
- role: m.role,
631
- tool_calls: m.tools?.map(
632
- (tool): MessageToolCall => ({
633
- function: {
634
- arguments: tool.arguments,
635
- name: genToolCallingName(tool.identifier, tool.apiName, tool.type),
636
- },
637
- id: tool.id,
638
- type: 'function',
639
- }),
640
- ),
641
- };
642
- }
643
-
644
- case 'tool': {
645
- if (!supportTools) {
646
- return { content: m.content, role: 'user' };
647
- }
648
-
649
- return {
650
- content: m.content,
651
- name: genToolCallingName(m.plugin!.identifier, m.plugin!.apiName, m.plugin?.type),
652
- role: m.role,
653
- tool_call_id: m.tool_call_id,
654
- };
655
- }
656
-
657
- default: {
658
- return { content: m.content, role: m.role as any };
659
- }
660
- }
661
- }),
662
- );
663
-
664
- postMessages = produce(postMessages, (draft) => {
665
- // if it's a welcome question, inject InboxGuide SystemRole
666
- const inboxGuideSystemRole =
667
- options?.isWelcomeQuestion &&
668
- options?.trace?.sessionId === INBOX_SESSION_ID &&
669
- INBOX_GUIDE_SYSTEMROLE;
670
-
671
- // Inject Tool SystemRole
672
- const hasTools = tools && tools?.length > 0;
673
- const hasFC = hasTools && isCanUseFC(model, provider);
674
- const toolsSystemRoles =
675
- hasFC && toolSelectors.enabledSystemRoles(tools)(getToolStoreState());
676
-
677
- const injectSystemRoles = BuiltinSystemRolePrompts({
678
- historySummary: options?.historySummary,
679
- plugins: toolsSystemRoles as string,
680
- welcome: inboxGuideSystemRole as string,
681
- });
682
-
683
- if (!injectSystemRoles) return;
684
-
685
- const systemMessage = draft.find((i) => i.role === 'system');
686
-
687
- if (systemMessage) {
688
- systemMessage.content = [systemMessage.content, injectSystemRoles]
689
- .filter(Boolean)
690
- .join('\n\n');
691
- } else {
692
- draft.unshift({
693
- content: injectSystemRoles,
694
- role: 'system',
695
- });
696
- }
697
- });
698
-
699
- return this.reorderToolMessages(postMessages);
700
- };
701
-
702
- /**
703
- * Process imageList: convert local URLs to base64 and format as UserMessageContentPart
704
- */
705
- private processImageList = async ({
706
- model,
707
- provider,
708
- imageList,
709
- }: {
710
- imageList: ChatImageItem[];
711
- model: string;
712
- provider: string;
713
- }) => {
714
- if (!isCanUseVision(model, provider)) {
715
- return [];
716
- }
717
-
718
- return Promise.all(
719
- imageList.map(async (image) => {
720
- const { type } = parseDataUri(image.url);
721
-
722
- let processedUrl = image.url;
723
- if (type === 'url' && isLocalUrl(image.url)) {
724
- const { base64, mimeType } = await imageUrlToBase64(image.url);
725
- processedUrl = `data:${mimeType};base64,${base64}`;
726
- }
727
-
728
- return { image_url: { detail: 'auto', url: processedUrl }, type: 'image_url' } as const;
729
- }),
730
- );
731
- };
732
-
733
453
  private mapTrace = (trace?: TracePayload, tag?: TraceTagMap): TracePayload => {
734
454
  const tags = sessionMetaSelectors.currentAgentMeta(getSessionStoreState()).tags || [];
735
455
 
@@ -768,68 +488,6 @@ class ChatService {
768
488
  return agentRuntime.chat(data, { signal: params.signal });
769
489
  };
770
490
 
771
- /**
772
- * Reorder tool messages to ensure that tool messages are displayed in the correct order.
773
- * see https://github.com/lobehub/lobe-chat/pull/3155
774
- */
775
- private reorderToolMessages = (messages: OpenAIChatMessage[]): OpenAIChatMessage[] => {
776
- // 1. 先收集所有 assistant 消息中的有效 tool_call_id
777
- const validToolCallIds = new Set<string>();
778
- messages.forEach((message) => {
779
- if (message.role === 'assistant' && message.tool_calls) {
780
- message.tool_calls.forEach((toolCall) => {
781
- validToolCallIds.add(toolCall.id);
782
- });
783
- }
784
- });
785
-
786
- // 2. 收集所有有效的 tool 消息
787
- const toolMessages: Record<string, OpenAIChatMessage> = {};
788
- messages.forEach((message) => {
789
- if (
790
- message.role === 'tool' &&
791
- message.tool_call_id &&
792
- validToolCallIds.has(message.tool_call_id)
793
- ) {
794
- toolMessages[message.tool_call_id] = message;
795
- }
796
- });
797
-
798
- // 3. 重新排序消息
799
- const reorderedMessages: OpenAIChatMessage[] = [];
800
- messages.forEach((message) => {
801
- // 跳过无效的 tool 消息
802
- if (
803
- message.role === 'tool' &&
804
- (!message.tool_call_id || !validToolCallIds.has(message.tool_call_id))
805
- ) {
806
- return;
807
- }
808
-
809
- // 检查是否已经添加过该 tool 消息
810
- const hasPushed = reorderedMessages.some(
811
- (m) => !!message.tool_call_id && m.tool_call_id === message.tool_call_id,
812
- );
813
-
814
- if (hasPushed) return;
815
-
816
- reorderedMessages.push(message);
817
-
818
- // 如果是 assistant 消息且有 tool_calls,添加对应的 tool 消息
819
- if (message.role === 'assistant' && message.tool_calls) {
820
- message.tool_calls.forEach((toolCall) => {
821
- const correspondingToolMessage = toolMessages[toolCall.id];
822
- if (correspondingToolMessage) {
823
- reorderedMessages.push(correspondingToolMessage);
824
- delete toolMessages[toolCall.id];
825
- }
826
- });
827
- }
828
- });
829
-
830
- return reorderedMessages;
831
- };
832
-
833
491
  private prepareTools = (pluginIds: string[], { model, provider }: WorkingModel) => {
834
492
  let filterTools = toolSelectors.enabledSchema(pluginIds)(getToolStoreState());
835
493
 
@@ -0,0 +1,9 @@
1
+ import { TracePayload } from '@lobechat/types';
2
+ import { FetchSSEOptions } from '@/utils/fetch';
3
+
4
+ export interface FetchOptions extends FetchSSEOptions {
5
+ historySummary?: string;
6
+ isWelcomeQuestion?: boolean;
7
+ signal?: AbortSignal | undefined;
8
+ trace?: TracePayload;
9
+ }
@@ -7,7 +7,7 @@ import { ChatModelCard } from '@/types/llm';
7
7
  import { getMessageError } from '@/utils/fetch';
8
8
 
9
9
  import { API_ENDPOINTS } from './_url';
10
- import { initializeWithClientStore } from './chat';
10
+ import { initializeWithClientStore } from './chat/clientModelRuntime';
11
11
 
12
12
  const isEnableFetchOnClient = (provider: string) => {
13
13
  // TODO: remove this condition in V2.0
@@ -47,7 +47,7 @@ const getEnabledModelById = (id: string, provider: string) => (s: AIProviderStor
47
47
  const isModelSupportToolUse = (id: string, provider: string) => (s: AIProviderStoreState) => {
48
48
  const model = getEnabledModelById(id, provider)(s);
49
49
 
50
- return model?.abilities?.functionCall;
50
+ return model?.abilities?.functionCall || false;
51
51
  };
52
52
 
53
53
  const isModelSupportFiles = (id: string, provider: string) => (s: AIProviderStoreState) => {
@@ -59,7 +59,7 @@ const isModelSupportFiles = (id: string, provider: string) => (s: AIProviderStor
59
59
  const isModelSupportVision = (id: string, provider: string) => (s: AIProviderStoreState) => {
60
60
  const model = getEnabledModelById(id, provider)(s);
61
61
 
62
- return model?.abilities?.vision;
62
+ return model?.abilities?.vision || false;
63
63
  };
64
64
 
65
65
  const isModelSupportReasoning = (id: string, provider: string) => (s: AIProviderStoreState) => {