@lobehub/chat 1.68.10 → 1.69.0
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
- package/CHANGELOG.md +51 -0
- package/changelog/v1.json +18 -0
- package/locales/ar/chat.json +8 -0
- package/locales/ar/models.json +6 -0
- package/locales/bg-BG/chat.json +8 -0
- package/locales/bg-BG/models.json +6 -0
- package/locales/de-DE/chat.json +8 -0
- package/locales/de-DE/models.json +6 -0
- package/locales/en-US/chat.json +8 -0
- package/locales/en-US/models.json +6 -0
- package/locales/es-ES/chat.json +8 -0
- package/locales/es-ES/models.json +6 -0
- package/locales/fa-IR/chat.json +8 -0
- package/locales/fa-IR/models.json +6 -0
- package/locales/fr-FR/chat.json +8 -0
- package/locales/fr-FR/models.json +6 -0
- package/locales/it-IT/chat.json +8 -0
- package/locales/it-IT/models.json +6 -0
- package/locales/ja-JP/chat.json +8 -0
- package/locales/ja-JP/models.json +6 -0
- package/locales/ko-KR/chat.json +8 -0
- package/locales/ko-KR/models.json +6 -0
- package/locales/nl-NL/chat.json +8 -0
- package/locales/nl-NL/models.json +6 -0
- package/locales/pl-PL/chat.json +8 -0
- package/locales/pl-PL/models.json +6 -0
- package/locales/pt-BR/chat.json +8 -0
- package/locales/pt-BR/models.json +6 -0
- package/locales/ru-RU/chat.json +8 -0
- package/locales/ru-RU/models.json +6 -0
- package/locales/tr-TR/chat.json +8 -0
- package/locales/tr-TR/models.json +6 -0
- package/locales/vi-VN/chat.json +8 -0
- package/locales/vi-VN/models.json +6 -0
- package/locales/zh-CN/chat.json +8 -0
- package/locales/zh-CN/models.json +6 -0
- package/locales/zh-TW/chat.json +8 -0
- package/locales/zh-TW/models.json +6 -0
- package/next.config.ts +6 -0
- package/package.json +1 -1
- package/packages/web-crawler/src/crawImpl/naive.ts +19 -12
- package/packages/web-crawler/src/urlRules.ts +9 -1
- package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +9 -18
- package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/WelcomeMessage.tsx +2 -5
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/HeaderAction.tsx +3 -2
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +56 -30
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags/HistoryLimitTags.tsx +26 -0
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/{SearchTags.tsx → Tags/SearchTags.tsx} +7 -4
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/{Tags.tsx → Tags/index.tsx} +4 -1
- package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/index.tsx +1 -1
- package/src/config/aiModels/anthropic.ts +16 -1
- package/src/config/aiModels/google.ts +37 -0
- package/src/config/aiModels/qwen.ts +64 -25
- package/src/config/modelProviders/anthropic.ts +0 -2
- package/src/const/layoutTokens.test.ts +1 -1
- package/src/const/layoutTokens.ts +1 -1
- package/src/const/models.ts +27 -0
- package/src/features/ChatInput/ActionBar/History.tsx +6 -3
- package/src/features/ChatInput/ActionBar/Model/ContextCachingSwitch.tsx +20 -0
- package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +49 -7
- package/src/features/ChatInput/ActionBar/Model/ReasoningTokenSlider.tsx +6 -14
- package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +2 -2
- package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +2 -2
- package/src/features/ChatInput/ActionBar/Token/TokenTag.tsx +3 -5
- package/src/features/Conversation/Messages/Assistant/Tool/Render/CustomRender.tsx +2 -0
- package/src/features/Conversation/Messages/Assistant/Tool/Render/index.tsx +5 -1
- package/src/features/Conversation/Messages/Assistant/Tool/index.tsx +2 -0
- package/src/features/Conversation/components/ChatItem/index.tsx +3 -6
- package/src/features/Portal/Thread/Chat/ChatItem.tsx +4 -9
- package/src/hooks/useAgentEnableSearch.ts +2 -2
- package/src/libs/agent-runtime/anthropic/index.test.ts +36 -7
- package/src/libs/agent-runtime/anthropic/index.ts +30 -8
- package/src/libs/agent-runtime/azureOpenai/index.ts +4 -9
- package/src/libs/agent-runtime/azureai/index.ts +4 -9
- package/src/libs/agent-runtime/openai/index.ts +21 -38
- package/src/libs/agent-runtime/types/chat.ts +4 -0
- package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +55 -0
- package/src/libs/agent-runtime/utils/anthropicHelpers.ts +37 -3
- package/src/libs/langchain/loaders/code/__tests__/long.json +2 -2
- package/src/libs/langchain/loaders/code/__tests__/long.txt +1 -1
- package/src/locales/default/chat.ts +8 -0
- package/src/store/agent/initialState.ts +2 -2
- package/src/store/agent/selectors.ts +1 -1
- package/src/store/agent/slices/chat/{selectors.test.ts → selectors/agent.test.ts} +2 -2
- package/src/store/agent/slices/chat/{selectors.ts → selectors/agent.ts} +24 -33
- package/src/store/agent/slices/chat/selectors/chatConfig.test.ts +184 -0
- package/src/store/agent/slices/chat/selectors/chatConfig.ts +65 -0
- package/src/store/agent/slices/chat/selectors/index.ts +2 -0
- package/src/store/agent/store.ts +2 -2
- package/src/store/chat/helpers.test.ts +7 -7
- package/src/store/chat/helpers.ts +11 -7
- package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +3 -3
- package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +11 -2
- package/src/store/chat/slices/aiChat/actions/helpers.ts +6 -2
- package/src/store/chat/slices/builtinTool/actions/searXNG.ts +28 -20
- package/src/store/chat/slices/message/selectors.ts +7 -3
- package/src/store/chat/slices/thread/selectors/index.ts +7 -3
- package/src/tools/web-browsing/Render/PageContent/Result.tsx +4 -2
- package/src/tools/web-browsing/Render/index.tsx +2 -0
- package/src/types/agent/index.ts +4 -0
- package/src/types/aiModel.ts +1 -1
- package/src/types/aiProvider.ts +60 -31
- /package/packages/web-crawler/src/{__test__ → __tests__}/crawler.test.ts +0 -0
- /package/packages/web-crawler/src/crawImpl/{__test__ → __tests__}/jina.test.ts +0 -0
- /package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/{KnowledgeTag.tsx → Tags/KnowledgeTag.tsx} +0 -0
- /package/src/store/agent/slices/chat/{__snapshots__/selectors.test.ts.snap → selectors/__snapshots__/agent.test.ts.snap} +0 -0
package/src/features/Portal/Thread/Chat/ChatItem.tsx

```diff
@@ -3,7 +3,7 @@ import React, { memo, useMemo } from 'react';
 import { ChatItem } from '@/features/Conversation';
 import ActionsBar from '@/features/Conversation/components/ChatItem/ActionsBar';
 import { useAgentStore } from '@/store/agent';
-import {
+import { agentChatConfigSelectors } from '@/store/agent/selectors';
 import { useChatStore } from '@/store/chat';
 import { threadSelectors } from '@/store/chat/selectors';
 
@@ -35,14 +35,9 @@ const ThreadChatItem = memo<ThreadChatItemProps>(({ id, index }) => {
     [id, isParentMessage],
   );
 
-  const enableHistoryDivider = useAgentStore(
-
-
-      config.enableHistoryCount &&
-      historyLength > (config.historyCount ?? 0) &&
-      config.historyCount === historyLength - index
-    );
-  });
+  const enableHistoryDivider = useAgentStore(
+    agentChatConfigSelectors.enableHistoryDivider(historyLength, index),
+  );
 
   return (
     <ChatItem
```
package/src/hooks/useAgentEnableSearch.ts

```diff
@@ -1,12 +1,12 @@
 import { useAgentStore } from '@/store/agent';
-import { agentSelectors } from '@/store/agent/
+import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selectors';
 import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
 
 export const useAgentEnableSearch = () => {
   const [model, provider, agentSearchMode] = useAgentStore((s) => [
     agentSelectors.currentAgentModel(s),
     agentSelectors.currentAgentModelProvider(s),
-
+    agentChatConfigSelectors.agentSearchMode(s),
   ]);
 
   const isModelSupportToolUse = useAiInfraStore(
```
package/src/libs/agent-runtime/anthropic/index.test.ts

```diff
@@ -81,7 +81,12 @@ describe('LobeAnthropicAI', () => {
       expect(instance['client'].messages.create).toHaveBeenCalledWith(
         {
           max_tokens: 4096,
-          messages: [
+          messages: [
+            {
+              content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+              role: 'user',
+            },
+          ],
           model: 'claude-3-haiku-20240307',
           stream: true,
           temperature: 0,
@@ -117,10 +122,21 @@ describe('LobeAnthropicAI', () => {
       expect(instance['client'].messages.create).toHaveBeenCalledWith(
         {
           max_tokens: 4096,
-          messages: [
+          messages: [
+            {
+              content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+              role: 'user',
+            },
+          ],
           model: 'claude-3-haiku-20240307',
           stream: true,
-          system:
+          system: [
+            {
+              cache_control: { type: 'ephemeral' },
+              type: 'text',
+              text: 'You are an awesome greeter',
+            },
+          ],
           temperature: 0,
         },
         {},
@@ -152,7 +168,12 @@ describe('LobeAnthropicAI', () => {
       expect(instance['client'].messages.create).toHaveBeenCalledWith(
         {
           max_tokens: 2048,
-          messages: [
+          messages: [
+            {
+              content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+              role: 'user',
+            },
+          ],
           model: 'claude-3-haiku-20240307',
           stream: true,
           temperature: 0.25,
@@ -189,7 +210,12 @@ describe('LobeAnthropicAI', () => {
       expect(instance['client'].messages.create).toHaveBeenCalledWith(
         {
           max_tokens: 2048,
-          messages: [
+          messages: [
+            {
+              content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
+              role: 'user',
+            },
+          ],
           model: 'claude-3-haiku-20240307',
           stream: true,
           temperature: 0.25,
@@ -240,7 +266,7 @@ describe('LobeAnthropicAI', () => {
     });
 
     describe('chat with tools', () => {
-      it('should call
+      it('should call tools when tools are provided', async () => {
         // Arrange
         const tools: ChatCompletionTool[] = [
           { function: { name: 'tool1', description: 'desc1' }, type: 'function' },
@@ -257,7 +283,10 @@ describe('LobeAnthropicAI', () => {
 
       // Assert
       expect(instance['client'].messages.create).toHaveBeenCalled();
-      expect(spyOn).toHaveBeenCalledWith(
+      expect(spyOn).toHaveBeenCalledWith(
+        [{ function: { name: 'tool1', description: 'desc1' }, type: 'function' }],
+        { enabledContextCaching: true },
+      );
     });
   });
 
```
package/src/libs/agent-runtime/anthropic/index.ts

```diff
@@ -97,10 +97,33 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
   }
 
   private async buildAnthropicPayload(payload: ChatStreamPayload) {
-    const {
+    const {
+      messages,
+      model,
+      max_tokens,
+      temperature,
+      top_p,
+      tools,
+      thinking,
+      enabledContextCaching = true,
+    } = payload;
     const system_message = messages.find((m) => m.role === 'system');
     const user_messages = messages.filter((m) => m.role !== 'system');
 
+    const systemPrompts = !!system_message?.content
+      ? ([
+          {
+            cache_control: enabledContextCaching ? { type: 'ephemeral' } : undefined,
+            text: system_message?.content as string,
+            type: 'text',
+          },
+        ] as Anthropic.TextBlockParam[])
+      : undefined;
+
+    const postMessages = await buildAnthropicMessages(user_messages, { enabledContextCaching });
+
+    const postTools = buildAnthropicTools(tools, { enabledContextCaching });
+
     if (!!thinking) {
       const maxTokens =
         max_tokens ?? (thinking?.budget_tokens ? thinking?.budget_tokens + 4096 : 4096);
@@ -109,22 +132,21 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
       // `top_p` must be unset when thinking is enabled.
       return {
         max_tokens: maxTokens,
-        messages:
+        messages: postMessages,
         model,
-        system:
-
+        system: systemPrompts,
         thinking,
-        tools:
+        tools: postTools,
       } satisfies Anthropic.MessageCreateParams;
     }
 
     return {
       max_tokens: max_tokens ?? 4096,
-      messages:
+      messages: postMessages,
       model,
-      system:
+      system: systemPrompts,
       temperature: payload.temperature !== undefined ? temperature / 2 : undefined,
-      tools:
+      tools: postTools,
       top_p,
    } satisfies Anthropic.MessageCreateParams;
  }
```
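Taken together with the tests above, the payload builder now places Anthropic prompt-caching markers on the system block, the last message, and the last tool, with caching on by default (`enabledContextCaching = true`). For illustration, a request equivalent to what `buildAnthropicPayload` produces, written directly against `@anthropic-ai/sdk`; the literal values are stand-ins taken from the tests, not lobe-chat code:

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

const stream = await client.messages.create({
  max_tokens: 4096,
  // the last user/assistant message carries a cache breakpoint
  messages: [
    {
      content: [{ cache_control: { type: 'ephemeral' }, text: 'Hello', type: 'text' }],
      role: 'user',
    },
  ],
  model: 'claude-3-haiku-20240307',
  stream: true,
  // the system prompt becomes an array of text blocks so it can carry cache_control too
  system: [
    { cache_control: { type: 'ephemeral' }, text: 'You are an awesome greeter', type: 'text' },
  ],
  temperature: 0,
});
```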
package/src/libs/agent-runtime/azureOpenai/index.ts

```diff
@@ -1,6 +1,8 @@
 import OpenAI, { AzureOpenAI } from 'openai';
 import type { Stream } from 'openai/streaming';
 
+import { systemToUserModels } from '@/const/models';
+
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
@@ -13,7 +15,7 @@ import { OpenAIStream } from '../utils/streams';
 export class LobeAzureOpenAI implements LobeRuntimeAI {
   client: AzureOpenAI;
 
-  constructor(params: { apiKey?: string; apiVersion?: string
+  constructor(params: { apiKey?: string; apiVersion?: string; baseURL?: string } = {}) {
     if (!params.apiKey || !params.baseURL)
       throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
@@ -34,17 +36,10 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
     // o1 series models on Azure OpenAI does not support streaming currently
     const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
 
-    // Convert 'system' role to 'user' or 'developer' based on the model
-    const systemToUserModels = new Set([
-      'o1-preview',
-      'o1-preview-2024-09-12',
-      'o1-mini',
-      'o1-mini-2024-09-12',
-    ]);
-
     const updatedMessages = messages.map((message) => ({
       ...message,
       role:
+        // Convert 'system' role to 'user' or 'developer' based on the model
         (model.includes('o1') || model.includes('o3')) && message.role === 'system'
           ? [...systemToUserModels].some((sub) => model.includes(sub))
             ? 'user'
```
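Both Azure runtimes (and the OpenAI runtime below) now import these sets from the new shared module `package/src/const/models.ts` (+27 lines). Only the sets visible in this diff's removed code can be reconstructed; a plausible sketch of the relevant exports, with the rest of the file unknown:

```ts
// src/const/models.ts (sketch; contents inferred from the inlined sets removed
// in the azureOpenai, azureai, and openai runtimes)
export const systemToUserModels = new Set([
  'o1-preview',
  'o1-preview-2024-09-12',
  'o1-mini',
  'o1-mini-2024-09-12',
]);

export const disableStreamModels = new Set(['o1', 'o1-2024-12-17']);
```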
package/src/libs/agent-runtime/azureai/index.ts

```diff
@@ -2,6 +2,8 @@ import createClient, { ModelClient } from '@azure-rest/ai-inference';
 import { AzureKeyCredential } from '@azure/core-auth';
 import OpenAI from 'openai';
 
+import { systemToUserModels } from '@/const/models';
+
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
@@ -30,24 +32,17 @@ export class LobeAzureAI implements LobeRuntimeAI {
     // o1 series models on Azure OpenAI does not support streaming currently
     const enableStreaming = model.includes('o1') ? false : (params.stream ?? true);
 
-    // Convert 'system' role to 'user' or 'developer' based on the model
-    const systemToUserModels = new Set([
-      'o1-preview',
-      'o1-preview-2024-09-12',
-      'o1-mini',
-      'o1-mini-2024-09-12',
-    ]);
-
     const updatedMessages = messages.map((message) => ({
       ...message,
       role:
+        // Convert 'system' role to 'user' or 'developer' based on the model
         (model.includes('o1') || model.includes('o3')) && message.role === 'system'
           ? [...systemToUserModels].some((sub) => model.includes(sub))
             ? 'user'
             : 'developer'
           : message.role,
     }));
-
+
     try {
       const response = this.client.path('/chat/completions').post({
         body: {
```
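The nested ternary both Azure runtimes share is easy to misread, so here is a standalone restatement with sample inputs; the set contents come from the removed code above, and `convertSystemRole` is just an illustrative name:

```ts
const systemToUserModels = new Set([
  'o1-preview',
  'o1-preview-2024-09-12',
  'o1-mini',
  'o1-mini-2024-09-12',
]);

// 'system' becomes 'user' for the early o1 previews/minis, 'developer' for
// newer o1/o3 models, and is left untouched for everything else.
const convertSystemRole = (model: string, role: string): string =>
  (model.includes('o1') || model.includes('o3')) && role === 'system'
    ? [...systemToUserModels].some((sub) => model.includes(sub))
      ? 'user'
      : 'developer'
    : role;

console.log(convertSystemRole('o1-mini', 'system')); // 'user'
console.log(convertSystemRole('o1-2024-12-17', 'system')); // 'developer'
console.log(convertSystemRole('gpt-4o', 'system')); // 'system'
```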
package/src/libs/agent-runtime/openai/index.ts

```diff
@@ -1,25 +1,14 @@
+import { disableStreamModels, systemToUserModels } from '@/const/models';
+import type { ChatModelCard } from '@/types/llm';
+
 import { ChatStreamPayload, ModelProvider, OpenAIChatMessage } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import type { ChatModelCard } from '@/types/llm';
-
 export interface OpenAIModelCard {
   id: string;
 }
 
 export const pruneReasoningPayload = (payload: ChatStreamPayload) => {
-  // TODO: 临时写法,后续要重构成 model card 展示配置
-  const disableStreamModels = new Set([
-    'o1',
-    'o1-2024-12-17'
-  ]);
-  const systemToUserModels = new Set([
-    'o1-preview',
-    'o1-preview-2024-09-12',
-    'o1-mini',
-    'o1-mini-2024-09-12',
-  ]);
-
   return {
     ...payload,
     frequency_penalty: 0,
```
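The body of `pruneReasoningPayload` is cut off in this view. Based on the sets it consumes and the visible `frequency_penalty: 0`, it plausibly normalizes reasoning-model payloads roughly as sketched below; this is an assumption for illustration, not the package's verbatim code:

```ts
// Self-contained sketch; the real helper lives in src/libs/agent-runtime/openai/index.ts.
const disableStreamModels = new Set(['o1', 'o1-2024-12-17']);
const systemToUserModels = new Set([
  'o1-preview',
  'o1-preview-2024-09-12',
  'o1-mini',
  'o1-mini-2024-09-12',
]);

interface Message {
  content: string;
  role: string;
}

interface Payload {
  frequency_penalty?: number;
  messages: Message[];
  model: string;
  stream?: boolean;
}

export const pruneReasoningPayload = (payload: Payload): Payload => ({
  ...payload,
  frequency_penalty: 0,
  // o1-preview/o1-mini reject 'system'; newer o1/o3 accept 'developer'
  messages: payload.messages.map((message) => ({
    ...message,
    role:
      message.role === 'system'
        ? [...systemToUserModels].some((sub) => payload.model.includes(sub))
          ? 'user'
          : 'developer'
        : message.role,
  })),
  // some reasoning models reject streaming entirely
  stream: !disableStreamModels.has(payload.model),
});
```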
```diff
@@ -58,46 +47,40 @@ export const LobeOpenAI = LobeOpenAICompatibleFactory({
   models: async ({ client }) => {
     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-    const functionCallKeywords = [
-      'gpt-4',
-      'gpt-3.5',
-      'o3-mini',
-    ];
+    const functionCallKeywords = ['gpt-4', 'gpt-3.5', 'o3-mini'];
 
-    const visionKeywords = [
-      'gpt-4o',
-      'vision',
-    ];
+    const visionKeywords = ['gpt-4o', 'vision'];
 
-    const reasoningKeywords = [
-      'o1',
-      'o3',
-    ];
+    const reasoningKeywords = ['o1', 'o3'];
 
-    const modelsPage = await client.models.list() as any;
+    const modelsPage = (await client.models.list()) as any;
     const modelList: OpenAIModelCard[] = modelsPage.data;
 
     return modelList
       .map((model) => {
-        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+        const knownModel = LOBE_DEFAULT_MODEL_LIST.find(
+          (m) => model.id.toLowerCase() === m.id.toLowerCase(),
+        );
 
         return {
           contextWindowTokens: knownModel?.contextWindowTokens ?? undefined,
           displayName: knownModel?.displayName ?? undefined,
           enabled: knownModel?.enabled || false,
           functionCall:
-            functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) &&
-
-            ||
+            (functionCallKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
+              !model.id.toLowerCase().includes('audio')) ||
+            knownModel?.abilities?.functionCall ||
+            false,
           id: model.id,
           reasoning:
-            reasoningKeywords.some(keyword => model.id.toLowerCase().includes(keyword))
-
-
+            reasoningKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) ||
+            knownModel?.abilities?.reasoning ||
+            false,
           vision:
-            visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)) &&
-
-            ||
+            (visionKeywords.some((keyword) => model.id.toLowerCase().includes(keyword)) &&
+              !model.id.toLowerCase().includes('audio')) ||
+            knownModel?.abilities?.vision ||
+            false,
         };
       })
      .filter(Boolean) as ChatModelCard[];
```
package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts

```diff
@@ -619,6 +619,26 @@ describe('anthropicHelpers', () => {
         { content: '继续', role: 'user' },
       ]);
     });
+
+    it('should enable cache control', async () => {
+      const messages: OpenAIChatMessage[] = [
+        { content: 'Hello', role: 'user' },
+        { content: 'Hello', role: 'user' },
+        { content: 'Hi', role: 'assistant' },
+      ];
+
+      const contents = await buildAnthropicMessages(messages, { enabledContextCaching: true });
+
+      expect(contents).toHaveLength(3);
+      expect(contents).toEqual([
+        { content: 'Hello', role: 'user' },
+        { content: 'Hello', role: 'user' },
+        {
+          content: [{ cache_control: { type: 'ephemeral' }, text: 'Hi', type: 'text' }],
+          role: 'assistant',
+        },
+      ]);
+    });
   });
 
   describe('buildAnthropicTools', () => {
@@ -656,5 +676,40 @@ describe('anthropicHelpers', () => {
       },
     ]);
   });
+    it('should enable cache control', () => {
+      const tools: OpenAI.ChatCompletionTool[] = [
+        {
+          type: 'function',
+          function: {
+            name: 'search',
+            description: 'Searches the web',
+            parameters: {
+              type: 'object',
+              properties: {
+                query: { type: 'string' },
+              },
+              required: ['query'],
+            },
+          },
+        },
+      ];
+
+      const result = buildAnthropicTools(tools, { enabledContextCaching: true });
+
+      expect(result).toEqual([
+        {
+          name: 'search',
+          description: 'Searches the web',
+          input_schema: {
+            type: 'object',
+            properties: {
+              query: { type: 'string' },
+            },
+            required: ['query'],
+          },
+          cache_control: { type: 'ephemeral' },
+        },
+      ]);
+    });
   });
 });
```
package/src/libs/agent-runtime/utils/anthropicHelpers.ts

```diff
@@ -130,6 +130,7 @@ export const buildAnthropicMessage = async (
 
 export const buildAnthropicMessages = async (
   oaiMessages: OpenAIChatMessage[],
+  options: { enabledContextCaching?: boolean } = {},
 ): Promise<Anthropic.Messages.MessageParam[]> => {
   const messages: Anthropic.Messages.MessageParam[] = [];
   let pendingToolResults: Anthropic.ToolResultBlockParam[] = [];
@@ -180,13 +181,46 @@ export const buildAnthropicMessages = async (
     }
   }
 
+  const lastMessage = messages.at(-1);
+  if (options.enabledContextCaching && !!lastMessage) {
+    if (typeof lastMessage.content === 'string') {
+      lastMessage.content = [
+        {
+          cache_control: { type: 'ephemeral' },
+          text: lastMessage.content as string,
+          type: 'text',
+        },
+      ];
+    } else {
+      const lastContent = lastMessage.content.at(-1);
+
+      if (
+        lastContent &&
+        lastContent.type !== 'thinking' &&
+        lastContent.type !== 'redacted_thinking'
+      ) {
+        lastContent.cache_control = { type: 'ephemeral' };
+      }
+    }
+  }
   return messages;
 };
-
-
-
+
+export const buildAnthropicTools = (
+  tools?: OpenAI.ChatCompletionTool[],
+  options: { enabledContextCaching?: boolean } = {},
+) => {
+  if (!tools) return;
+
+  return tools.map(
+    (tool, index): Anthropic.Tool => ({
+      cache_control:
+        options.enabledContextCaching && index === tools.length - 1
+          ? { type: 'ephemeral' }
+          : undefined,
       description: tool.function.description,
       input_schema: tool.function.parameters as Anthropic.Tool.InputSchema,
       name: tool.function.name,
     }),
   );
+};
```
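Usage of the two helpers, mirroring the unit tests above: with caching enabled, the breakpoint lands on the final message and on the final tool.

```ts
const messages = await buildAnthropicMessages(
  [
    { content: 'Hello', role: 'user' },
    { content: 'Hi', role: 'assistant' },
  ],
  { enabledContextCaching: true },
);
// → [
//     { content: 'Hello', role: 'user' },
//     {
//       content: [{ cache_control: { type: 'ephemeral' }, text: 'Hi', type: 'text' }],
//       role: 'assistant',
//     },
//   ]

const tools = buildAnthropicTools(
  [
    {
      function: {
        description: 'Searches the web',
        name: 'search',
        parameters: { properties: { query: { type: 'string' } }, required: ['query'], type: 'object' },
      },
      type: 'function',
    },
  ],
  { enabledContextCaching: true },
);
// → the last (and only) tool gains cache_control: { type: 'ephemeral' }
```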
package/src/libs/langchain/loaders/code/__tests__/long.json

```diff
@@ -164,11 +164,11 @@
       "metadata": { "loc": { "lines": { "from": 547, "to": 570 } } }
     },
     {
-      "pageContent": "const abortController = internal_toggleChatLoading(\n true,\n assistantId,\n n('generateMessage(start)', { assistantId, messages }) as string,\n );\n\n const agentConfig = getAgentConfig();\n const chatConfig = agentConfig.chatConfig;\n\n const compiler = template(chatConfig.inputTemplate, { interpolate: /{{([\\S\\s]+?)}}/g });\n\n // ================================== //\n // messages uniformly preprocess //\n // ================================== //\n\n // 1. slice messages with config\n let preprocessMsgs = chatHelpers.
+      "pageContent": "const abortController = internal_toggleChatLoading(\n true,\n assistantId,\n n('generateMessage(start)', { assistantId, messages }) as string,\n );\n\n const agentConfig = getAgentConfig();\n const chatConfig = agentConfig.chatConfig;\n\n const compiler = template(chatConfig.inputTemplate, { interpolate: /{{([\\S\\s]+?)}}/g });\n\n // ================================== //\n // messages uniformly preprocess //\n // ================================== //\n\n // 1. slice messages with config\n let preprocessMsgs = chatHelpers.getSlicedMessages(messages, chatConfig);",
       "metadata": { "loc": { "lines": { "from": 566, "to": 582 } } }
     },
     {
-      "pageContent": "const compiler = template(chatConfig.inputTemplate, { interpolate: /{{([\\S\\s]+?)}}/g });\n\n // ================================== //\n // messages uniformly preprocess //\n // ================================== //\n\n // 1. slice messages with config\n let preprocessMsgs = chatHelpers.
+      "pageContent": "const compiler = template(chatConfig.inputTemplate, { interpolate: /{{([\\S\\s]+?)}}/g });\n\n // ================================== //\n // messages uniformly preprocess //\n // ================================== //\n\n // 1. slice messages with config\n let preprocessMsgs = chatHelpers.getSlicedMessages(messages, chatConfig);\n\n // 2. replace inputMessage template\n preprocessMsgs = !chatConfig.inputTemplate\n ? preprocessMsgs\n : preprocessMsgs.map((m) => {\n if (m.role === 'user') {\n try {\n return { ...m, content: compiler({ text: m.content }) };\n } catch (error) {\n console.error(error);\n\n return m;\n }\n }\n\n return m;\n });",
       "metadata": { "loc": { "lines": { "from": 575, "to": 599 } } }
     },
     {
```
package/src/libs/langchain/loaders/code/__tests__/long.txt

```diff
@@ -579,7 +579,7 @@ export const chatMessage: StateCreator<
       // ================================== //
 
       // 1. slice messages with config
-      let preprocessMsgs = chatHelpers.
+      let preprocessMsgs = chatHelpers.getSlicedMessages(messages, chatConfig);
 
       // 2. replace inputMessage template
       preprocessMsgs = !chatConfig.inputTemplate
```
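`getSlicedMessages` is the renamed slicing helper from `package/src/store/chat/helpers.ts`; the truncated `-` lines above hide its old name. A hypothetical sketch of its behavior, inferred from the `enableHistoryCount`/`historyCount` config seen elsewhere in this diff and from the new locale string below ("the assistant will only remember the last {{count}} messages"); the real implementation may differ:

```ts
interface ChatConfig {
  enableHistoryCount?: boolean;
  historyCount?: number;
}

const getSlicedMessages = <T>(messages: T[], config: ChatConfig): T[] => {
  // no history limit configured: pass everything through
  if (!config.enableHistoryCount || config.historyCount === undefined) return messages;

  // keep only the most recent `historyCount` messages
  if (config.historyCount <= 0) return [];
  return messages.slice(-config.historyCount);
};

// e.g. getSlicedMessages([m1, m2, m3, m4], { enableHistoryCount: true, historyCount: 2 })
// → [m3, m4]
```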
package/src/locales/default/chat.ts

```diff
@@ -33,7 +33,12 @@ export default {
   duplicateTitle: '{{title}} 副本',
   emptyAgent: '暂无助手',
   extendParams: {
+    disableContextCaching: {
+      desc: '单条对话生成成本最高可降低 90%,响应速度提升 4 倍(<1>了解更多</1>)。开启后将自动禁用历史消息数限制',
+      title: '开启上下文缓存',
+    },
     enableReasoning: {
+      desc: '基于 Claude Thinking 机制限制(<1>了解更多</1>),开启后将自动禁用历史消息数限制',
       title: '开启深度思考',
     },
     reasoningBudgetToken: {
@@ -41,6 +46,9 @@ export default {
     },
     title: '模型扩展功能',
   },
+  history: {
+    title: '助手将只记住最后{{count}}条消息',
+  },
   historyRange: '历史范围',
   historySummary: '历史消息总结',
   inbox: {
```
package/src/store/agent/initialState.ts

```diff
@@ -1,7 +1,7 @@
 import { AgentState, initialAgentChatState } from './slices/chat/initialState';
 
-export type
+export type AgentStoreState = AgentState;
 
-export const initialState:
+export const initialState: AgentStoreState = {
   ...initialAgentChatState,
 };
```
package/src/store/agent/selectors.ts

```diff
@@ -1 +1 @@
-export { agentSelectors } from './slices/chat/selectors';
+export { agentChatConfigSelectors,agentSelectors } from './slices/chat/selectors';
```
package/src/store/agent/slices/chat/selectors/agent.test.ts (moved from package/src/store/agent/slices/chat/selectors.test.ts)

```diff
@@ -6,8 +6,8 @@ import { AgentStore } from '@/store/agent';
 import { AgentState } from '@/store/agent/slices/chat/initialState';
 import { merge } from '@/utils/merge';
 
-import { initialState } from '
-import { agentSelectors } from './
+import { initialState } from '../../../initialState';
+import { agentSelectors } from './agent';
 
 vi.mock('i18next', () => ({
   t: vi.fn((key) => key), // Simplified mock return value
```