@lobehub/chat 1.68.10 → 1.69.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. package/CHANGELOG.md +51 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/chat.json +8 -0
  4. package/locales/ar/models.json +6 -0
  5. package/locales/bg-BG/chat.json +8 -0
  6. package/locales/bg-BG/models.json +6 -0
  7. package/locales/de-DE/chat.json +8 -0
  8. package/locales/de-DE/models.json +6 -0
  9. package/locales/en-US/chat.json +8 -0
  10. package/locales/en-US/models.json +6 -0
  11. package/locales/es-ES/chat.json +8 -0
  12. package/locales/es-ES/models.json +6 -0
  13. package/locales/fa-IR/chat.json +8 -0
  14. package/locales/fa-IR/models.json +6 -0
  15. package/locales/fr-FR/chat.json +8 -0
  16. package/locales/fr-FR/models.json +6 -0
  17. package/locales/it-IT/chat.json +8 -0
  18. package/locales/it-IT/models.json +6 -0
  19. package/locales/ja-JP/chat.json +8 -0
  20. package/locales/ja-JP/models.json +6 -0
  21. package/locales/ko-KR/chat.json +8 -0
  22. package/locales/ko-KR/models.json +6 -0
  23. package/locales/nl-NL/chat.json +8 -0
  24. package/locales/nl-NL/models.json +6 -0
  25. package/locales/pl-PL/chat.json +8 -0
  26. package/locales/pl-PL/models.json +6 -0
  27. package/locales/pt-BR/chat.json +8 -0
  28. package/locales/pt-BR/models.json +6 -0
  29. package/locales/ru-RU/chat.json +8 -0
  30. package/locales/ru-RU/models.json +6 -0
  31. package/locales/tr-TR/chat.json +8 -0
  32. package/locales/tr-TR/models.json +6 -0
  33. package/locales/vi-VN/chat.json +8 -0
  34. package/locales/vi-VN/models.json +6 -0
  35. package/locales/zh-CN/chat.json +8 -0
  36. package/locales/zh-CN/models.json +6 -0
  37. package/locales/zh-TW/chat.json +8 -0
  38. package/locales/zh-TW/models.json +6 -0
  39. package/next.config.ts +6 -0
  40. package/package.json +1 -1
  41. package/packages/web-crawler/src/crawImpl/naive.ts +19 -12
  42. package/packages/web-crawler/src/urlRules.ts +9 -1
  43. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +9 -18
  44. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/WelcomeChatItem/WelcomeMessage.tsx +2 -5
  45. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/HeaderAction.tsx +3 -2
  46. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +56 -30
  47. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Tags/HistoryLimitTags.tsx +26 -0
  48. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/{SearchTags.tsx → Tags/SearchTags.tsx} +7 -4
  49. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/{Tags.tsx → Tags/index.tsx} +4 -1
  50. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/index.tsx +1 -1
  51. package/src/config/aiModels/anthropic.ts +16 -1
  52. package/src/config/aiModels/google.ts +37 -0
  53. package/src/config/aiModels/qwen.ts +64 -25
  54. package/src/config/modelProviders/anthropic.ts +0 -2
  55. package/src/const/layoutTokens.test.ts +1 -1
  56. package/src/const/layoutTokens.ts +1 -1
  57. package/src/const/models.ts +27 -0
  58. package/src/features/ChatInput/ActionBar/History.tsx +6 -3
  59. package/src/features/ChatInput/ActionBar/Model/ContextCachingSwitch.tsx +20 -0
  60. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +49 -7
  61. package/src/features/ChatInput/ActionBar/Model/ReasoningTokenSlider.tsx +6 -14
  62. package/src/features/ChatInput/ActionBar/Search/ModelBuiltinSearch.tsx +2 -2
  63. package/src/features/ChatInput/ActionBar/Search/SwitchPanel.tsx +2 -2
  64. package/src/features/ChatInput/ActionBar/Token/TokenTag.tsx +3 -5
  65. package/src/features/Conversation/Messages/Assistant/Tool/Render/CustomRender.tsx +2 -0
  66. package/src/features/Conversation/Messages/Assistant/Tool/Render/index.tsx +5 -1
  67. package/src/features/Conversation/Messages/Assistant/Tool/index.tsx +2 -0
  68. package/src/features/Conversation/components/ChatItem/index.tsx +3 -6
  69. package/src/features/Portal/Thread/Chat/ChatItem.tsx +4 -9
  70. package/src/hooks/useAgentEnableSearch.ts +2 -2
  71. package/src/libs/agent-runtime/anthropic/index.test.ts +36 -7
  72. package/src/libs/agent-runtime/anthropic/index.ts +30 -8
  73. package/src/libs/agent-runtime/azureOpenai/index.ts +4 -9
  74. package/src/libs/agent-runtime/azureai/index.ts +4 -9
  75. package/src/libs/agent-runtime/openai/index.ts +21 -38
  76. package/src/libs/agent-runtime/types/chat.ts +4 -0
  77. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +55 -0
  78. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +37 -3
  79. package/src/libs/langchain/loaders/code/__tests__/long.json +2 -2
  80. package/src/libs/langchain/loaders/code/__tests__/long.txt +1 -1
  81. package/src/locales/default/chat.ts +8 -0
  82. package/src/store/agent/initialState.ts +2 -2
  83. package/src/store/agent/selectors.ts +1 -1
  84. package/src/store/agent/slices/chat/{selectors.test.ts → selectors/agent.test.ts} +2 -2
  85. package/src/store/agent/slices/chat/{selectors.ts → selectors/agent.ts} +24 -33
  86. package/src/store/agent/slices/chat/selectors/chatConfig.test.ts +184 -0
  87. package/src/store/agent/slices/chat/selectors/chatConfig.ts +65 -0
  88. package/src/store/agent/slices/chat/selectors/index.ts +2 -0
  89. package/src/store/agent/store.ts +2 -2
  90. package/src/store/chat/helpers.test.ts +7 -7
  91. package/src/store/chat/helpers.ts +11 -7
  92. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +3 -3
  93. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +11 -2
  94. package/src/store/chat/slices/aiChat/actions/helpers.ts +6 -2
  95. package/src/store/chat/slices/builtinTool/actions/searXNG.ts +28 -20
  96. package/src/store/chat/slices/message/selectors.ts +7 -3
  97. package/src/store/chat/slices/thread/selectors/index.ts +7 -3
  98. package/src/tools/web-browsing/Render/PageContent/Result.tsx +4 -2
  99. package/src/tools/web-browsing/Render/index.tsx +2 -0
  100. package/src/types/agent/index.ts +4 -0
  101. package/src/types/aiModel.ts +1 -1
  102. package/src/types/aiProvider.ts +60 -31
  103. /package/packages/web-crawler/src/{__test__ → __tests__}/crawler.test.ts +0 -0
  104. /package/packages/web-crawler/src/crawImpl/{__test__ → __tests__}/jina.test.ts +0 -0
  105. /package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/{KnowledgeTag.tsx → Tags/KnowledgeTag.tsx} +0 -0
  106. /package/src/store/agent/slices/chat/{__snapshots__/selectors.test.ts.snap → selectors/__snapshots__/agent.test.ts.snap} +0 -0
@@ -22,7 +22,7 @@ const anthropicChatModels: AIChatModelCard[] = [
22
22
  },
23
23
  releasedAt: '2025-02-24',
24
24
  settings: {
25
- extendParams: ['enableReasoning', 'reasoningBudgetToken'],
25
+ extendParams: ['disableContextCaching', 'enableReasoning', 'reasoningBudgetToken'],
26
26
  },
27
27
  type: 'chat',
28
28
  },
@@ -45,6 +45,9 @@ const anthropicChatModels: AIChatModelCard[] = [
45
45
  writeCacheInput: 1.25,
46
46
  },
47
47
  releasedAt: '2024-11-05',
48
+ settings: {
49
+ extendParams: ['disableContextCaching'],
50
+ },
48
51
  type: 'chat',
49
52
  },
50
53
  {
@@ -66,6 +69,9 @@ const anthropicChatModels: AIChatModelCard[] = [
66
69
  writeCacheInput: 3.75,
67
70
  },
68
71
  releasedAt: '2024-10-22',
72
+ settings: {
73
+ extendParams: ['disableContextCaching'],
74
+ },
69
75
  type: 'chat',
70
76
  },
71
77
  {
@@ -86,6 +92,9 @@ const anthropicChatModels: AIChatModelCard[] = [
86
92
  writeCacheInput: 3.75,
87
93
  },
88
94
  releasedAt: '2024-06-20',
95
+ settings: {
96
+ extendParams: ['disableContextCaching'],
97
+ },
89
98
  type: 'chat',
90
99
  },
91
100
  {
@@ -104,6 +113,9 @@ const anthropicChatModels: AIChatModelCard[] = [
104
113
  output: 1.25,
105
114
  },
106
115
  releasedAt: '2024-03-07',
116
+ settings: {
117
+ extendParams: ['disableContextCaching'],
118
+ },
107
119
  type: 'chat',
108
120
  },
109
121
  {
@@ -141,6 +153,9 @@ const anthropicChatModels: AIChatModelCard[] = [
141
153
  output: 75,
142
154
  },
143
155
  releasedAt: '2024-02-29',
156
+ settings: {
157
+ extendParams: ['disableContextCaching'],
158
+ },
144
159
  type: 'chat',
145
160
  },
146
161
  {
@@ -75,6 +75,23 @@ const googleChatModels: AIChatModelCard[] = [
75
75
  },
76
76
  type: 'chat',
77
77
  },
78
+ {
79
+ abilities: {
80
+ vision: true,
81
+ },
82
+ contextWindowTokens: 1_048_576 + 8192,
83
+ description: 'Gemini 2.0 Flash 模型变体,针对成本效益和低延迟等目标进行了优化。',
84
+ displayName: 'Gemini 2.0 Flash-Lite',
85
+ id: 'gemini-2.0-flash-lite',
86
+ maxOutput: 8192,
87
+ pricing: {
88
+ cachedInput: 0.018_75,
89
+ input: 0.075,
90
+ output: 0.3,
91
+ },
92
+ releasedAt: '2025-02-05',
93
+ type: 'chat',
94
+ },
78
95
  {
79
96
  abilities: {
80
97
  vision: true,
@@ -92,6 +109,26 @@ const googleChatModels: AIChatModelCard[] = [
92
109
  releasedAt: '2025-02-05',
93
110
  type: 'chat',
94
111
  },
112
+ {
113
+ abilities: {
114
+ reasoning: true,
115
+ vision: true,
116
+ },
117
+ contextWindowTokens: 1_048_576 + 65_536,
118
+ description:
119
+ 'Gemini 2.0 Flash Thinking Exp 是 Google 的实验性多模态推理AI模型,能对复杂问题进行推理,拥有新的思维能力。',
120
+ displayName: 'Gemini 2.0 Flash Thinking Experimental',
121
+ enabled: true,
122
+ id: 'gemini-2.0-flash-thinking-exp',
123
+ maxOutput: 65_536,
124
+ pricing: {
125
+ cachedInput: 0,
126
+ input: 0,
127
+ output: 0,
128
+ },
129
+ releasedAt: '2025-01-21',
130
+ type: 'chat',
131
+ },
95
132
  {
96
133
  abilities: {
97
134
  reasoning: true,
@@ -3,6 +3,28 @@ import { AIChatModelCard } from '@/types/aiModel';
3
3
  // https://help.aliyun.com/zh/model-studio/developer-reference/use-qwen-by-calling-api#e1fada1a719u7
4
4
 
5
5
  const qwenChatModels: AIChatModelCard[] = [
6
+ {
7
+ abilities: {
8
+ reasoning: true,
9
+ },
10
+ contextWindowTokens: 131_072,
11
+ description: '基于 Qwen2.5 模型训练的 QwQ 推理模型,通过强化学习大幅度提升了模型推理能力。模型数学代码等核心指标(AIME 24/25、LiveCodeBench)以及部分通用指标(IFEval、LiveBench等)达到DeepSeek-R1 满血版水平。',
12
+ displayName: 'QwQ Plus',
13
+ enabled: true,
14
+ id: 'qwq-plus-latest',
15
+ maxOutput: 8192,
16
+ organization: 'Qwen',
17
+ pricing: {
18
+ currency: 'CNY',
19
+ input: 0,
20
+ output: 0,
21
+ },
22
+ releasedAt: '2025-03-06',
23
+ settings: {
24
+ searchImpl: 'params',
25
+ },
26
+ type: 'chat',
27
+ },
6
28
  {
7
29
  abilities: {
8
30
  functionCall: true,
@@ -71,10 +93,14 @@ const qwenChatModels: AIChatModelCard[] = [
71
93
  type: 'chat',
72
94
  },
73
95
  {
96
+ abilities: {
97
+ functionCall: true,
98
+ },
74
99
  contextWindowTokens: 1_000_000,
75
100
  description:
76
101
  '通义千问超大规模语言模型,支持长文本上下文,以及基于长文档、多文档等多个场景的对话功能。',
77
102
  displayName: 'Qwen Long',
103
+ enabled: true,
78
104
  id: 'qwen-long',
79
105
  maxOutput: 6000,
80
106
  organization: 'Qwen',
@@ -85,6 +111,24 @@ const qwenChatModels: AIChatModelCard[] = [
85
111
  },
86
112
  type: 'chat',
87
113
  },
114
+ {
115
+ abilities: {
116
+ vision: true,
117
+ },
118
+ contextWindowTokens: 32_768,
119
+ description: 'Qwen-Omni 系列模型支持输入多种模态的数据,包括视频、音频、图片、文本,并输出音频与文本。',
120
+ displayName: 'Qwen Omni Turbo',
121
+ enabled: true,
122
+ id: 'qwen-omni-turbo-latest',
123
+ maxOutput: 2048,
124
+ organization: 'Qwen',
125
+ pricing: {
126
+ currency: 'CNY',
127
+ input: 1.5, // use image input price
128
+ output: 4.5,
129
+ },
130
+ type: 'chat',
131
+ },
88
132
  {
89
133
  abilities: {
90
134
  vision: true,
@@ -199,7 +243,24 @@ const qwenChatModels: AIChatModelCard[] = [
199
243
  },
200
244
  {
201
245
  abilities: {
202
- functionCall: true,
246
+ reasoning: true,
247
+ },
248
+ contextWindowTokens: 131_072,
249
+ description: '基于 Qwen2.5-32B 模型训练的 QwQ 推理模型,通过强化学习大幅度提升了模型推理能力。模型数学代码等核心指标(AIME 24/25、LiveCodeBench)以及部分通用指标(IFEval、LiveBench等)达到DeepSeek-R1 满血版水平,各指标均显著超过同样基于 Qwen2.5-32B 的 DeepSeek-R1-Distill-Qwen-32B。',
250
+ displayName: 'QwQ 32B',
251
+ id: 'qwq-32b',
252
+ maxOutput: 8192,
253
+ organization: 'Qwen',
254
+ pricing: {
255
+ currency: 'CNY',
256
+ input: 0,
257
+ output: 0,
258
+ },
259
+ releasedAt: '2025-03-06',
260
+ type: 'chat',
261
+ },
262
+ {
263
+ abilities: {
203
264
  reasoning: true,
204
265
  },
205
266
  contextWindowTokens: 32_768,
@@ -210,8 +271,8 @@ const qwenChatModels: AIChatModelCard[] = [
210
271
  organization: 'Qwen',
211
272
  pricing: {
212
273
  currency: 'CNY',
213
- input: 3.5,
214
- output: 7,
274
+ input: 2,
275
+ output: 6,
215
276
  },
216
277
  releasedAt: '2024-11-28',
217
278
  type: 'chat',
@@ -410,23 +471,6 @@ const qwenChatModels: AIChatModelCard[] = [
410
471
  },
411
472
  type: 'chat',
412
473
  },
413
- {
414
- abilities: {
415
- vision: true,
416
- },
417
- contextWindowTokens: 32_768,
418
- description: 'Qwen-Omni 系列模型支持输入多种模态的数据,包括视频、音频、图片、文本,并输出音频与文本。',
419
- displayName: 'Qwen Omni Turbo',
420
- id: 'qwen-omni-turbo-latest',
421
- maxOutput: 2048,
422
- organization: 'Qwen',
423
- pricing: {
424
- currency: 'CNY',
425
- input: 0,
426
- output: 0,
427
- },
428
- type: 'chat',
429
- },
430
474
  {
431
475
  abilities: {
432
476
  vision: true,
@@ -473,7 +517,6 @@ const qwenChatModels: AIChatModelCard[] = [
473
517
  description:
474
518
  'DeepSeek-R1 在后训练阶段大规模使用了强化学习技术,在仅有极少标注数据的情况下,极大提升了模型推理能力。在数学、代码、自然语言推理等任务上,性能较高,能力较强。',
475
519
  displayName: 'DeepSeek R1',
476
- enabled: true,
477
520
  id: 'deepseek-r1',
478
521
  maxOutput: 8192,
479
522
  organization: 'DeepSeek',
@@ -486,14 +529,10 @@ const qwenChatModels: AIChatModelCard[] = [
486
529
  type: 'chat',
487
530
  },
488
531
  {
489
- abilities: {
490
- functionCall: true,
491
- },
492
532
  contextWindowTokens: 65_792,
493
533
  description:
494
534
  'DeepSeek-V3 为自研 MoE 模型,671B 参数,激活 37B,在 14.8T token 上进行了预训练,在长文本、代码、数学、百科、中文能力上表现优秀。',
495
535
  displayName: 'DeepSeek V3',
496
- enabled: true,
497
536
  id: 'deepseek-v3',
498
537
  maxOutput: 8192,
499
538
  organization: 'DeepSeek',
@@ -180,12 +180,10 @@ const Anthropic: ModelProviderCard = {
180
180
  sdkType: 'anthropic',
181
181
  showModelFetcher: true,
182
182
  smoothing: {
183
- speed: 5,
184
183
  text: true,
185
184
  },
186
185
  },
187
186
  smoothing: {
188
- speed: 5,
189
187
  text: true,
190
188
  },
191
189
  url: 'https://anthropic.com',
@@ -6,6 +6,6 @@ describe('HEADER_ICON_SIZE', () => {
6
6
  });
7
7
 
8
8
  it('desktop', () => {
9
- expect(HEADER_ICON_SIZE(false)).toEqual({ fontSize: 24 });
9
+ expect(HEADER_ICON_SIZE(false)).toEqual({ blockSize: 32, fontSize: 20 });
10
10
  });
11
11
  });
@@ -20,7 +20,7 @@ export const FORM_STYLE: FormProps = {
20
20
  style: { maxWidth: MAX_WIDTH, width: '100%' },
21
21
  };
22
22
  export const MOBILE_HEADER_ICON_SIZE = { blockSize: 36, fontSize: 22 };
23
- export const DESKTOP_HEADER_ICON_SIZE = { fontSize: 24 };
23
+ export const DESKTOP_HEADER_ICON_SIZE = { blockSize: 32, fontSize: 20 };
24
24
  export const HEADER_ICON_SIZE = (mobile?: boolean) =>
25
25
  mobile ? MOBILE_HEADER_ICON_SIZE : DESKTOP_HEADER_ICON_SIZE;
26
26
  export const PWA_INSTALL_ID = 'pwa-install';
@@ -0,0 +1,27 @@
1
+ export const systemToUserModels = new Set([
2
+ 'o1-preview',
3
+ 'o1-preview-2024-09-12',
4
+ 'o1-mini',
5
+ 'o1-mini-2024-09-12',
6
+ ]);
7
+
8
+ // TODO: 临时写法,后续要重构成 model card 展示配置
9
+ export const disableStreamModels = new Set(['o1', 'o1-2024-12-17']);
10
+
11
+ /**
12
+ * models support context caching
13
+ */
14
+ export const contextCachingModels = new Set([
15
+ 'claude-3-7-sonnet-latest',
16
+ 'claude-3-7-sonnet-20250219',
17
+ 'claude-3-5-sonnet-latest',
18
+ 'claude-3-5-sonnet-20241022',
19
+ 'claude-3-5-sonnet-20240620',
20
+ 'claude-3-5-haiku-latest',
21
+ 'claude-3-5-haiku-20241022',
22
+ ]);
23
+
24
+ export const thinkingWithToolClaudeModels = new Set([
25
+ 'claude-3-7-sonnet-latest',
26
+ 'claude-3-7-sonnet-20250219',
27
+ ]);
@@ -7,15 +7,18 @@ import { Flexbox } from 'react-layout-kit';
7
7
 
8
8
  import { useIsMobile } from '@/hooks/useIsMobile';
9
9
  import { useAgentStore } from '@/store/agent';
10
- import { agentSelectors } from '@/store/agent/selectors';
10
+ import { agentChatConfigSelectors } from '@/store/agent/selectors';
11
11
 
12
12
  const History = memo(() => {
13
13
  const { t } = useTranslation('setting');
14
14
  const [popoverOpen, setPopoverOpen] = useState(false);
15
15
 
16
16
  const [historyCount, enableHistoryCount, updateAgentConfig] = useAgentStore((s) => {
17
- const config = agentSelectors.currentAgentChatConfig(s);
18
- return [config.historyCount, config.enableHistoryCount, s.updateAgentChatConfig];
17
+ return [
18
+ agentChatConfigSelectors.historyCount(s),
19
+ agentChatConfigSelectors.enableHistoryCount(s),
20
+ s.updateAgentChatConfig,
21
+ ];
19
22
  });
20
23
 
21
24
  const title = t(
@@ -0,0 +1,20 @@
1
+ import { Switch } from 'antd';
2
+ import { memo } from 'react';
3
+
4
+ interface ContextCachingSwitchProps {
5
+ onChange?: (value: boolean) => void;
6
+ value?: boolean;
7
+ }
8
+
9
+ const ContextCachingSwitch = memo<ContextCachingSwitchProps>(({ value, onChange }) => {
10
+ return (
11
+ <Switch
12
+ onChange={(checked) => {
13
+ onChange?.(!checked);
14
+ }}
15
+ value={!value}
16
+ />
17
+ );
18
+ });
19
+
20
+ export default ContextCachingSwitch;
@@ -1,14 +1,16 @@
1
1
  import { Form } from '@lobehub/ui';
2
2
  import type { FormItemProps } from '@lobehub/ui';
3
- import { Switch } from 'antd';
3
+ import { Form as AntdForm, Switch } from 'antd';
4
4
  import isEqual from 'fast-deep-equal';
5
+ import Link from 'next/link';
5
6
  import { memo } from 'react';
6
- import { useTranslation } from 'react-i18next';
7
+ import { Trans, useTranslation } from 'react-i18next';
7
8
 
8
9
  import { useAgentStore } from '@/store/agent';
9
- import { agentSelectors } from '@/store/agent/slices/chat';
10
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selectors';
10
11
  import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
11
12
 
13
+ import ContextCachingSwitch from './ContextCachingSwitch';
12
14
  import ReasoningTokenSlider from './ReasoningTokenSlider';
13
15
 
14
16
  const ControlsForm = memo(() => {
@@ -18,18 +20,57 @@ const ControlsForm = memo(() => {
18
20
  agentSelectors.currentAgentModelProvider(s),
19
21
  s.updateAgentChatConfig,
20
22
  ]);
21
- const config = useAgentStore(agentSelectors.currentAgentChatConfig, isEqual);
23
+ const [form] = Form.useForm();
24
+ const enableReasoning = AntdForm.useWatch(['enableReasoning'], form);
25
+
26
+ const config = useAgentStore(agentChatConfigSelectors.currentChatConfig, isEqual);
22
27
 
23
28
  const modelExtendParams = useAiInfraStore(aiModelSelectors.modelExtendParams(model, provider));
24
29
 
25
- const items: FormItemProps[] = [
30
+ const items = [
31
+ {
32
+ children: <ContextCachingSwitch />,
33
+ desc: (
34
+ <span style={{ display: 'inline-block', width: 300 }}>
35
+ <Trans i18nKey={'extendParams.disableContextCaching.desc'} ns={'chat'}>
36
+ 单条对话生成成本最高可降低 90%,响应速度提升 4 倍(
37
+ <Link
38
+ href={'https://www.anthropic.com/news/prompt-caching?utm_source=lobechat'}
39
+ rel={'nofollow'}
40
+ >
41
+ 了解更多
42
+ </Link>
43
+ )。开启后将自动禁用历史记录限制
44
+ </Trans>
45
+ </span>
46
+ ),
47
+ label: t('extendParams.disableContextCaching.title'),
48
+ minWidth: undefined,
49
+ name: 'disableContextCaching',
50
+ },
26
51
  {
27
52
  children: <Switch />,
53
+ desc: (
54
+ <span style={{ display: 'inline-block', width: 300 }}>
55
+ <Trans i18nKey={'extendParams.enableReasoning.desc'} ns={'chat'}>
56
+ 基于 Claude Thinking 机制限制(
57
+ <Link
58
+ href={
59
+ 'https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking?utm_source=lobechat#why-thinking-blocks-must-be-preserved'
60
+ }
61
+ rel={'nofollow'}
62
+ >
63
+ 了解更多
64
+ </Link>
65
+ ),开启后将自动禁用历史消息数限制
66
+ </Trans>
67
+ </span>
68
+ ),
28
69
  label: t('extendParams.enableReasoning.title'),
29
70
  minWidth: undefined,
30
71
  name: 'enableReasoning',
31
72
  },
32
- {
73
+ enableReasoning && {
33
74
  children: <ReasoningTokenSlider />,
34
75
  label: t('extendParams.reasoningBudgetToken.title'),
35
76
  layout: 'vertical',
@@ -39,10 +80,11 @@ const ControlsForm = memo(() => {
39
80
  paddingBottom: 0,
40
81
  },
41
82
  },
42
- ];
83
+ ].filter(Boolean) as FormItemProps[];
43
84
 
44
85
  return (
45
86
  <Form
87
+ form={form}
46
88
  initialValues={config}
47
89
  items={
48
90
  (modelExtendParams || [])
@@ -6,7 +6,6 @@ import useMergeState from 'use-merge-value';
6
6
  const Kibi = 1024;
7
7
 
8
8
  const exponent = (num: number) => Math.log2(num);
9
- const getRealValue = (num: number) => Math.round(Math.pow(2, num));
10
9
  const powerKibi = (num: number) => Math.round(Math.pow(2, num) * Kibi);
11
10
 
12
11
  interface MaxTokenSliderProps {
@@ -15,7 +14,7 @@ interface MaxTokenSliderProps {
15
14
  value?: number;
16
15
  }
17
16
 
18
- const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValue }) => {
17
+ const ReasoningTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValue }) => {
19
18
  const [token, setTokens] = useMergeState(0, {
20
19
  defaultValue,
21
20
  onChange,
@@ -30,7 +29,7 @@ const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValu
30
29
  const updateWithPowValue = (value: number) => {
31
30
  setPowValue(value);
32
31
 
33
- setTokens(powerKibi(value));
32
+ setTokens(Math.min(powerKibi(value), 64_000));
34
33
  };
35
34
 
36
35
  const updateWithRealValue = (value: number) => {
@@ -52,7 +51,7 @@ const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValu
52
51
  }, []);
53
52
 
54
53
  return (
55
- <Flexbox align={'center'} gap={12} horizontal>
54
+ <Flexbox align={'center'} gap={12} horizontal paddingInline={'4px 0'}>
56
55
  <Flexbox flex={1}>
57
56
  <Slider
58
57
  marks={marks}
@@ -60,21 +59,14 @@ const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValu
60
59
  min={exponent(1)}
61
60
  onChange={updateWithPowValue}
62
61
  step={null}
63
- tooltip={{
64
- formatter: (x) => {
65
- if (typeof x === 'undefined') return;
66
-
67
- let value = getRealValue(x);
68
-
69
- if (value < Kibi) return ((value * Kibi) / 1000).toFixed(0) + 'k';
70
- },
71
- }}
62
+ tooltip={{ open: false }}
72
63
  value={powValue}
73
64
  />
74
65
  </Flexbox>
75
66
  <div>
76
67
  <InputNumber
77
68
  changeOnWheel
69
+ max={64_000}
78
70
  min={0}
79
71
  onChange={(e) => {
80
72
  if (!e && e !== 0) return;
@@ -89,4 +81,4 @@ const MaxTokenSlider = memo<MaxTokenSliderProps>(({ value, onChange, defaultValu
89
81
  </Flexbox>
90
82
  );
91
83
  });
92
- export default MaxTokenSlider;
84
+ export default ReasoningTokenSlider;
@@ -7,7 +7,7 @@ import { useTranslation } from 'react-i18next';
7
7
  import { Flexbox } from 'react-layout-kit';
8
8
 
9
9
  import { useAgentStore } from '@/store/agent';
10
- import { agentSelectors } from '@/store/agent/selectors';
10
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selectors';
11
11
  import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
12
12
 
13
13
  import ExaIcon from './ExaIcon';
@@ -37,7 +37,7 @@ const ModelBuiltinSearch = memo(() => {
37
37
  const [model, provider, checked, updateAgentChatConfig] = useAgentStore((s) => [
38
38
  agentSelectors.currentAgentModel(s),
39
39
  agentSelectors.currentAgentModelProvider(s),
40
- agentSelectors.currentAgentChatConfig(s).useModelBuiltinSearch,
40
+ agentChatConfigSelectors.useModelBuiltinSearch(s),
41
41
  s.updateAgentChatConfig,
42
42
  ]);
43
43
 
@@ -8,7 +8,7 @@ import { useTranslation } from 'react-i18next';
8
8
  import { Flexbox } from 'react-layout-kit';
9
9
 
10
10
  import { useAgentStore } from '@/store/agent';
11
- import { agentSelectors } from '@/store/agent/slices/chat';
11
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/slices/chat';
12
12
  import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
13
13
  import { SearchMode } from '@/types/search';
14
14
 
@@ -84,7 +84,7 @@ const Item = memo<NetworkOption>(({ value, description, icon, label, disable })
84
84
  const { t } = useTranslation('chat');
85
85
  const { styles } = useStyles();
86
86
  const [mode, updateAgentChatConfig] = useAgentStore((s) => [
87
- agentSelectors.agentSearchMode(s),
87
+ agentChatConfigSelectors.agentSearchMode(s),
88
88
  s.updateAgentChatConfig,
89
89
  ]);
90
90
 
@@ -10,7 +10,7 @@ import { useModelContextWindowTokens } from '@/hooks/useModelContextWindowTokens
10
10
  import { useModelSupportToolUse } from '@/hooks/useModelSupportToolUse';
11
11
  import { useTokenCount } from '@/hooks/useTokenCount';
12
12
  import { useAgentStore } from '@/store/agent';
13
- import { agentSelectors } from '@/store/agent/selectors';
13
+ import { agentChatConfigSelectors, agentSelectors } from '@/store/agent/selectors';
14
14
  import { useChatStore } from '@/store/chat';
15
15
  import { topicSelectors } from '@/store/chat/selectors';
16
16
  import { useToolStore } from '@/store/tool';
@@ -31,15 +31,13 @@ const Token = memo<TokenTagProps>(({ total: messageString }) => {
31
31
  ]);
32
32
 
33
33
  const [systemRole, model, provider] = useAgentStore((s) => {
34
- const config = agentSelectors.currentAgentChatConfig(s);
35
-
36
34
  return [
37
35
  agentSelectors.currentAgentSystemRole(s),
38
36
  agentSelectors.currentAgentModel(s) as string,
39
37
  agentSelectors.currentAgentModelProvider(s) as string,
40
38
  // add these two params to enable the component to re-render
41
- config.historyCount,
42
- config.enableHistoryCount,
39
+ agentChatConfigSelectors.historyCount(s),
40
+ agentChatConfigSelectors.enableHistoryCount(s),
43
41
  ];
44
42
  });
45
43
 
@@ -57,4 +57,6 @@ const CustomRender = memo<CustomRenderProps>(
57
57
  },
58
58
  );
59
59
 
60
+ CustomRender.displayName = 'CustomRender';
61
+
60
62
  export default CustomRender;
@@ -1,3 +1,4 @@
1
+ import isEqual from 'fast-deep-equal';
1
2
  import { Suspense, memo } from 'react';
2
3
 
3
4
  import { LOADING_FLAT } from '@/const/message';
@@ -16,10 +17,11 @@ interface RenderProps {
16
17
  toolCallId: string;
17
18
  toolIndex: number;
18
19
  }
20
+
19
21
  const Render = memo<RenderProps>(
20
22
  ({ toolCallId, toolIndex, messageId, requestArgs, showPluginRender, setShowPluginRender }) => {
21
23
  const loading = useChatStore(chatSelectors.isToolCallStreaming(messageId, toolIndex));
22
- const toolMessage = useChatStore(chatSelectors.getMessageByToolCallId(toolCallId));
24
+ const toolMessage = useChatStore(chatSelectors.getMessageByToolCallId(toolCallId), isEqual);
23
25
 
24
26
  // 如果处于 loading 或者找不到 toolMessage 则展示 Arguments
25
27
  if (loading || !toolMessage) return <Arguments arguments={requestArgs} />;
@@ -48,4 +50,6 @@ const Render = memo<RenderProps>(
48
50
  },
49
51
  );
50
52
 
53
+ Render.displayName = 'ToolRender';
54
+
51
55
  export default Render;
@@ -52,4 +52,6 @@ const Tool = memo<InspectorProps>(
52
52
  },
53
53
  );
54
54
 
55
+ Tool.displayName = 'AssistantTool';
56
+
55
57
  export default Tool;
@@ -8,7 +8,7 @@ import { useTranslation } from 'react-i18next';
8
8
  import { Flexbox } from 'react-layout-kit';
9
9
 
10
10
  import { useAgentStore } from '@/store/agent';
11
- import { agentSelectors } from '@/store/agent/selectors';
11
+ import { agentChatConfigSelectors } from '@/store/agent/selectors';
12
12
  import { useChatStore } from '@/store/chat';
13
13
  import { chatSelectors } from '@/store/chat/selectors';
14
14
  import { useUserStore } from '@/store/user';
@@ -65,15 +65,12 @@ const Item = memo<ChatListItemProps>(
65
65
  disableEditing,
66
66
  inPortalThread = false,
67
67
  }) => {
68
- const fontSize = useUserStore(userGeneralSettingsSelectors.fontSize);
69
68
  const { t } = useTranslation('common');
70
69
  const { styles, cx } = useStyles();
71
- const [type = 'chat'] = useAgentStore((s) => {
72
- const config = agentSelectors.currentAgentChatConfig(s);
73
- return [config.displayMode];
74
- });
75
70
 
71
+ const type = useAgentStore(agentChatConfigSelectors.displayMode);
76
72
  const item = useChatStore(chatSelectors.getMessageById(id), isEqual);
73
+ const fontSize = useUserStore(userGeneralSettingsSelectors.fontSize);
77
74
 
78
75
  const [
79
76
  isMessageLoading,