@lobehub/lobehub 2.0.0-next.51 → 2.0.0-next.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/CHANGELOG.md +58 -0
  2. package/README.md +8 -8
  3. package/README.zh-CN.md +8 -8
  4. package/apps/desktop/package.json +1 -1
  5. package/apps/desktop/src/main/controllers/LocalFileCtr.ts +25 -5
  6. package/apps/desktop/src/main/controllers/__tests__/LocalFileCtr.test.ts +4 -1
  7. package/apps/desktop/src/main/modules/fileSearch/__tests__/macOS.integration.test.ts +357 -0
  8. package/apps/desktop/src/main/modules/fileSearch/impl/macOS.ts +30 -22
  9. package/changelog/v1.json +21 -0
  10. package/locales/ar/models.json +119 -126
  11. package/locales/ar/plugin.json +1 -1
  12. package/locales/bg-BG/models.json +104 -132
  13. package/locales/bg-BG/plugin.json +1 -1
  14. package/locales/de-DE/models.json +119 -126
  15. package/locales/de-DE/plugin.json +1 -1
  16. package/locales/en-US/models.json +167 -126
  17. package/locales/en-US/plugin.json +1 -1
  18. package/locales/es-ES/models.json +119 -126
  19. package/locales/es-ES/plugin.json +1 -1
  20. package/locales/fa-IR/models.json +119 -126
  21. package/locales/fa-IR/plugin.json +1 -1
  22. package/locales/fr-FR/models.json +119 -126
  23. package/locales/fr-FR/plugin.json +1 -1
  24. package/locales/it-IT/models.json +119 -126
  25. package/locales/it-IT/plugin.json +1 -1
  26. package/locales/ja-JP/models.json +119 -126
  27. package/locales/ja-JP/plugin.json +1 -1
  28. package/locales/ko-KR/models.json +119 -126
  29. package/locales/ko-KR/plugin.json +1 -1
  30. package/locales/nl-NL/models.json +119 -126
  31. package/locales/nl-NL/plugin.json +1 -1
  32. package/locales/pl-PL/models.json +119 -126
  33. package/locales/pl-PL/plugin.json +1 -1
  34. package/locales/pt-BR/models.json +119 -126
  35. package/locales/pt-BR/plugin.json +1 -1
  36. package/locales/ru-RU/models.json +119 -126
  37. package/locales/ru-RU/plugin.json +1 -1
  38. package/locales/tr-TR/models.json +119 -126
  39. package/locales/tr-TR/plugin.json +1 -1
  40. package/locales/vi-VN/models.json +119 -126
  41. package/locales/vi-VN/plugin.json +1 -1
  42. package/locales/zh-CN/models.json +173 -80
  43. package/locales/zh-CN/plugin.json +1 -1
  44. package/locales/zh-TW/models.json +119 -126
  45. package/locales/zh-TW/plugin.json +1 -1
  46. package/package.json +2 -2
  47. package/packages/const/src/models.ts +2 -0
  48. package/packages/electron-client-ipc/src/types/localSystem.ts +26 -2
  49. package/packages/electron-server-ipc/src/ipcClient.ts +31 -31
  50. package/packages/electron-server-ipc/src/ipcServer.ts +15 -15
  51. package/packages/model-bank/src/aiModels/aihubmix.ts +106 -2
  52. package/packages/model-bank/src/aiModels/openai.ts +107 -3
  53. package/packages/model-bank/src/aiModels/qwen.ts +76 -7
  54. package/packages/model-bank/src/types/aiModel.ts +1 -0
  55. package/packages/model-runtime/src/core/contextBuilders/openai.test.ts +58 -0
  56. package/packages/model-runtime/src/core/contextBuilders/openai.ts +24 -10
  57. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +3 -2
  58. package/packages/model-runtime/src/providers/openai/index.test.ts +44 -0
  59. package/packages/types/src/agent/chatConfig.ts +9 -0
  60. package/packages/types/src/tool/builtin.ts +6 -4
  61. package/src/app/[variants]/(main)/chat/components/WorkspaceLayout.tsx +32 -23
  62. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +12 -0
  63. package/src/features/ChatInput/ActionBar/Model/GPT51ReasoningEffortSlider.tsx +58 -0
  64. package/src/features/ChatItem/components/MessageContent.tsx +3 -1
  65. package/src/features/Conversation/Messages/Assistant/Tool/Render/LoadingPlaceholder/index.tsx +3 -3
  66. package/src/features/Conversation/Messages/Group/Tool/Render/Intervention/ApprovalActions.tsx +34 -13
  67. package/src/features/Conversation/Messages/Group/Tool/Render/Intervention/index.tsx +2 -2
  68. package/src/features/Conversation/Messages/Group/Tool/Render/LoadingPlaceholder/index.tsx +3 -3
  69. package/src/features/Conversation/Messages/User/index.tsx +11 -1
  70. package/src/features/PluginsUI/Render/BuiltinType/index.test.tsx +10 -4
  71. package/src/features/PluginsUI/Render/BuiltinType/index.tsx +2 -2
  72. package/src/libs/mcp/__tests__/__snapshots__/index.test.ts.snap +0 -6
  73. package/src/locales/default/chat.ts +2 -0
  74. package/src/locales/default/plugin.ts +1 -1
  75. package/src/services/chat/chat.test.ts +1 -0
  76. package/src/services/chat/index.ts +7 -0
  77. package/src/store/aiInfra/slices/aiProvider/__tests__/selectors.test.ts +62 -0
  78. package/src/store/aiInfra/slices/aiProvider/selectors.ts +1 -1
  79. package/src/store/chat/slices/aiChat/actions/conversationControl.ts +42 -0
  80. package/src/tools/code-interpreter/Render/index.tsx +1 -1
  81. package/src/tools/interventions.ts +28 -4
  82. package/src/tools/local-system/Intervention/RunCommand/index.tsx +4 -5
  83. package/src/tools/local-system/Placeholder/ListFiles.tsx +3 -5
  84. package/src/tools/local-system/Placeholder/SearchFiles.tsx +2 -5
  85. package/src/tools/local-system/Render/ListFiles/index.tsx +16 -21
  86. package/src/tools/local-system/Render/ReadLocalFile/ReadFileView.tsx +2 -1
  87. package/src/tools/local-system/Render/RenameLocalFile/index.tsx +15 -20
  88. package/src/tools/local-system/Render/RunCommand/index.tsx +67 -70
  89. package/src/tools/local-system/Render/SearchFiles/SearchQuery/index.tsx +0 -1
  90. package/src/tools/local-system/Render/SearchFiles/index.tsx +15 -20
  91. package/src/tools/local-system/Render/WriteFile/index.tsx +2 -8
  92. package/src/tools/local-system/index.ts +5 -4
  93. package/src/tools/local-system/systemRole.ts +1 -1
  94. package/src/tools/placeholders.ts +39 -8
  95. package/src/tools/renders.ts +56 -9
  96. package/src/tools/web-browsing/Placeholder/{PageContent.tsx → CrawlMultiPages.tsx} +4 -1
  97. package/src/tools/web-browsing/Placeholder/CrawlSinglePage.tsx +12 -0
  98. package/src/tools/web-browsing/Placeholder/Search.tsx +4 -4
  99. package/src/tools/web-browsing/Render/CrawlMultiPages.tsx +15 -0
  100. package/src/tools/web-browsing/Render/CrawlSinglePage.tsx +15 -0
  101. package/src/tools/web-browsing/Render/Search/index.tsx +39 -44
  102. package/packages/database/migrations/0044_add_tool_intervention.sql +0 -1
  103. package/src/tools/local-system/Intervention/index.tsx +0 -17
  104. package/src/tools/local-system/Placeholder/index.tsx +0 -25
  105. package/src/tools/local-system/Render/index.tsx +0 -42
  106. package/src/tools/web-browsing/Placeholder/index.tsx +0 -40
  107. package/src/tools/web-browsing/Render/index.tsx +0 -57
@@ -1016,6 +1016,71 @@ const qwenChatModels: AIChatModelCard[] = [
     id: 'qwen3-max',
     maxOutput: 65_536,
     organization: 'Qwen',
+    pricing: {
+      currency: 'CNY',
+      units: [
+        {
+          lookup: {
+            prices: {
+              '[0, 0.032]': 3.2 * 0.2,
+              '[0.032, 0.128]': 6.4 * 0.2,
+              '[0.128, infinity]': 9.6 * 0.2,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput_cacheRead',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 0.032]': 3.2,
+              '[0.032, 0.128]': 6.4,
+              '[0.128, infinity]': 9.6,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textInput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+        {
+          lookup: {
+            prices: {
+              '[0, 0.032]': 12.8,
+              '[0.032, 0.128]': 25.6,
+              '[0.128, infinity]': 38.4,
+            },
+            pricingParams: ['textInputRange'],
+          },
+          name: 'textOutput',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-09-23',
+    settings: {
+      searchImpl: 'params',
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+    },
+    config: {
+      deploymentName: 'qwen3-max-preview', // 其支持上下文缓存
+    },
+    contextWindowTokens: 262_144,
+    description: '通义千问系列效果最好的模型,适合复杂、多步骤的任务。预览版已支持思考。',
+    displayName: 'Qwen3 Max Preview',
+    id: 'qwen3-max-preview',
+    maxOutput: 65_536,
+    organization: 'Qwen',
     pricing: {
       currency: 'CNY',
       units: [
@@ -1060,8 +1125,9 @@ const qwenChatModels: AIChatModelCard[] = [
         },
       ],
     },
-    releasedAt: '2025-09-23',
+    releasedAt: '2025-10-30',
     settings: {
+      extendParams: ['enableReasoning', 'reasoningBudgetToken'],
       searchImpl: 'params',
     },
     type: 'chat',
@@ -1260,8 +1326,8 @@ const qwenChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
-      vision: true,
       reasoning: true,
+      vision: true,
     },
     contextWindowTokens: 131_072,
     description:
@@ -1287,7 +1353,8 @@ const qwenChatModels: AIChatModelCard[] = [
       vision: true,
     },
     contextWindowTokens: 131_072,
-    description: 'Qwen3 VL 30B 非思考模式(Instruct),面向普通指令跟随场景,保持较高的多模态理解与生成能力。',
+    description:
+      'Qwen3 VL 30B 非思考模式(Instruct),面向普通指令跟随场景,保持较高的多模态理解与生成能力。',
     displayName: 'Qwen3 VL 30B A3B Instruct',
     id: 'qwen3-vl-30b-a3b-instruct',
     maxOutput: 32_768,
@@ -1303,8 +1370,8 @@ const qwenChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
-      vision: true,
       reasoning: true,
+      vision: true,
     },
     contextWindowTokens: 131_072,
     description: 'Qwen3 VL 8B 思考模式,面向轻量级多模态推理与交互场景,保留长上下文理解能力。',
@@ -1342,11 +1409,12 @@ const qwenChatModels: AIChatModelCard[] = [
   },
   {
     abilities: {
-      vision: true,
       reasoning: true,
+      vision: true,
     },
     contextWindowTokens: 131_072,
-    description: 'Qwen3 VL 235B A22B 思考模式(开源版),针对高难度强推理与长视频理解场景,提供顶尖的视觉+文本推理能力。',
+    description:
+      'Qwen3 VL 235B A22B 思考模式(开源版),针对高难度强推理与长视频理解场景,提供顶尖的视觉+文本推理能力。',
     displayName: 'Qwen3 VL 235B A22B Thinking',
     id: 'qwen3-vl-235b-a22b-thinking',
     maxOutput: 32_768,
@@ -1368,7 +1436,8 @@ const qwenChatModels: AIChatModelCard[] = [
       vision: true,
     },
     contextWindowTokens: 131_072,
-    description: 'Qwen3 VL 235B A22B 非思考模式(Instruct),适用于非思考指令场景,保持强大的视觉理解能力。',
+    description:
+      'Qwen3 VL 235B A22B 非思考模式(Instruct),适用于非思考指令场景,保持强大的视觉理解能力。',
     displayName: 'Qwen3 VL 235B A22B Instruct',
     id: 'qwen3-vl-235b-a22b-instruct',
     maxOutput: 32_768,
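
The new Qwen3 Max pricing above uses the lookup strategy: the bracketed keys are textInputRange tiers expressed in millions of input tokens, and the textInput_cacheRead unit reuses each tier's base input price at a 0.2 multiplier. A minimal sketch of how such a unit could be resolved; the TieredPricingUnit shape and resolveTieredPrice helper are illustrative assumptions, not the model-bank API:

interface TieredPricingUnit {
  lookup: { prices: Record<string, number>; pricingParams: string[] };
  name: string;
  strategy: 'lookup';
  unit: 'millionTokens';
}

// Resolve a CNY-per-million-tokens price for a prompt size given in millions of tokens,
// by matching the '[min, max]' / '[min, infinity]' range keys used in the data above.
const resolveTieredPrice = (unit: TieredPricingUnit, textInputRange: number): number | undefined => {
  for (const [range, price] of Object.entries(unit.lookup.prices)) {
    const [min, max] = range.replace(/[[\]]/g, '').split(',').map((part) => part.trim());
    const upper = max === 'infinity' ? Infinity : Number(max);
    if (textInputRange >= Number(min) && textInputRange < upper) return price;
  }
  return undefined;
};

// e.g. a 50k-token prompt (0.05M tokens) falls in '[0.032, 0.128]' → 6.4 CNY per million input tokens
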
@@ -234,6 +234,7 @@ export type ExtendParamsType =
   | 'disableContextCaching'
   | 'reasoningEffort'
   | 'gpt5ReasoningEffort'
+  | 'gpt5_1ReasoningEffort'
   | 'textVerbosity'
   | 'thinking'
   | 'thinkingBudget'
@@ -150,6 +150,64 @@ describe('convertOpenAIMessages', () => {
 
     expect(Promise.all).toHaveBeenCalledTimes(2); // 一次用于消息数组,一次用于内容数组
   });
+
+  it('should filter out reasoning field from messages', async () => {
+    const messages = [
+      {
+        role: 'assistant',
+        content: 'Hello',
+        reasoning: { content: 'some reasoning', duration: 100 },
+      },
+      { role: 'user', content: 'Hi' },
+    ] as any;
+
+    const result = await convertOpenAIMessages(messages);
+
+    expect(result).toEqual([
+      { role: 'assistant', content: 'Hello' },
+      { role: 'user', content: 'Hi' },
+    ]);
+    // Ensure reasoning field is removed
+    expect((result[0] as any).reasoning).toBeUndefined();
+  });
+
+  it('should filter out reasoning_content field from messages', async () => {
+    const messages = [
+      {
+        role: 'assistant',
+        content: 'Hello',
+        reasoning_content: 'some reasoning content',
+      },
+      { role: 'user', content: 'Hi' },
+    ] as any;
+
+    const result = await convertOpenAIMessages(messages);
+
+    expect(result).toEqual([
+      { role: 'assistant', content: 'Hello' },
+      { role: 'user', content: 'Hi' },
+    ]);
+    // Ensure reasoning_content field is removed
+    expect((result[0] as any).reasoning_content).toBeUndefined();
+  });
+
+  it('should filter out both reasoning and reasoning_content fields from messages', async () => {
+    const messages = [
+      {
+        role: 'assistant',
+        content: 'Hello',
+        reasoning: { content: 'some reasoning', duration: 100 },
+        reasoning_content: 'some reasoning content',
+      },
+    ] as any;
+
+    const result = await convertOpenAIMessages(messages);
+
+    expect(result).toEqual([{ role: 'assistant', content: 'Hello' }]);
+    // Ensure both fields are removed
+    expect((result[0] as any).reasoning).toBeUndefined();
+    expect((result[0] as any).reasoning_content).toBeUndefined();
+  });
 });
 
 describe('convertOpenAIResponseInputs', () => {
@@ -26,17 +26,31 @@ export const convertMessageContent = async (
 
 export const convertOpenAIMessages = async (messages: OpenAI.ChatCompletionMessageParam[]) => {
   return (await Promise.all(
-    messages.map(async (message) => ({
-      ...message,
-      content:
-        typeof message.content === 'string'
-          ? message.content
-          : await Promise.all(
-              (message.content || []).map((c) =>
-                convertMessageContent(c as OpenAI.ChatCompletionContentPart),
+    messages.map(async (message) => {
+      const msg = message as any;
+
+      // Explicitly map only valid ChatCompletionMessageParam fields
+      // Exclude reasoning and reasoning_content fields as they should not be sent in requests
+      const result: any = {
+        content:
+          typeof message.content === 'string'
+            ? message.content
+            : await Promise.all(
+                (message.content || []).map((c) =>
+                  convertMessageContent(c as OpenAI.ChatCompletionContentPart),
+                ),
               ),
-            ),
-    })),
+        role: msg.role,
+      };
+
+      // Add optional fields if they exist
+      if (msg.name !== undefined) result.name = msg.name;
+      if (msg.tool_calls !== undefined) result.tool_calls = msg.tool_calls;
+      if (msg.tool_call_id !== undefined) result.tool_call_id = msg.tool_call_id;
+      if (msg.function_call !== undefined) result.function_call = msg.function_call;
+
+      return result;
+    }),
   )) as OpenAI.ChatCompletionMessageParam[];
 };
 
@@ -766,12 +766,12 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
 
       const inputStartAt = Date.now();
 
-      const { messages, reasoning_effort, tools, reasoning, responseMode, ...res } =
+      const { messages, reasoning_effort, tools, reasoning, responseMode, max_tokens, ...res } =
        responses?.handlePayload
          ? (responses?.handlePayload(payload, this._options) as ChatStreamPayload)
          : payload;
 
-      // remove penalty params
+      // remove penalty params and chat completion specific params
       delete res.apiMode;
       delete res.frequency_penalty;
       delete res.presence_penalty;
@@ -797,6 +797,7 @@
            }
          : {}),
        input,
+       ...(max_tokens && { max_output_tokens: max_tokens }),
       store: false,
       stream: !isStreaming ? undefined : isStreaming,
       tools: tools?.map((tool) => this.convertChatCompletionToolToResponseTool(tool)),
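
Taken together, the two hunks above pull max_tokens out of the chat payload and forward it to the Responses API under its own parameter name, max_output_tokens. A rough standalone sketch of that mapping; the function name and payload shape are illustrative, not the full factory:

// Illustrative only: Chat Completions uses `max_tokens`, the Responses API uses `max_output_tokens`.
const toResponsesPayload = (payload: { input: unknown; max_tokens?: number }) => ({
  input: payload.input,
  // forward the cap only when it was actually set, mirroring `...(max_tokens && { ... })` above
  ...(payload.max_tokens && { max_output_tokens: payload.max_tokens }),
  store: false,
});
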
@@ -409,6 +409,50 @@ describe('LobeOpenAI', () => {
       const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
       expect(createCall.reasoning).toEqual({ effort: 'high', summary: 'auto' });
     });
+
+    it('should convert max_tokens to max_output_tokens for responses API', async () => {
+      const payload = {
+        max_tokens: 2048,
+        messages: [{ content: 'Hello', role: 'user' as const }],
+        model: 'o1-pro',
+        temperature: 0.7,
+      };
+
+      await instance.chat(payload);
+
+      const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
+      expect(createCall.max_output_tokens).toBe(2048);
+      expect(createCall.max_tokens).toBeUndefined();
+    });
+
+    it('should not include max_output_tokens when max_tokens is undefined', async () => {
+      const payload = {
+        messages: [{ content: 'Hello', role: 'user' as const }],
+        model: 'o1-pro',
+        temperature: 0.7,
+      };
+
+      await instance.chat(payload);
+
+      const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
+      expect(createCall.max_output_tokens).toBeUndefined();
+    });
+
+    it('should convert max_tokens to max_output_tokens for search-enabled models', async () => {
+      const payload = {
+        enabledSearch: true,
+        max_tokens: 4096,
+        messages: [{ content: 'Hello', role: 'user' as const }],
+        model: 'gpt-4o',
+        temperature: 0.7,
+      };
+
+      await instance.chat(payload);
+
+      const createCall = (instance['client'].responses.create as Mock).mock.calls[0][0];
+      expect(createCall.max_output_tokens).toBe(4096);
+      expect(createCall.max_tokens).toBeUndefined();
+    });
   });
 
   describe('supportsFlexTier', () => {
@@ -32,6 +32,7 @@ export interface LobeAgentChatConfig {
   reasoningBudgetToken?: number;
   reasoningEffort?: 'low' | 'medium' | 'high';
   gpt5ReasoningEffort?: 'minimal' | 'low' | 'medium' | 'high';
+  gpt5_1ReasoningEffort?: 'none' | 'low' | 'medium' | 'high';
   /**
    * 输出文本详细程度控制
    */
@@ -66,6 +67,7 @@
 
 export const AgentChatConfigSchema = z.object({
   autoCreateTopicThreshold: z.number().default(2),
+  disableContextCaching: z.boolean().optional(),
   displayMode: z.enum(['chat', 'docs']).optional(),
   enableAutoCreateTopic: z.boolean().optional(),
   enableCompressHistory: z.boolean().optional(),
@@ -74,8 +76,11 @@ export const AgentChatConfigSchema = z.object({
   enableReasoning: z.boolean().optional(),
   enableReasoningEffort: z.boolean().optional(),
   enableStreaming: z.boolean().optional(),
+  gpt5ReasoningEffort: z.enum(['minimal', 'low', 'medium', 'high']).optional(),
+  gpt5_1ReasoningEffort: z.enum(['none', 'low', 'medium', 'high']).optional(),
   historyCount: z.number().optional(),
   reasoningBudgetToken: z.number().optional(),
+  reasoningEffort: z.enum(['low', 'medium', 'high']).optional(),
   searchFCModel: z
     .object({
       model: z.string(),
@@ -84,4 +89,8 @@
     .optional(),
   searchMode: z.enum(['off', 'on', 'auto']).optional(),
   textVerbosity: z.enum(['low', 'medium', 'high']).optional(),
+  thinking: z.enum(['disabled', 'auto', 'enabled']).optional(),
+  thinkingBudget: z.number().optional(),
+  urlContext: z.boolean().optional(),
+  useModelBuiltinSearch: z.boolean().optional(),
 });
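
A quick usage sketch of the extended AgentChatConfigSchema; the import path is an assumption (the schema lives in packages/types/src/agent/chatConfig.ts), and the field values mirror the newly added optional keys:

// Hedged sketch: validate a chat config that uses the newly added optional fields.
import { AgentChatConfigSchema } from '@lobechat/types'; // assumed package entry point

const parsed = AgentChatConfigSchema.parse({
  autoCreateTopicThreshold: 2,
  gpt5_1ReasoningEffort: 'none',
  thinking: 'auto',
  thinkingBudget: 2048,
  useModelBuiltinSearch: true,
});

// parsed.gpt5_1ReasoningEffort === 'none'; unknown keys are stripped by z.object()
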
@@ -115,7 +115,7 @@ export const LobeBuiltinToolSchema = z.object({
   type: z.literal('builtin'),
 });
 
-export interface BuiltinRenderProps<Content = any, Arguments = any, State = any> {
+export interface BuiltinRenderProps<Arguments = any, State = any, Content = any> {
   apiName?: string;
   args: Arguments;
   content: Content;
@@ -125,7 +125,9 @@ export interface BuiltinRenderProps<Content = any, Arguments = any, State = any>
   pluginState?: State;
 }
 
-export type BuiltinRender = <T = any>(props: BuiltinRenderProps<T>) => ReactNode;
+export type BuiltinRender = <A = any, S = any, C = any>(
+  props: BuiltinRenderProps<A, S, C>,
+) => ReactNode;
 
 export interface BuiltinPortalProps<Arguments = Record<string, any>, State = any> {
   apiName?: string;
@@ -137,9 +139,9 @@ export interface BuiltinPortalProps<Arguments = Record<string, any>, State = any
 
 export type BuiltinPortal = <T = any>(props: BuiltinPortalProps<T>) => ReactNode;
 
-export interface BuiltinPlaceholderProps {
+export interface BuiltinPlaceholderProps<T extends Record<string, any> = any> {
   apiName: string;
-  args?: Record<string, any>;
+  args?: T;
   identifier: string;
 }
 
@@ -1,3 +1,4 @@
+import { useTheme } from 'antd-style';
 import { Suspense, memo } from 'react';
 import { Flexbox } from 'react-layout-kit';
 
@@ -18,30 +19,38 @@ interface WorkspaceLayoutProps {
   mobile?: boolean;
 }
 
-const DesktopWorkspace = memo(() => (
-  <>
-    <ChatHeaderDesktop />
-    <Flexbox
-      height={'100%'}
-      horizontal
-      style={{ overflow: 'hidden', position: 'relative' }}
-      width={'100%'}
-    >
-      <Flexbox height={'100%'} style={{ overflow: 'hidden', position: 'relative' }} width={'100%'}>
-        <ConversationArea mobile={false} />
+const DesktopWorkspace = memo(() => {
+  const theme = useTheme();
+
+  return (
+    <>
+      <ChatHeaderDesktop />
+      <Flexbox
+        height={'100%'}
+        horizontal
+        style={{ overflow: 'hidden', position: 'relative' }}
+        width={'100%'}
+      >
+        <Flexbox
+          height={'100%'}
+          style={{ background: theme.colorBgContainer, overflow: 'hidden', position: 'relative' }}
+          width={'100%'}
+        >
+          <ConversationArea mobile={false} />
+        </Flexbox>
+        <Portal>
+          <Suspense fallback={<BrandTextLoading />}>
+            <PortalPanel mobile={false} />
+          </Suspense>
+        </Portal>
+        <TopicPanel>
+          <TopicSidebar mobile={false} />
+        </TopicPanel>
       </Flexbox>
-      <Portal>
-        <Suspense fallback={<BrandTextLoading />}>
-          <PortalPanel mobile={false} />
-        </Suspense>
-      </Portal>
-      <TopicPanel>
-        <TopicSidebar mobile={false} />
-      </TopicPanel>
-    </Flexbox>
-    <MainInterfaceTracker />
-  </>
-));
+      <MainInterfaceTracker />
+    </>
+  );
+});
 
 DesktopWorkspace.displayName = 'DesktopWorkspace';
 
@@ -12,6 +12,7 @@ import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';
 
 import ContextCachingSwitch from './ContextCachingSwitch';
 import GPT5ReasoningEffortSlider from './GPT5ReasoningEffortSlider';
+import GPT51ReasoningEffortSlider from './GPT51ReasoningEffortSlider';
 import ReasoningEffortSlider from './ReasoningEffortSlider';
 import ReasoningTokenSlider from './ReasoningTokenSlider';
 import TextVerbositySlider from './TextVerbositySlider';
@@ -119,6 +120,17 @@ const ControlsForm = memo(() => {
         paddingBottom: 0,
       },
     },
+    {
+      children: <GPT51ReasoningEffortSlider />,
+      desc: 'reasoning_effort',
+      label: t('extendParams.reasoningEffort.title'),
+      layout: 'horizontal',
+      minWidth: undefined,
+      name: 'gpt5_1ReasoningEffort',
+      style: {
+        paddingBottom: 0,
+      },
+    },
     {
       children: <TextVerbositySlider />,
       desc: 'text_verbosity',
@@ -0,0 +1,58 @@
+import { Slider } from 'antd';
+import { memo, useCallback } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import { useAgentStore } from '@/store/agent';
+import { agentChatConfigSelectors } from '@/store/agent/selectors';
+
+const GPT51ReasoningEffortSlider = memo(() => {
+  const [config, updateAgentChatConfig] = useAgentStore((s) => [
+    agentChatConfigSelectors.currentChatConfig(s),
+    s.updateAgentChatConfig,
+  ]);
+
+  const gpt5_1ReasoningEffort = config.gpt5_1ReasoningEffort || 'none'; // Default to 'none' if not set
+
+  const marks = {
+    0: 'none',
+    1: 'low',
+    2: 'medium',
+    3: 'high',
+  };
+
+  const effortValues = ['none', 'low', 'medium', 'high'];
+  const indexValue = effortValues.indexOf(gpt5_1ReasoningEffort);
+  const currentValue = indexValue === -1 ? 0 : indexValue;
+
+  const updateGPT51ReasoningEffort = useCallback(
+    (value: number) => {
+      const effort = effortValues[value] as 'none' | 'low' | 'medium' | 'high';
+      updateAgentChatConfig({ gpt5_1ReasoningEffort: effort });
+    },
+    [updateAgentChatConfig],
+  );
+
+  return (
+    <Flexbox
+      align={'center'}
+      gap={12}
+      horizontal
+      paddingInline={'0 20px'}
+      style={{ minWidth: 200, width: '100%' }}
+    >
+      <Flexbox flex={1}>
+        <Slider
+          marks={marks}
+          max={3}
+          min={0}
+          onChange={updateGPT51ReasoningEffort}
+          step={1}
+          tooltip={{ open: false }}
+          value={currentValue}
+        />
+      </Flexbox>
+    </Flexbox>
+  );
+});
+
+export default GPT51ReasoningEffortSlider;
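
The slider maps its 0-3 position onto the effort levels through the effortValues array shown above, defaulting to 'none'. The same mapping, pulled out as plain helpers for clarity (illustrative only, not part of the component):

const effortValues = ['none', 'low', 'medium', 'high'] as const;
type Gpt51Effort = (typeof effortValues)[number];

// config value -> slider position (falls back to 0 / 'none', as the component does)
const effortToSliderValue = (effort?: Gpt51Effort): number => {
  const index = effortValues.indexOf(effort ?? 'none');
  return index === -1 ? 0 : index;
};

// slider position -> config value written via updateAgentChatConfig
const sliderValueToEffort = (value: number): Gpt51Effort => effortValues[value] ?? 'none';

// effortToSliderValue('medium') === 2; sliderValueToEffort(3) === 'high'
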
@@ -13,6 +13,7 @@ import { useStyles } from '../style';
 import { ChatItemProps } from '../type';
 
 export interface MessageContentProps {
+  className?: string;
   disabled?: ChatItemProps['disabled'];
   editing?: ChatItemProps['editing'];
   id: string;
@@ -39,6 +40,7 @@ const MessageContent = memo<MessageContentProps>(
     onDoubleClick,
     markdownProps,
     disabled,
+    className,
   }) => {
     const { t } = useTranslation('common');
     const { cx, styles } = useStyles({ disabled, editing, placement, primary, variant });
@@ -81,7 +83,7 @@
 
     return (
       <Flexbox
-        className={cx(styles.message, editing && styles.editingContainer)}
+        className={cx(styles.message, editing && styles.editingContainer, className)}
         onDoubleClick={onDoubleClick}
       >
        {messageContent}
@@ -1,7 +1,7 @@
 import { safeParseJSON } from '@lobechat/utils';
 import { memo } from 'react';
 
-import { BuiltinToolPlaceholders } from '@/tools/placeholders';
+import { getBuiltinPlaceholder } from '@/tools/placeholders';
 
 import Arguments from '../Arguments';
 
@@ -14,9 +14,9 @@ interface LoadingPlaceholderProps {
 
 const LoadingPlaceholder = memo<LoadingPlaceholderProps>(
   ({ identifier, requestArgs, apiName, loading }) => {
-    const Render = BuiltinToolPlaceholders[identifier || ''];
+    const Render = getBuiltinPlaceholder(identifier, apiName);
 
-    if (identifier && Render) {
+    if (Render) {
       return (
         <Render apiName={apiName} args={safeParseJSON(requestArgs) || {}} identifier={identifier} />
       );
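
getBuiltinPlaceholder comes from the reworked src/tools/placeholders.ts (+39 -8 in this release, not shown here) and replaces the flat BuiltinToolPlaceholders map with a lookup that can also key on the called apiName. A rough sketch of that shape, assuming a per-tool registry with API-specific entries and an optional default; the real registry contents live in placeholders.ts:

import type { ComponentType } from 'react';

// Speculative sketch only: tool identifier -> apiName -> placeholder component.
type PlaceholderComponent = ComponentType<{ apiName?: string; args?: any; identifier?: string }>;

const placeholderRegistry: Record<string, Record<string, PlaceholderComponent>> = {
  // e.g. 'lobe-web-browsing': { crawlMultiPages, crawlSinglePage, search } — names assumed for illustration
};

export const getBuiltinPlaceholder = (identifier?: string, apiName?: string) => {
  if (!identifier) return undefined;
  const toolPlaceholders = placeholderRegistry[identifier];
  if (!toolPlaceholders) return undefined;
  // prefer an api-specific placeholder, fall back to a tool-level default when present
  return (apiName && toolPlaceholders[apiName]) || toolPlaceholders.default;
};
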
@@ -28,10 +28,12 @@ const ApprovalActions = memo<ApprovalActionsProps>(
     const [approveLoading, setApproveLoading] = useState(false);
 
     const { assistantGroupId } = useGroupMessage();
-    const [approveToolIntervention, rejectToolIntervention] = useChatStore((s) => [
-      s.approveToolCalling,
-      s.rejectToolCalling,
-    ]);
+    const [approveToolIntervention, rejectToolIntervention, rejectAndContinueToolIntervention] =
+      useChatStore((s) => [
+        s.approveToolCalling,
+        s.rejectToolCalling,
+        s.rejectAndContinueToolCalling,
+      ]);
     const addToolToAllowList = useUserStore((s) => s.addToolToAllowList);
 
     const handleApprove = async (remember?: boolean) => {
@@ -58,6 +60,14 @@
       setRejectReason('');
     };
 
+    const handleRejectAndContinue = async (reason?: string) => {
+      setRejectLoading(true);
+      await rejectAndContinueToolIntervention(messageId, reason);
+      setRejectLoading(false);
+      setRejectPopoverOpen(false);
+      setRejectReason('');
+    };
+
     return (
       <Flexbox gap={8} horizontal>
         <Popover
@@ -67,14 +77,25 @@
           <Flexbox align={'center'} horizontal justify={'space-between'}>
             <div>{t('tool.intervention.rejectTitle')}</div>
 
-            <Button
-              loading={rejectLoading}
-              onClick={() => handleReject(rejectReason)}
-              size="small"
-              type="primary"
-            >
-              {t('confirm', { ns: 'common' })}
-            </Button>
+            <Space>
+              <Button
+                color={'default'}
+                loading={rejectLoading}
+                onClick={() => handleReject(rejectReason)}
+                size="small"
+                variant={'filled'}
+              >
+                {t('tool.intervention.rejectOnly')}
+              </Button>
+              <Button
+                loading={rejectLoading}
+                onClick={() => handleRejectAndContinue(rejectReason)}
+                size="small"
+                type="primary"
+              >
+                {t('tool.intervention.rejectAndContinue')}
+              </Button>
+            </Space>
           </Flexbox>
           <Input.TextArea
             autoFocus
@@ -95,7 +116,7 @@
          placement="bottomRight"
          trigger="click"
        >
-        <Button size="small" type="default">
+        <Button color={'default'} size="small" variant={'filled'}>
           {t('tool.intervention.reject')}
         </Button>
       </Popover>