@lobehub/lobehub 2.0.0-next.87 → 2.0.0-next.89

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/next.config.ts +0 -1
  4. package/package.json +2 -2
  5. package/packages/context-engine/src/processors/ToolCall.ts +1 -0
  6. package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +59 -0
  7. package/packages/context-engine/src/tools/ToolNameResolver.ts +1 -0
  8. package/packages/context-engine/src/tools/__tests__/ToolNameResolver.test.ts +57 -0
  9. package/packages/context-engine/src/types.ts +1 -0
  10. package/packages/fetch-sse/src/fetchSSE.ts +12 -2
  11. package/packages/model-bank/src/aiModels/aihubmix.ts +60 -1
  12. package/packages/model-bank/src/aiModels/google.ts +21 -86
  13. package/packages/model-bank/src/aiModels/ollamacloud.ts +7 -6
  14. package/packages/model-bank/src/types/aiModel.ts +1 -0
  15. package/packages/model-runtime/src/core/contextBuilders/google.test.ts +479 -0
  16. package/packages/model-runtime/src/core/contextBuilders/google.ts +44 -1
  17. package/packages/model-runtime/src/core/streams/google/google-ai.test.ts +1115 -814
  18. package/packages/model-runtime/src/core/streams/google/index.ts +19 -5
  19. package/packages/model-runtime/src/core/streams/protocol.ts +1 -0
  20. package/packages/model-runtime/src/providers/google/index.test.ts +1 -1
  21. package/packages/model-runtime/src/providers/google/index.ts +17 -10
  22. package/packages/model-runtime/src/types/chat.ts +4 -0
  23. package/packages/model-runtime/src/types/toolsCalling.ts +3 -1
  24. package/packages/types/src/agent/chatConfig.ts +2 -0
  25. package/packages/types/src/message/common/tools.ts +3 -0
  26. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +11 -0
  27. package/src/features/ChatInput/ActionBar/Model/ThinkingLevelSlider.tsx +56 -0
  28. package/src/features/Conversation/Messages/Group/Error/index.tsx +3 -2
  29. package/src/features/Conversation/Messages/Group/GroupItem.tsx +2 -2
  30. package/src/locales/default/chat.ts +3 -0
  31. package/src/services/chat/index.ts +4 -0
  32. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +6 -5
  33. package/src/store/chat/slices/message/actions/optimisticUpdate.ts +6 -11
  34. package/src/store/chat/slices/plugin/actions/internals.ts +2 -2
@@ -1,4 +1,4 @@
1
- import { GenerateContentResponse } from '@google/genai';
1
+ import { GenerateContentResponse, Part } from '@google/genai';
2
2
  import { GroundingSearch } from '@lobechat/types';
3
3
 
4
4
  import { ChatStreamCallbacks } from '../../../types';
@@ -74,19 +74,27 @@ const transformGoogleGenerativeAIStream = (
74
74
  }
75
75
  }
76
76
 
77
- const functionCalls = chunk.functionCalls;
77
+ // Parse function calls from candidate.content.parts
78
+ const functionCalls =
79
+ candidate?.content?.parts
80
+ ?.filter((part: any) => part.functionCall)
81
+ .map((part: Part) => ({
82
+ ...part.functionCall,
83
+ thoughtSignature: part.thoughtSignature,
84
+ })) || [];
78
85
 
79
- if (functionCalls) {
86
+ if (functionCalls.length > 0) {
80
87
  return [
81
88
  {
82
89
  data: functionCalls.map(
83
- (value, index): StreamToolCallChunkData => ({
90
+ (value, index: number): StreamToolCallChunkData => ({
84
91
  function: {
85
92
  arguments: JSON.stringify(value.args),
86
93
  name: value.name,
87
94
  },
88
95
  id: generateToolCallId(index, value.name),
89
96
  index: index,
97
+ thoughtSignature: value.thoughtSignature,
90
98
  type: 'function',
91
99
  }),
92
100
  ),
@@ -97,7 +105,13 @@ const transformGoogleGenerativeAIStream = (
97
105
  ];
98
106
  }
99
107
 
100
- const text = chunk.text;
108
+ // Parse text from candidate.content.parts
109
+ // Filter out thought content (thought: true) and thoughtSignature
110
+ const text =
111
+ candidate?.content?.parts
112
+ ?.filter((part: any) => part.text && !part.thought && !part.thoughtSignature)
113
+ .map((part: any) => part.text)
114
+ .join('') || '';
101
115
 
102
116
  if (candidate) {
103
117
  // 首先检查是否为 reasoning 内容 (thought: true)
@@ -98,6 +98,7 @@ export interface StreamToolCallChunkData {
98
98
  };
99
99
  id?: string;
100
100
  index: number;
101
+ thoughtSignature?: string;
101
102
  type: 'function' | string;
102
103
  }
103
104
 
@@ -1,5 +1,5 @@
1
1
  // @vitest-environment node
2
- import { GenerateContentResponse, Tool } from '@google/genai';
2
+ import { GenerateContentResponse } from '@google/genai';
3
3
  import OpenAI from 'openai';
4
4
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
5
5
 
@@ -194,7 +194,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
194
194
  async chat(rawPayload: ChatStreamPayload, options?: ChatMethodOptions) {
195
195
  try {
196
196
  const payload = this.buildPayload(rawPayload);
197
- const { model, thinkingBudget } = payload;
197
+ const { model, thinkingBudget, thinkingLevel } = payload;
198
198
 
199
199
  // https://ai.google.dev/gemini-api/docs/thinking#set-budget
200
200
  const resolvedThinkingBudget = resolveModelThinkingBudget(model, thinkingBudget);
@@ -209,6 +209,11 @@ export class LobeGoogleAI implements LobeRuntimeAI {
209
209
  thinkingBudget: resolvedThinkingBudget,
210
210
  };
211
211
 
212
+ // Add thinkingLevel for 3.0 models
213
+ if (model?.toLowerCase().includes('-3-') && thinkingLevel) {
214
+ (thinkingConfig as any).thinkingLevel = thinkingLevel;
215
+ }
216
+
212
217
  const contents = await buildGoogleMessages(payload.messages);
213
218
 
214
219
  const controller = new AbortController();
@@ -262,19 +267,21 @@ export class LobeGoogleAI implements LobeRuntimeAI {
262
267
 
263
268
  const inputStartAt = Date.now();
264
269
 
265
- const geminiStreamResponse = await this.client.models.generateContentStream({
266
- config,
267
- contents,
268
- model,
269
- });
270
-
271
- const googleStream = this.createEnhancedStream(geminiStreamResponse, controller.signal);
272
- const [prod, useForDebug] = googleStream.tee();
273
-
270
+ const finalPayload = { config, contents, model };
274
271
  const key = this.isVertexAi
275
272
  ? 'DEBUG_VERTEX_AI_CHAT_COMPLETION'
276
273
  : 'DEBUG_GOOGLE_CHAT_COMPLETION';
277
274
 
275
+ if (process.env[key] === '1') {
276
+ console.log('[requestPayload]');
277
+ console.log(JSON.stringify(finalPayload), '\n');
278
+ }
279
+
280
+ const geminiStreamResponse = await this.client.models.generateContentStream(finalPayload);
281
+
282
+ const googleStream = this.createEnhancedStream(geminiStreamResponse, controller.signal);
283
+ const [prod, useForDebug] = googleStream.tee();
284
+
278
285
  if (process.env[key] === '1') {
279
286
  debugStream(useForDebug).catch();
280
287
  }
@@ -124,6 +124,10 @@ export interface ChatStreamPayload {
124
124
  type: 'enabled' | 'disabled';
125
125
  };
126
126
  thinkingBudget?: number;
127
+ /**
128
+ * Thinking level for Gemini models (e.g., gemini-3.0-pro)
129
+ */
130
+ thinkingLevel?: 'low' | 'high';
127
131
  tool_choice?: string;
128
132
  tools?: ChatCompletionTool[];
129
133
  /**
@@ -1,5 +1,5 @@
1
- import { z } from 'zod';
2
1
  import type { PartialDeep } from 'type-fest';
2
+ import { z } from 'zod';
3
3
 
4
4
  /**
5
5
  * The function that the model called.
@@ -30,6 +30,7 @@ export interface MessageToolCall {
30
30
  */
31
31
  id: string;
32
32
 
33
+ thoughtSignature?: string;
33
34
  /**
34
35
  * The type of the tool. Currently, only `function` is supported.
35
36
  */
@@ -42,6 +43,7 @@ export const MessageToolCallSchema = z.object({
42
43
  name: z.string(),
43
44
  }),
44
45
  id: z.string(),
46
+ thoughtSignature: z.string().optional(),
45
47
  type: z.string(),
46
48
  });
47
49
 
@@ -38,6 +38,7 @@ export interface LobeAgentChatConfig {
38
38
  */
39
39
  textVerbosity?: 'low' | 'medium' | 'high';
40
40
  thinking?: 'disabled' | 'auto' | 'enabled';
41
+ thinkingLevel?: 'low' | 'high';
41
42
  thinkingBudget?: number;
42
43
  /**
43
44
  * Disable context caching
@@ -91,6 +92,7 @@ export const AgentChatConfigSchema = z.object({
91
92
  textVerbosity: z.enum(['low', 'medium', 'high']).optional(),
92
93
  thinking: z.enum(['disabled', 'auto', 'enabled']).optional(),
93
94
  thinkingBudget: z.number().optional(),
95
+ thinkingLevel: z.enum(['low', 'high']).optional(),
94
96
  urlContext: z.boolean().optional(),
95
97
  useModelBuiltinSearch: z.boolean().optional(),
96
98
  });
@@ -30,6 +30,7 @@ export interface ChatToolPayload {
30
30
  identifier: string;
31
31
  intervention?: ToolIntervention;
32
32
  result_msg_id?: string;
33
+ thoughtSignature?: string;
33
34
  type: LobeToolRenderType;
34
35
  }
35
36
 
@@ -84,6 +85,7 @@ export interface MessageToolCall {
84
85
  */
85
86
  id: string;
86
87
 
88
+ thoughtSignature?: string;
87
89
  /**
88
90
  * The type of the tool. Currently, only `function` is supported.
89
91
  */
@@ -108,6 +110,7 @@ export const ChatToolPayloadSchema = z.object({
108
110
  identifier: z.string(),
109
111
  intervention: ToolInterventionSchema.optional(),
110
112
  result_msg_id: z.string().optional(),
113
+ thoughtSignature: z.string().optional(),
111
114
  type: z.string(),
112
115
  });
113
116
 
@@ -17,6 +17,7 @@ import ReasoningEffortSlider from './ReasoningEffortSlider';
17
17
  import ReasoningTokenSlider from './ReasoningTokenSlider';
18
18
  import TextVerbositySlider from './TextVerbositySlider';
19
19
  import ThinkingBudgetSlider from './ThinkingBudgetSlider';
20
+ import ThinkingLevelSlider from './ThinkingLevelSlider';
20
21
  import ThinkingSlider from './ThinkingSlider';
21
22
 
22
23
  const ControlsForm = memo(() => {
@@ -177,6 +178,16 @@ const ControlsForm = memo(() => {
177
178
  paddingBottom: 0,
178
179
  },
179
180
  },
181
+ {
182
+ children: <ThinkingLevelSlider />,
183
+ label: t('extendParams.thinkingLevel.title'),
184
+ layout: 'horizontal',
185
+ minWidth: undefined,
186
+ name: 'thinkingLevel',
187
+ style: {
188
+ paddingBottom: 0,
189
+ },
190
+ },
180
191
  ].filter(Boolean) as FormItemProps[];
181
192
 
182
193
  return (
@@ -0,0 +1,56 @@
1
+ import { Slider } from 'antd';
2
+ import { memo, useCallback } from 'react';
3
+ import { Flexbox } from 'react-layout-kit';
4
+
5
+ import { useAgentStore } from '@/store/agent';
6
+ import { agentChatConfigSelectors } from '@/store/agent/selectors';
7
+
8
+ const ThinkingLevelSlider = memo(() => {
9
+ const [config, updateAgentChatConfig] = useAgentStore((s) => [
10
+ agentChatConfigSelectors.currentChatConfig(s),
11
+ s.updateAgentChatConfig,
12
+ ]);
13
+
14
+ const thinkingLevel = config.thinkingLevel || 'high'; // Default to 'high' if not set
15
+
16
+ const marks = {
17
+ 0: 'low',
18
+ 1: 'high',
19
+ };
20
+
21
+ const levelValues = ['low', 'high'];
22
+ const indexValue = levelValues.indexOf(thinkingLevel);
23
+ const currentValue = indexValue === -1 ? 1 : indexValue;
24
+
25
+ const updateThinkingLevel = useCallback(
26
+ (value: number) => {
27
+ const level = levelValues[value] as 'low' | 'high';
28
+ updateAgentChatConfig({ thinkingLevel: level });
29
+ },
30
+ [updateAgentChatConfig],
31
+ );
32
+
33
+ return (
34
+ <Flexbox
35
+ align={'center'}
36
+ gap={12}
37
+ horizontal
38
+ paddingInline={'0 20px'}
39
+ style={{ minWidth: 130, width: '100%' }} // 三项时宽度需改回 200
40
+ >
41
+ <Flexbox flex={1}>
42
+ <Slider
43
+ marks={marks}
44
+ max={1}
45
+ min={0}
46
+ onChange={updateThinkingLevel}
47
+ step={1}
48
+ tooltip={{ open: false }}
49
+ value={currentValue}
50
+ />
51
+ </Flexbox>
52
+ </Flexbox>
53
+ );
54
+ });
55
+
56
+ export default ThinkingLevelSlider;
@@ -15,12 +15,13 @@ export interface ErrorContentProps {
15
15
 
16
16
  const ErrorContent = memo<ErrorContentProps>(({ error, id }) => {
17
17
  const { t } = useTranslation('common');
18
- const errorProps = useErrorContent(error);
19
18
 
20
19
  const [deleteMessage] = useChatStore((s) => [s.deleteDBMessage]);
21
20
  const message = <ErrorMessageExtra block data={{ error, id }} />;
22
21
 
23
- if (!error?.message) {
22
+ const errorProps = useErrorContent(error);
23
+
24
+ if (!errorProps?.message) {
24
25
  if (!message) return null;
25
26
  return <Flexbox>{message}</Flexbox>;
26
27
  }
@@ -30,10 +30,10 @@ const GroupItem = memo<GroupItemProps>(
30
30
  });
31
31
  }}
32
32
  >
33
- <ContentBlock index={index} {...item} />
33
+ <ContentBlock index={index} {...item} error={error} />
34
34
  </Flexbox>
35
35
  ) : (
36
- <ContentBlock index={index} {...item} />
36
+ <ContentBlock index={index} {...item} error={error} />
37
37
  );
38
38
  },
39
39
  isEqual,
@@ -66,6 +66,9 @@ export default {
66
66
  thinking: {
67
67
  title: '深度思考开关',
68
68
  },
69
+ thinkingLevel: {
70
+ title: '思考水平',
71
+ },
69
72
  title: '模型扩展功能',
70
73
  urlContext: {
71
74
  desc: '开启后将自动解析网页链接,以获取实际网页上下文内容',
@@ -196,6 +196,10 @@ class ChatService {
196
196
  extendParams.thinkingBudget = chatConfig.thinkingBudget;
197
197
  }
198
198
 
199
+ if (modelExtendParams!.includes('thinkingLevel') && chatConfig.thinkingLevel) {
200
+ extendParams.thinkingLevel = chatConfig.thinkingLevel;
201
+ }
202
+
199
203
  if (modelExtendParams!.includes('urlContext') && chatConfig.urlContext) {
200
204
  extendParams.urlContext = chatConfig.urlContext;
201
205
  }
@@ -76,7 +76,7 @@ export interface StreamingExecutorAction {
76
76
  tool_calls?: MessageToolCall[];
77
77
  content: string;
78
78
  traceId?: string;
79
- finishType?: 'done' | 'error' | 'abort';
79
+ finishType?: string;
80
80
  usage?: ModelUsage;
81
81
  }>;
82
82
  /**
@@ -283,13 +283,13 @@ export const streamingExecutor: StateCreator<
283
283
  let thinkingStartAt: number;
284
284
  let duration: number | undefined;
285
285
  let reasoningOperationId: string | undefined;
286
- let finishType: 'done' | 'error' | 'abort' | undefined;
286
+ let finishType: string | undefined;
287
287
  // to upload image
288
288
  const uploadTasks: Map<string, Promise<{ id?: string; url?: string }>> = new Map();
289
289
 
290
290
  // Throttle tool_calls updates to prevent excessive re-renders (max once per 300ms)
291
291
  const throttledUpdateToolCalls = throttle(
292
- (toolCalls: any[]) => {
292
+ (toolCalls: MessageToolCall[]) => {
293
293
  internal_dispatchMessage(
294
294
  {
295
295
  id: messageId,
@@ -366,7 +366,6 @@ export const streamingExecutor: StateCreator<
366
366
  throttledUpdateToolCalls.flush();
367
367
  internal_toggleToolCallingStreaming(messageId, undefined);
368
368
 
369
- tools = get().internal_transformToolCalls(parsedToolCalls);
370
369
  tool_calls = toolCalls;
371
370
 
372
371
  parsedToolCalls = parsedToolCalls.map((item) => ({
@@ -377,6 +376,8 @@ export const streamingExecutor: StateCreator<
377
376
  },
378
377
  }));
379
378
 
379
+ tools = get().internal_transformToolCalls(parsedToolCalls);
380
+
380
381
  isFunctionCall = true;
381
382
  }
382
383
 
@@ -395,7 +396,7 @@ export const streamingExecutor: StateCreator<
395
396
  messageId,
396
397
  content,
397
398
  {
398
- toolCalls: parsedToolCalls,
399
+ tools,
399
400
  reasoning: !!reasoning
400
401
  ? { ...reasoning, duration: duration && !isNaN(duration) ? duration : undefined }
401
402
  : undefined,
@@ -3,11 +3,11 @@ import {
3
3
  ChatImageItem,
4
4
  ChatMessageError,
5
5
  ChatMessagePluginError,
6
+ ChatToolPayload,
6
7
  CreateMessageParams,
7
8
  GroundingSearch,
8
9
  MessageMetadata,
9
10
  MessagePluginItem,
10
- MessageToolCall,
11
11
  ModelReasoning,
12
12
  UIChatMessage,
13
13
  UpdateMessageRAGParams,
@@ -69,7 +69,7 @@ export interface MessageOptimisticUpdateAction {
69
69
  provider?: string;
70
70
  reasoning?: ModelReasoning;
71
71
  search?: GroundingSearch;
72
- toolCalls?: MessageToolCall[];
72
+ tools?: ChatToolPayload[];
73
73
  },
74
74
  context?: OptimisticUpdateContext,
75
75
  ) => Promise<void>;
@@ -204,22 +204,17 @@ export const messageOptimisticUpdate: StateCreator<
204
204
  },
205
205
 
206
206
  optimisticUpdateMessageContent: async (id, content, extra, context) => {
207
- const {
208
- internal_dispatchMessage,
209
- refreshMessages,
210
- internal_transformToolCalls,
211
- replaceMessages,
212
- } = get();
207
+ const { internal_dispatchMessage, refreshMessages, replaceMessages } = get();
213
208
 
214
209
  // Due to the async update method and refresh need about 100ms
215
210
  // we need to update the message content at the frontend to avoid the update flick
216
211
  // refs: https://medium.com/@kyledeguzmanx/what-are-optimistic-updates-483662c3e171
217
- if (extra?.toolCalls) {
212
+ if (extra?.tools) {
218
213
  internal_dispatchMessage(
219
214
  {
220
215
  id,
221
216
  type: 'updateMessage',
222
- value: { tools: internal_transformToolCalls(extra?.toolCalls) },
217
+ value: { tools: extra?.tools },
223
218
  },
224
219
  context,
225
220
  );
@@ -246,7 +241,7 @@ export const messageOptimisticUpdate: StateCreator<
246
241
  provider: extra?.provider,
247
242
  reasoning: extra?.reasoning,
248
243
  search: extra?.search,
249
- tools: extra?.toolCalls ? internal_transformToolCalls(extra?.toolCalls) : undefined,
244
+ tools: extra?.tools,
250
245
  },
251
246
  { sessionId, topicId },
252
247
  );
@@ -1,6 +1,6 @@
1
1
  /* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
2
2
  import { ToolNameResolver } from '@lobechat/context-engine';
3
- import { MessageToolCall, ToolsCallingContext } from '@lobechat/types';
3
+ import { ChatToolPayload, MessageToolCall, ToolsCallingContext } from '@lobechat/types';
4
4
  import { LobeChatPluginManifest } from '@lobehub/chat-plugin-sdk';
5
5
  import { StateCreator } from 'zustand/vanilla';
6
6
 
@@ -19,7 +19,7 @@ export interface PluginInternalsAction {
19
19
  /**
20
20
  * Transform tool calls from runtime format to storage format
21
21
  */
22
- internal_transformToolCalls: (toolCalls: MessageToolCall[]) => any[];
22
+ internal_transformToolCalls: (toolCalls: MessageToolCall[]) => ChatToolPayload[];
23
23
 
24
24
  /**
25
25
  * Construct tools calling context for plugin invocation