@lobehub/lobehub 2.0.0-next.87 → 2.0.0-next.88

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 2.0.0-next.88](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.87...v2.0.0-next.88)
+
+<sup>Released on **2025-11-19**</sup>
+
+#### 💄 Styles
+
+- **misc**: Fully support Gemini 3.0 model.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Fully support Gemini 3.0 model, closes [#10292](https://github.com/lobehub/lobe-chat/issues/10292) ([6545ef8](https://github.com/lobehub/lobe-chat/commit/6545ef8))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ## [Version 2.0.0-next.87](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.86...v2.0.0-next.87)
 
 <sup>Released on **2025-11-19**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Fully support Gemini 3.0 model."
+      ]
+    },
+    "date": "2025-11-19",
+    "version": "2.0.0-next.88"
+  },
   {
     "children": {
       "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/lobehub",
-  "version": "2.0.0-next.87",
+  "version": "2.0.0-next.88",
   "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -986,6 +986,66 @@ const aihubmixModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+      search: true,
+      video: true,
+      vision: true,
+    },
+    contextWindowTokens: 1_048_576 + 65_536,
+    description:
+      'Gemini 3 Pro 是 Google 最智能的模型,具有 SOTA 推理和多模式理解,以及强大的代理和氛围编码功能。',
+    displayName: 'Gemini 3 Pro Preview',
+    enabled: true,
+    id: 'gemini-3-pro-preview',
+    maxOutput: 65_536,
+    pricing: {
+      units: [
+        {
+          name: 'textInput_cacheRead',
+          strategy: 'tiered',
+          tiers: [
+            { rate: 0.2, upTo: 200_000 },
+            { rate: 0.4, upTo: 'infinity' },
+          ],
+          unit: 'millionTokens',
+        },
+        {
+          name: 'textInput',
+          strategy: 'tiered',
+          tiers: [
+            { rate: 2, upTo: 200_000 },
+            { rate: 4, upTo: 'infinity' },
+          ],
+          unit: 'millionTokens',
+        },
+        {
+          name: 'textOutput',
+          strategy: 'tiered',
+          tiers: [
+            { rate: 12, upTo: 200_000 },
+            { rate: 18, upTo: 'infinity' },
+          ],
+          unit: 'millionTokens',
+        },
+        {
+          lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
+      ],
+    },
+    releasedAt: '2025-11-18',
+    settings: {
+      extendParams: ['thinkingLevel', 'urlContext'],
+      searchImpl: 'params',
+      searchProvider: 'google',
+    },
+    type: 'chat',
+  },
   {
     abilities: {
       functionCall: true,
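
Note on the `tiered` pricing entries introduced above: the sketch below shows one plausible reading of such a unit, where the whole request is billed at the rate of the tier its token count falls under (matching Google's published Gemini 3 Pro pricing, which switches to the higher rate once a prompt exceeds 200k tokens). The helper name, types, and tier semantics are illustrative assumptions, not LobeHub's actual pricing code.

```ts
// Illustrative sketch only: interprets a `tiered` pricing unit as a threshold-based
// rate table (the full request is billed at the rate of the tier it falls into).
type PricingTier = { rate: number; upTo: number | 'infinity' };

const estimateTieredCostUSD = (tiers: PricingTier[], tokens: number): number => {
  // Pick the first tier whose upper bound covers the request size.
  const tier = tiers.find((t) => t.upTo === 'infinity' || tokens <= t.upTo) ?? tiers[tiers.length - 1];
  // Rates are expressed per million tokens (`unit: 'millionTokens'`).
  return (tokens / 1_000_000) * tier.rate;
};

// textInput tiers from the Gemini 3 Pro Preview card above:
const textInputTiers: PricingTier[] = [
  { rate: 2, upTo: 200_000 },
  { rate: 4, upTo: 'infinity' },
];

console.log(estimateTieredCostUSD(textInputTiers, 100_000)); // 0.2 -> $0.20 for 100k input tokens
console.log(estimateTieredCostUSD(textInputTiers, 500_000)); // 2   -> $2.00 for 500k input tokens
```
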
@@ -998,7 +1058,6 @@ const aihubmixModels: AIChatModelCard[] = [
     description:
       'Gemini 2.5 Pro 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
     displayName: 'Gemini 2.5 Pro',
-    enabled: true,
     id: 'gemini-2.5-pro',
     maxOutput: 65_536,
     pricing: {
@@ -123,8 +123,8 @@ const googleChatModels: AIChatModelCard[] = [
     },
     contextWindowTokens: 1_048_576 + 65_536,
     description:
-      'Gemini 3.0 Pro Preview 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
-    displayName: 'Gemini 3.0 Pro Preview',
+      'Gemini 3 Pro 是 全球最佳的多模态理解模型,也是 Google 迄今为止最强大的智能体和氛围编程模型,提供更丰富的视觉效果和更深层次的交互性,所有这些都建立在最先进的推理能力基础之上。',
+    displayName: 'Gemini 3 Pro Preview',
     enabled: true,
     id: 'gemini-3-pro-preview',
     maxOutput: 65_536,
@@ -134,8 +134,8 @@ const googleChatModels: AIChatModelCard[] = [
           name: 'textInput_cacheRead',
           strategy: 'tiered',
           tiers: [
-            { rate: 0.20, upTo: 200_000 },
-            { rate: 0.40, upTo: 'infinity' },
+            { rate: 0.2, upTo: 200_000 },
+            { rate: 0.4, upTo: 'infinity' },
           ],
           unit: 'millionTokens',
         },
@@ -143,8 +143,8 @@ const googleChatModels: AIChatModelCard[] = [
           name: 'textInput',
           strategy: 'tiered',
           tiers: [
-            { rate: 2.0, upTo: 200_000 },
-            { rate: 4.0, upTo: 'infinity' },
+            { rate: 2, upTo: 200_000 },
+            { rate: 4, upTo: 'infinity' },
           ],
           unit: 'millionTokens',
         },
@@ -152,16 +152,22 @@ const googleChatModels: AIChatModelCard[] = [
           name: 'textOutput',
           strategy: 'tiered',
           tiers: [
-            { rate: 12.0, upTo: 200_000 },
-            { rate: 18.0, upTo: 'infinity' },
+            { rate: 12, upTo: 200_000 },
+            { rate: 18, upTo: 'infinity' },
           ],
           unit: 'millionTokens',
         },
+        {
+          lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
       ],
     },
     releasedAt: '2025-11-18',
     settings: {
-      extendParams: ['thinkingBudget', 'urlContext'],
+      extendParams: ['thinkingLevel', 'urlContext'],
       searchImpl: 'params',
       searchProvider: 'google',
     },
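
The new `textInput_cacheWrite` unit uses a `lookup` strategy instead of tiers: the rate is keyed by the declared `pricingParams` (here a cache `ttl` of `'1h'` maps to 4.5 per million tokens). A minimal sketch of one plausible way such a lookup could be resolved; the helper name and key-building scheme are assumptions, not LobeHub's internals.

```ts
// Illustrative only: resolve a `lookup` pricing unit from its pricing params.
interface LookupPricingUnit {
  lookup: { prices: Record<string, number>; pricingParams: string[] };
}

const resolveLookupRate = (
  unit: LookupPricingUnit,
  params: Record<string, string>,
): number | undefined => {
  // Build the key from the declared pricing params, e.g. { ttl: '1h' } -> '1h'.
  const key = unit.lookup.pricingParams.map((p) => params[p]).join(':');
  return unit.lookup.prices[key];
};

const cacheWrite: LookupPricingUnit = {
  lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
};

console.log(resolveLookupRate(cacheWrite, { ttl: '1h' })); // 4.5 (USD per million tokens written to cache)
```
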
@@ -211,6 +217,12 @@ const googleChatModels: AIChatModelCard[] = [
           ],
           unit: 'millionTokens',
         },
+        {
+          lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
+          name: 'textInput_cacheWrite',
+          strategy: 'lookup',
+          unit: 'millionTokens',
+        },
       ],
     },
     releasedAt: '2025-06-17',
@@ -383,34 +395,6 @@ const googleChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    abilities: {
-      functionCall: true,
-      reasoning: true,
-      search: true,
-      video: true,
-      vision: true,
-    },
-    contextWindowTokens: 1_048_576 + 65_536,
-    description: 'Gemini 2.5 Flash Preview 是 Google 性价比最高的模型,提供全面的功能。',
-    displayName: 'Gemini 2.5 Flash Preview 05-20',
-    id: 'gemini-2.5-flash-preview-05-20',
-    maxOutput: 65_536,
-    pricing: {
-      units: [
-        { name: 'textInput_cacheRead', rate: 0.0375, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput', rate: 0.15, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textOutput', rate: 3.5, strategy: 'fixed', unit: 'millionTokens' },
-      ],
-    },
-    releasedAt: '2025-05-20',
-    settings: {
-      extendParams: ['thinkingBudget', 'urlContext'],
-      searchImpl: 'params',
-      searchProvider: 'google',
-    },
-    type: 'chat',
-  },
   {
     abilities: {
       imageOutput: true,
@@ -514,35 +498,6 @@ const googleChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    abilities: {
-      functionCall: true,
-      reasoning: true,
-      search: true,
-      video: true,
-      vision: true,
-    },
-    contextWindowTokens: 1_048_576 + 65_536,
-    description:
-      'Gemini 2.5 Flash-Lite Preview 是 Google 最小、性价比最高的模型,专为大规模使用而设计。',
-    displayName: 'Gemini 2.5 Flash-Lite Preview 06-17',
-    id: 'gemini-2.5-flash-lite-preview-06-17',
-    maxOutput: 65_536,
-    pricing: {
-      units: [
-        { name: 'textInput_cacheRead', rate: 0.025, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textInput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
-      ],
-    },
-    releasedAt: '2025-06-11',
-    settings: {
-      extendParams: ['thinkingBudget', 'urlContext'],
-      searchImpl: 'params',
-      searchProvider: 'google',
-    },
-    type: 'chat',
-  },
   {
     abilities: {
       functionCall: true,
@@ -597,26 +552,6 @@ const googleChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    abilities: {
-      imageOutput: true,
-      vision: true,
-    },
-    contextWindowTokens: 32_768 + 8192,
-    description: 'Gemini 2.0 Flash 预览模型,支持图像生成',
-    displayName: 'Gemini 2.0 Flash Preview Image Generation',
-    id: 'gemini-2.0-flash-preview-image-generation',
-    maxOutput: 8192,
-    pricing: {
-      units: [
-        { name: 'textInput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
-        { name: 'imageGeneration', rate: 0.039, strategy: 'fixed', unit: 'image' },
-      ],
-    },
-    releasedAt: '2025-05-07',
-    type: 'chat',
-  },
   {
     abilities: {
       imageOutput: true,
@@ -5,12 +5,14 @@ const ollamaCloudModels: AIChatModelCard[] = [
     abilities: {
       functionCall: true,
       reasoning: true,
+      vision: true,
     },
-    contextWindowTokens: 200_000,
-    description: 'MiniMax M2 是专为编码和代理工作流程构建的高效大型语言模型。',
-    displayName: 'MiniMax M2',
+    contextWindowTokens: 1_048_576,
+    description:
+      'Gemini 3 Pro 是 Google 最智能的模型,具有 SOTA 推理和多模式理解,以及强大的代理和氛围编码功能。',
+    displayName: 'Gemini 3 Pro Preview',
     enabled: true,
-    id: 'minimax-m2',
+    id: 'gemini-3-pro-preview',
     type: 'chat',
   },
   {
@@ -19,8 +21,7 @@ const ollamaCloudModels: AIChatModelCard[] = [
       reasoning: true,
     },
     contextWindowTokens: 200_000,
-    description:
-      'MiniMax M2 是专为编码和代理工作流程构建的高效大型语言模型。',
+    description: 'MiniMax M2 是专为编码和代理工作流程构建的高效大型语言模型。',
     displayName: 'MiniMax M2',
     enabled: true,
     id: 'minimax-m2',
@@ -242,6 +242,7 @@ export type ExtendParamsType =
   | 'textVerbosity'
   | 'thinking'
   | 'thinkingBudget'
+  | 'thinkingLevel'
   | 'urlContext';
 
 export interface AiModelSettings {
@@ -194,7 +194,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   async chat(rawPayload: ChatStreamPayload, options?: ChatMethodOptions) {
     try {
       const payload = this.buildPayload(rawPayload);
-      const { model, thinkingBudget } = payload;
+      const { model, thinkingBudget, thinkingLevel } = payload;
 
       // https://ai.google.dev/gemini-api/docs/thinking#set-budget
       const resolvedThinkingBudget = resolveModelThinkingBudget(model, thinkingBudget);
@@ -209,6 +209,11 @@ export class LobeGoogleAI implements LobeRuntimeAI {
         thinkingBudget: resolvedThinkingBudget,
       };
 
+      // Add thinkingLevel for 3.0 models
+      if (model?.toLowerCase().includes('-3-') && thinkingLevel) {
+        (thinkingConfig as any).thinkingLevel = thinkingLevel;
+      }
+
       const contents = await buildGoogleMessages(payload.messages);
 
       const controller = new AbortController();
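
The added block only attaches `thinkingLevel` when the model id contains `-3-` (e.g. `gemini-3-pro-preview`); older Gemini 2.x models keep using `thinkingBudget` alone. A compressed restatement of that gate, pulled out of its class context purely for illustration (the helper name is hypothetical):

```ts
// Hypothetical helper mirroring the gating logic added above.
type ThinkingConfig = { thinkingBudget?: number; thinkingLevel?: 'low' | 'high' };

const buildThinkingConfig = (
  model: string,
  thinkingBudget?: number,
  thinkingLevel?: 'low' | 'high',
): ThinkingConfig => {
  const thinkingConfig: ThinkingConfig = { thinkingBudget };

  // Only ids containing '-3-' (e.g. 'gemini-3-pro-preview') receive a thinkingLevel.
  if (model.toLowerCase().includes('-3-') && thinkingLevel) {
    thinkingConfig.thinkingLevel = thinkingLevel;
  }

  return thinkingConfig;
};

console.log(buildThinkingConfig('gemini-3-pro-preview', undefined, 'high'));
// { thinkingBudget: undefined, thinkingLevel: 'high' }
console.log(buildThinkingConfig('gemini-2.5-pro', 8192, 'high'));
// { thinkingBudget: 8192 } (thinkingLevel is ignored for non-3.x ids)
```
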
@@ -124,6 +124,10 @@ export interface ChatStreamPayload {
     type: 'enabled' | 'disabled';
   };
   thinkingBudget?: number;
+  /**
+   * Thinking level for Gemini models (e.g., gemini-3.0-pro)
+   */
+  thinkingLevel?: 'low' | 'high';
   tool_choice?: string;
   tools?: ChatCompletionTool[];
   /**
@@ -38,6 +38,7 @@ export interface LobeAgentChatConfig {
    */
   textVerbosity?: 'low' | 'medium' | 'high';
   thinking?: 'disabled' | 'auto' | 'enabled';
+  thinkingLevel?: 'low' | 'high';
   thinkingBudget?: number;
   /**
    * Disable context caching
@@ -91,6 +92,7 @@ export const AgentChatConfigSchema = z.object({
   textVerbosity: z.enum(['low', 'medium', 'high']).optional(),
   thinking: z.enum(['disabled', 'auto', 'enabled']).optional(),
   thinkingBudget: z.number().optional(),
+  thinkingLevel: z.enum(['low', 'high']).optional(),
   urlContext: z.boolean().optional(),
   useModelBuiltinSearch: z.boolean().optional(),
 });
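
A quick illustration of what the new schema field accepts (a standalone sketch mirroring the `thinkingLevel` entry above, not part of the package's tests): only the two levels surfaced by the slider pass validation, and the field may be omitted entirely.

```ts
import { z } from 'zod';

// Same shape as the field added to AgentChatConfigSchema above.
const thinkingLevel = z.enum(['low', 'high']).optional();

console.log(thinkingLevel.safeParse('high').success); // true
console.log(thinkingLevel.safeParse(undefined).success); // true (field may be omitted)
console.log(thinkingLevel.safeParse('medium').success); // false
```
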
@@ -17,6 +17,7 @@ import ReasoningEffortSlider from './ReasoningEffortSlider';
 import ReasoningTokenSlider from './ReasoningTokenSlider';
 import TextVerbositySlider from './TextVerbositySlider';
 import ThinkingBudgetSlider from './ThinkingBudgetSlider';
+import ThinkingLevelSlider from './ThinkingLevelSlider';
 import ThinkingSlider from './ThinkingSlider';
 
 const ControlsForm = memo(() => {
@@ -177,6 +178,16 @@ const ControlsForm = memo(() => {
         paddingBottom: 0,
       },
     },
+    {
+      children: <ThinkingLevelSlider />,
+      label: t('extendParams.thinkingLevel.title'),
+      layout: 'horizontal',
+      minWidth: undefined,
+      name: 'thinkingLevel',
+      style: {
+        paddingBottom: 0,
+      },
+    },
   ].filter(Boolean) as FormItemProps[];
 
   return (
@@ -0,0 +1,56 @@
+import { Slider } from 'antd';
+import { memo, useCallback } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import { useAgentStore } from '@/store/agent';
+import { agentChatConfigSelectors } from '@/store/agent/selectors';
+
+const ThinkingLevelSlider = memo(() => {
+  const [config, updateAgentChatConfig] = useAgentStore((s) => [
+    agentChatConfigSelectors.currentChatConfig(s),
+    s.updateAgentChatConfig,
+  ]);
+
+  const thinkingLevel = config.thinkingLevel || 'high'; // Default to 'high' if not set
+
+  const marks = {
+    0: 'low',
+    1: 'high',
+  };
+
+  const levelValues = ['low', 'high'];
+  const indexValue = levelValues.indexOf(thinkingLevel);
+  const currentValue = indexValue === -1 ? 1 : indexValue;
+
+  const updateThinkingLevel = useCallback(
+    (value: number) => {
+      const level = levelValues[value] as 'low' | 'high';
+      updateAgentChatConfig({ thinkingLevel: level });
+    },
+    [updateAgentChatConfig],
+  );
+
+  return (
+    <Flexbox
+      align={'center'}
+      gap={12}
+      horizontal
+      paddingInline={'0 20px'}
+      style={{ minWidth: 130, width: '100%' }} // 三项时宽度需改回 200
+    >
+      <Flexbox flex={1}>
+        <Slider
+          marks={marks}
+          max={1}
+          min={0}
+          onChange={updateThinkingLevel}
+          step={1}
+          tooltip={{ open: false }}
+          value={currentValue}
+        />
+      </Flexbox>
+    </Flexbox>
+  );
+});
+
+export default ThinkingLevelSlider;
@@ -66,6 +66,9 @@ export default {
     thinking: {
       title: '深度思考开关',
     },
+    thinkingLevel: {
+      title: '思考水平',
+    },
     title: '模型扩展功能',
     urlContext: {
       desc: '开启后将自动解析网页链接,以获取实际网页上下文内容',
@@ -196,6 +196,10 @@ class ChatService {
       extendParams.thinkingBudget = chatConfig.thinkingBudget;
     }
 
+    if (modelExtendParams!.includes('thinkingLevel') && chatConfig.thinkingLevel) {
+      extendParams.thinkingLevel = chatConfig.thinkingLevel;
+    }
+
     if (modelExtendParams!.includes('urlContext') && chatConfig.urlContext) {
       extendParams.urlContext = chatConfig.urlContext;
     }
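
Taken together with the model cards earlier in this diff: a model only receives `thinkingLevel` if its card lists `'thinkingLevel'` in `settings.extendParams`, the agent's chat config has a value, and (for Google models) the runtime gate shown above passes. A simplified, hypothetical restatement of the ChatService selection logic, not the package's actual code:

```ts
// Hypothetical, compressed restatement of the extendParams gate added above.
interface ExtendParams {
  thinkingBudget?: number;
  thinkingLevel?: 'low' | 'high';
  urlContext?: boolean;
}

const pickExtendParams = (
  modelExtendParams: string[], // from the model card's settings.extendParams
  chatConfig: ExtendParams,
): ExtendParams => {
  const extendParams: ExtendParams = {};

  if (modelExtendParams.includes('thinkingBudget') && chatConfig.thinkingBudget !== undefined)
    extendParams.thinkingBudget = chatConfig.thinkingBudget;
  if (modelExtendParams.includes('thinkingLevel') && chatConfig.thinkingLevel)
    extendParams.thinkingLevel = chatConfig.thinkingLevel;
  if (modelExtendParams.includes('urlContext') && chatConfig.urlContext)
    extendParams.urlContext = chatConfig.urlContext;

  return extendParams;
};

// Gemini 3 Pro Preview declares ['thinkingLevel', 'urlContext'], so a configured
// thinkingBudget is simply dropped for that model:
console.log(pickExtendParams(['thinkingLevel', 'urlContext'], { thinkingBudget: 4096, thinkingLevel: 'high' }));
// { thinkingLevel: 'high' }
```
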