@lobehub/lobehub 2.0.0-next.87 → 2.0.0-next.89

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/next.config.ts +0 -1
  4. package/package.json +2 -2
  5. package/packages/context-engine/src/processors/ToolCall.ts +1 -0
  6. package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +59 -0
  7. package/packages/context-engine/src/tools/ToolNameResolver.ts +1 -0
  8. package/packages/context-engine/src/tools/__tests__/ToolNameResolver.test.ts +57 -0
  9. package/packages/context-engine/src/types.ts +1 -0
  10. package/packages/fetch-sse/src/fetchSSE.ts +12 -2
  11. package/packages/model-bank/src/aiModels/aihubmix.ts +60 -1
  12. package/packages/model-bank/src/aiModels/google.ts +21 -86
  13. package/packages/model-bank/src/aiModels/ollamacloud.ts +7 -6
  14. package/packages/model-bank/src/types/aiModel.ts +1 -0
  15. package/packages/model-runtime/src/core/contextBuilders/google.test.ts +479 -0
  16. package/packages/model-runtime/src/core/contextBuilders/google.ts +44 -1
  17. package/packages/model-runtime/src/core/streams/google/google-ai.test.ts +1115 -814
  18. package/packages/model-runtime/src/core/streams/google/index.ts +19 -5
  19. package/packages/model-runtime/src/core/streams/protocol.ts +1 -0
  20. package/packages/model-runtime/src/providers/google/index.test.ts +1 -1
  21. package/packages/model-runtime/src/providers/google/index.ts +17 -10
  22. package/packages/model-runtime/src/types/chat.ts +4 -0
  23. package/packages/model-runtime/src/types/toolsCalling.ts +3 -1
  24. package/packages/types/src/agent/chatConfig.ts +2 -0
  25. package/packages/types/src/message/common/tools.ts +3 -0
  26. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +11 -0
  27. package/src/features/ChatInput/ActionBar/Model/ThinkingLevelSlider.tsx +56 -0
  28. package/src/features/Conversation/Messages/Group/Error/index.tsx +3 -2
  29. package/src/features/Conversation/Messages/Group/GroupItem.tsx +2 -2
  30. package/src/locales/default/chat.ts +3 -0
  31. package/src/services/chat/index.ts +4 -0
  32. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +6 -5
  33. package/src/store/chat/slices/message/actions/optimisticUpdate.ts +6 -11
  34. package/src/store/chat/slices/plugin/actions/internals.ts +2 -2
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 2.0.0-next.89](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.88...v2.0.0-next.89)
6
+
7
+ <sup>Released on **2025-11-19**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Support gemini 3.0 tools calling.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Support gemini 3.0 tools calling, closes [#10301](https://github.com/lobehub/lobe-chat/issues/10301) ([7114fc1](https://github.com/lobehub/lobe-chat/commit/7114fc1))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ## [Version 2.0.0-next.88](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.87...v2.0.0-next.88)
31
+
32
+ <sup>Released on **2025-11-19**</sup>
33
+
34
+ #### 💄 Styles
35
+
36
+ - **misc**: Fully support Gemini 3.0 model.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### Styles
44
+
45
+ - **misc**: Fully support Gemini 3.0 model, closes [#10292](https://github.com/lobehub/lobe-chat/issues/10292) ([6545ef8](https://github.com/lobehub/lobe-chat/commit/6545ef8))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ## [Version 2.0.0-next.87](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.86...v2.0.0-next.87)
6
56
 
7
57
  <sup>Released on **2025-11-19**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Support gemini 3.0 tools calling."
6
+ ]
7
+ },
8
+ "date": "2025-11-19",
9
+ "version": "2.0.0-next.89"
10
+ },
11
+ {
12
+ "children": {
13
+ "improvements": [
14
+ "Fully support Gemini 3.0 model."
15
+ ]
16
+ },
17
+ "date": "2025-11-19",
18
+ "version": "2.0.0-next.88"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "improvements": [
package/next.config.ts CHANGED
@@ -42,7 +42,6 @@ const nextConfig: NextConfig = {
42
42
  // so we need to disable it
43
43
  // refs: https://github.com/lobehub/lobe-chat/pull/7430
44
44
  serverMinification: false,
45
- turbopackFileSystemCacheForDev: true,
46
45
  webVitalsAttribution: ['CLS', 'LCP'],
47
46
  webpackBuildWorker: true,
48
47
  webpackMemoryOptimizations: true,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/lobehub",
3
- "version": "2.0.0-next.87",
3
+ "version": "2.0.0-next.89",
4
4
  "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -141,7 +141,7 @@
141
141
  "@emotion/react": "^11.14.0",
142
142
  "@fal-ai/client": "^1.7.2",
143
143
  "@formkit/auto-animate": "^0.9.0",
144
- "@google/genai": "^1.29.1",
144
+ "@google/genai": "^1.30.0",
145
145
  "@huggingface/inference": "^4.13.3",
146
146
  "@icons-pack/react-simple-icons": "^13.8.0",
147
147
  "@khmyznikov/pwa-install": "0.3.9",
@@ -130,6 +130,7 @@ export class ToolCallProcessor extends BaseProcessor {
130
130
  : `${tool.identifier}.${tool.apiName}`,
131
131
  },
132
132
  id: tool.id,
133
+ thoughtSignature: tool.thoughtSignature,
133
134
  type: 'function',
134
135
  }),
135
136
  );
@@ -72,6 +72,65 @@ describe('ToolCallProcessor', () => {
72
72
  ]);
73
73
  });
74
74
 
75
+ it('should pass through thoughtSignature when present', async () => {
76
+ const processor = new ToolCallProcessor(defaultConfig);
77
+ const context = createContext([
78
+ {
79
+ content: '',
80
+ id: 'msg1',
81
+ role: 'assistant',
82
+ tools: [
83
+ {
84
+ apiName: 'search',
85
+ arguments: '{"query":"test"}',
86
+ id: 'call_1',
87
+ identifier: 'web',
88
+ thoughtSignature: 'Let me search for this information',
89
+ type: 'builtin',
90
+ },
91
+ ],
92
+ },
93
+ ]);
94
+
95
+ const result = await processor.process(context);
96
+
97
+ expect(result.messages[0].tool_calls).toEqual([
98
+ {
99
+ function: {
100
+ arguments: '{"query":"test"}',
101
+ name: 'web.search',
102
+ },
103
+ id: 'call_1',
104
+ thoughtSignature: 'Let me search for this information',
105
+ type: 'function',
106
+ },
107
+ ]);
108
+ });
109
+
110
+ it('should handle missing thoughtSignature', async () => {
111
+ const processor = new ToolCallProcessor(defaultConfig);
112
+ const context = createContext([
113
+ {
114
+ content: '',
115
+ id: 'msg1',
116
+ role: 'assistant',
117
+ tools: [
118
+ {
119
+ apiName: 'search',
120
+ arguments: '{"query":"test"}',
121
+ id: 'call_1',
122
+ identifier: 'web',
123
+ type: 'builtin',
124
+ },
125
+ ],
126
+ },
127
+ ]);
128
+
129
+ const result = await processor.process(context);
130
+
131
+ expect(result.messages[0].tool_calls[0].thoughtSignature).toBeUndefined();
132
+ });
133
+
75
134
  it('should use custom genToolCallingName function', async () => {
76
135
  const genToolCallingName = vi.fn(
77
136
  (identifier, apiName, type) => `custom_${identifier}_${apiName}_${type}`,
@@ -82,6 +82,7 @@ export class ToolNameResolver {
82
82
  arguments: toolCall.function.arguments,
83
83
  id: toolCall.id,
84
84
  identifier,
85
+ thoughtSignature: toolCall.thoughtSignature,
85
86
  type: (type ?? 'default') as any,
86
87
  };
87
88
 
@@ -455,6 +455,63 @@ describe('ToolNameResolver', () => {
455
455
  });
456
456
  });
457
457
 
458
+ describe('resolve - thoughtSignature', () => {
459
+ it('should pass through thoughtSignature when present', () => {
460
+ const toolCalls = [
461
+ {
462
+ function: {
463
+ arguments: '{"query": "test"}',
464
+ name: 'test-plugin____myAction____builtin',
465
+ },
466
+ id: 'call_1',
467
+ thoughtSignature: 'thinking about this...',
468
+ type: 'function',
469
+ },
470
+ ];
471
+
472
+ const manifests = {
473
+ 'test-plugin': {
474
+ api: [{ description: 'My action', name: 'myAction', parameters: {} }],
475
+ identifier: 'test-plugin',
476
+ meta: {},
477
+ type: 'builtin' as const,
478
+ },
479
+ };
480
+
481
+ const result = resolver.resolve(toolCalls, manifests);
482
+
483
+ expect(result).toHaveLength(1);
484
+ expect(result[0].thoughtSignature).toBe('thinking about this...');
485
+ });
486
+
487
+ it('should handle missing thoughtSignature', () => {
488
+ const toolCalls = [
489
+ {
490
+ function: {
491
+ arguments: '{"query": "test"}',
492
+ name: 'test-plugin____myAction____builtin',
493
+ },
494
+ id: 'call_1',
495
+ type: 'function',
496
+ },
497
+ ];
498
+
499
+ const manifests = {
500
+ 'test-plugin': {
501
+ api: [{ description: 'My action', name: 'myAction', parameters: {} }],
502
+ identifier: 'test-plugin',
503
+ meta: {},
504
+ type: 'builtin' as const,
505
+ },
506
+ };
507
+
508
+ const result = resolver.resolve(toolCalls, manifests);
509
+
510
+ expect(result).toHaveLength(1);
511
+ expect(result[0].thoughtSignature).toBeUndefined();
512
+ });
513
+ });
514
+
458
515
  describe('resolve - edge cases', () => {
459
516
  it('should filter out invalid tool calls with missing apiName', () => {
460
517
  const toolCalls = [
@@ -30,6 +30,7 @@ export interface MessageToolCall {
30
30
  name: string;
31
31
  };
32
32
  id: string;
33
+ thoughtSignature?: string;
33
34
  type: 'function';
34
35
  }
35
36
  export interface Message {
@@ -17,7 +17,7 @@ import { nanoid } from '@lobechat/utils/uuid';
17
17
 
18
18
  import { getMessageError } from './parseError';
19
19
 
20
- type SSEFinishType = 'done' | 'error' | 'abort';
20
+ type SSEFinishType = 'done' | 'error' | 'abort' | string;
21
21
 
22
22
  export type OnFinishHandler = (
23
23
  text: string,
@@ -48,6 +48,10 @@ export interface MessageTextChunk {
48
48
  text: string;
49
49
  type: 'text';
50
50
  }
51
+ export interface MessageStopChunk {
52
+ reason: string;
53
+ type: 'stop';
54
+ }
51
55
 
52
56
  export interface MessageBase64ImageChunk {
53
57
  id: string;
@@ -86,7 +90,8 @@ export interface FetchSSEOptions {
86
90
  | MessageGroundingChunk
87
91
  | MessageUsageChunk
88
92
  | MessageBase64ImageChunk
89
- | MessageSpeedChunk,
93
+ | MessageSpeedChunk
94
+ | MessageStopChunk,
90
95
  ) => void;
91
96
  responseAnimation?: ResponseAnimation;
92
97
  }
@@ -387,6 +392,11 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
387
392
  break;
388
393
  }
389
394
 
395
+ case 'stop': {
396
+ options.onMessageHandle?.({ reason: data, type: 'stop' });
397
+ break;
398
+ }
399
+
390
400
  case 'reasoning': {
391
401
  if (textSmoothing) {
392
402
  thinkingController.pushToQueue(data);
@@ -986,6 +986,66 @@ const aihubmixModels: AIChatModelCard[] = [
986
986
  },
987
987
  type: 'chat',
988
988
  },
989
+ {
990
+ abilities: {
991
+ functionCall: true,
992
+ reasoning: true,
993
+ search: true,
994
+ video: true,
995
+ vision: true,
996
+ },
997
+ contextWindowTokens: 1_048_576 + 65_536,
998
+ description:
999
+ 'Gemini 3 Pro 是 Google 最智能的模型,具有 SOTA 推理和多模式理解,以及强大的代理和氛围编码功能。',
1000
+ displayName: 'Gemini 3 Pro Preview',
1001
+ enabled: true,
1002
+ id: 'gemini-3-pro-preview',
1003
+ maxOutput: 65_536,
1004
+ pricing: {
1005
+ units: [
1006
+ {
1007
+ name: 'textInput_cacheRead',
1008
+ strategy: 'tiered',
1009
+ tiers: [
1010
+ { rate: 0.2, upTo: 200_000 },
1011
+ { rate: 0.4, upTo: 'infinity' },
1012
+ ],
1013
+ unit: 'millionTokens',
1014
+ },
1015
+ {
1016
+ name: 'textInput',
1017
+ strategy: 'tiered',
1018
+ tiers: [
1019
+ { rate: 2, upTo: 200_000 },
1020
+ { rate: 4, upTo: 'infinity' },
1021
+ ],
1022
+ unit: 'millionTokens',
1023
+ },
1024
+ {
1025
+ name: 'textOutput',
1026
+ strategy: 'tiered',
1027
+ tiers: [
1028
+ { rate: 12, upTo: 200_000 },
1029
+ { rate: 18, upTo: 'infinity' },
1030
+ ],
1031
+ unit: 'millionTokens',
1032
+ },
1033
+ {
1034
+ lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
1035
+ name: 'textInput_cacheWrite',
1036
+ strategy: 'lookup',
1037
+ unit: 'millionTokens',
1038
+ },
1039
+ ],
1040
+ },
1041
+ releasedAt: '2025-11-18',
1042
+ settings: {
1043
+ extendParams: ['thinkingLevel', 'urlContext'],
1044
+ searchImpl: 'params',
1045
+ searchProvider: 'google',
1046
+ },
1047
+ type: 'chat',
1048
+ },
989
1049
  {
990
1050
  abilities: {
991
1051
  functionCall: true,
@@ -998,7 +1058,6 @@ const aihubmixModels: AIChatModelCard[] = [
998
1058
  description:
999
1059
  'Gemini 2.5 Pro 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
1000
1060
  displayName: 'Gemini 2.5 Pro',
1001
- enabled: true,
1002
1061
  id: 'gemini-2.5-pro',
1003
1062
  maxOutput: 65_536,
1004
1063
  pricing: {
@@ -123,8 +123,8 @@ const googleChatModels: AIChatModelCard[] = [
123
123
  },
124
124
  contextWindowTokens: 1_048_576 + 65_536,
125
125
  description:
126
- 'Gemini 3.0 Pro Preview 是 Google 最先进的思维模型,能够对代码、数学和STEM领域的复杂问题进行推理,以及使用长上下文分析大型数据集、代码库和文档。',
127
- displayName: 'Gemini 3.0 Pro Preview',
126
+ 'Gemini 3 Pro 是 全球最佳的多模态理解模型,也是 Google 迄今为止最强大的智能体和氛围编程模型,提供更丰富的视觉效果和更深层次的交互性,所有这些都建立在最先进的推理能力基础之上。',
127
+ displayName: 'Gemini 3 Pro Preview',
128
128
  enabled: true,
129
129
  id: 'gemini-3-pro-preview',
130
130
  maxOutput: 65_536,
@@ -134,8 +134,8 @@ const googleChatModels: AIChatModelCard[] = [
134
134
  name: 'textInput_cacheRead',
135
135
  strategy: 'tiered',
136
136
  tiers: [
137
- { rate: 0.20, upTo: 200_000 },
138
- { rate: 0.40, upTo: 'infinity' },
137
+ { rate: 0.2, upTo: 200_000 },
138
+ { rate: 0.4, upTo: 'infinity' },
139
139
  ],
140
140
  unit: 'millionTokens',
141
141
  },
@@ -143,8 +143,8 @@ const googleChatModels: AIChatModelCard[] = [
143
143
  name: 'textInput',
144
144
  strategy: 'tiered',
145
145
  tiers: [
146
- { rate: 2.0, upTo: 200_000 },
147
- { rate: 4.0, upTo: 'infinity' },
146
+ { rate: 2, upTo: 200_000 },
147
+ { rate: 4, upTo: 'infinity' },
148
148
  ],
149
149
  unit: 'millionTokens',
150
150
  },
@@ -152,16 +152,22 @@ const googleChatModels: AIChatModelCard[] = [
152
152
  name: 'textOutput',
153
153
  strategy: 'tiered',
154
154
  tiers: [
155
- { rate: 12.0, upTo: 200_000 },
156
- { rate: 18.0, upTo: 'infinity' },
155
+ { rate: 12, upTo: 200_000 },
156
+ { rate: 18, upTo: 'infinity' },
157
157
  ],
158
158
  unit: 'millionTokens',
159
159
  },
160
+ {
161
+ lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
162
+ name: 'textInput_cacheWrite',
163
+ strategy: 'lookup',
164
+ unit: 'millionTokens',
165
+ },
160
166
  ],
161
167
  },
162
168
  releasedAt: '2025-11-18',
163
169
  settings: {
164
- extendParams: ['thinkingBudget', 'urlContext'],
170
+ extendParams: ['thinkingLevel', 'urlContext'],
165
171
  searchImpl: 'params',
166
172
  searchProvider: 'google',
167
173
  },
@@ -211,6 +217,12 @@ const googleChatModels: AIChatModelCard[] = [
211
217
  ],
212
218
  unit: 'millionTokens',
213
219
  },
220
+ {
221
+ lookup: { prices: { '1h': 4.5 }, pricingParams: ['ttl'] },
222
+ name: 'textInput_cacheWrite',
223
+ strategy: 'lookup',
224
+ unit: 'millionTokens',
225
+ },
214
226
  ],
215
227
  },
216
228
  releasedAt: '2025-06-17',
@@ -383,34 +395,6 @@ const googleChatModels: AIChatModelCard[] = [
383
395
  },
384
396
  type: 'chat',
385
397
  },
386
- {
387
- abilities: {
388
- functionCall: true,
389
- reasoning: true,
390
- search: true,
391
- video: true,
392
- vision: true,
393
- },
394
- contextWindowTokens: 1_048_576 + 65_536,
395
- description: 'Gemini 2.5 Flash Preview 是 Google 性价比最高的模型,提供全面的功能。',
396
- displayName: 'Gemini 2.5 Flash Preview 05-20',
397
- id: 'gemini-2.5-flash-preview-05-20',
398
- maxOutput: 65_536,
399
- pricing: {
400
- units: [
401
- { name: 'textInput_cacheRead', rate: 0.0375, strategy: 'fixed', unit: 'millionTokens' },
402
- { name: 'textInput', rate: 0.15, strategy: 'fixed', unit: 'millionTokens' },
403
- { name: 'textOutput', rate: 3.5, strategy: 'fixed', unit: 'millionTokens' },
404
- ],
405
- },
406
- releasedAt: '2025-05-20',
407
- settings: {
408
- extendParams: ['thinkingBudget', 'urlContext'],
409
- searchImpl: 'params',
410
- searchProvider: 'google',
411
- },
412
- type: 'chat',
413
- },
414
398
  {
415
399
  abilities: {
416
400
  imageOutput: true,
@@ -514,35 +498,6 @@ const googleChatModels: AIChatModelCard[] = [
514
498
  },
515
499
  type: 'chat',
516
500
  },
517
- {
518
- abilities: {
519
- functionCall: true,
520
- reasoning: true,
521
- search: true,
522
- video: true,
523
- vision: true,
524
- },
525
- contextWindowTokens: 1_048_576 + 65_536,
526
- description:
527
- 'Gemini 2.5 Flash-Lite Preview 是 Google 最小、性价比最高的模型,专为大规模使用而设计。',
528
- displayName: 'Gemini 2.5 Flash-Lite Preview 06-17',
529
- id: 'gemini-2.5-flash-lite-preview-06-17',
530
- maxOutput: 65_536,
531
- pricing: {
532
- units: [
533
- { name: 'textInput_cacheRead', rate: 0.025, strategy: 'fixed', unit: 'millionTokens' },
534
- { name: 'textInput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
535
- { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
536
- ],
537
- },
538
- releasedAt: '2025-06-11',
539
- settings: {
540
- extendParams: ['thinkingBudget', 'urlContext'],
541
- searchImpl: 'params',
542
- searchProvider: 'google',
543
- },
544
- type: 'chat',
545
- },
546
501
  {
547
502
  abilities: {
548
503
  functionCall: true,
@@ -597,26 +552,6 @@ const googleChatModels: AIChatModelCard[] = [
597
552
  },
598
553
  type: 'chat',
599
554
  },
600
- {
601
- abilities: {
602
- imageOutput: true,
603
- vision: true,
604
- },
605
- contextWindowTokens: 32_768 + 8192,
606
- description: 'Gemini 2.0 Flash 预览模型,支持图像生成',
607
- displayName: 'Gemini 2.0 Flash Preview Image Generation',
608
- id: 'gemini-2.0-flash-preview-image-generation',
609
- maxOutput: 8192,
610
- pricing: {
611
- units: [
612
- { name: 'textInput', rate: 0.1, strategy: 'fixed', unit: 'millionTokens' },
613
- { name: 'textOutput', rate: 0.4, strategy: 'fixed', unit: 'millionTokens' },
614
- { name: 'imageGeneration', rate: 0.039, strategy: 'fixed', unit: 'image' },
615
- ],
616
- },
617
- releasedAt: '2025-05-07',
618
- type: 'chat',
619
- },
620
555
  {
621
556
  abilities: {
622
557
  imageOutput: true,
@@ -5,12 +5,14 @@ const ollamaCloudModels: AIChatModelCard[] = [
5
5
  abilities: {
6
6
  functionCall: true,
7
7
  reasoning: true,
8
+ vision: true,
8
9
  },
9
- contextWindowTokens: 200_000,
10
- description: 'MiniMax M2 是专为编码和代理工作流程构建的高效大型语言模型。',
11
- displayName: 'MiniMax M2',
10
+ contextWindowTokens: 1_048_576,
11
+ description:
12
+ 'Gemini 3 Pro 是 Google 最智能的模型,具有 SOTA 推理和多模式理解,以及强大的代理和氛围编码功能。',
13
+ displayName: 'Gemini 3 Pro Preview',
12
14
  enabled: true,
13
- id: 'minimax-m2',
15
+ id: 'gemini-3-pro-preview',
14
16
  type: 'chat',
15
17
  },
16
18
  {
@@ -19,8 +21,7 @@ const ollamaCloudModels: AIChatModelCard[] = [
19
21
  reasoning: true,
20
22
  },
21
23
  contextWindowTokens: 200_000,
22
- description:
23
- 'MiniMax M2 是专为编码和代理工作流程构建的高效大型语言模型。',
24
+ description: 'MiniMax M2 是专为编码和代理工作流程构建的高效大型语言模型。',
24
25
  displayName: 'MiniMax M2',
25
26
  enabled: true,
26
27
  id: 'minimax-m2',
@@ -242,6 +242,7 @@ export type ExtendParamsType =
242
242
  | 'textVerbosity'
243
243
  | 'thinking'
244
244
  | 'thinkingBudget'
245
+ | 'thinkingLevel'
245
246
  | 'urlContext';
246
247
 
247
248
  export interface AiModelSettings {