@lobehub/chat 1.96.12 → 1.96.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1871,6 +1871,9 @@
   "o3": {
     "description": "o3 is a powerful all-round model that performs excellently across many domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It is also good at technical writing and instruction following. Users can apply it to analyze text, code, and images and to solve complex multi-step problems."
   },
+  "o3-deep-research": {
+    "description": "o3-deep-research is our most advanced deep research model, designed specifically to handle complex multi-step research tasks. It can search for and synthesize information from the internet, and it can also access and leverage your own data through MCP connectors."
+  },
   "o3-mini": {
     "description": "o3-mini is our latest compact reasoning model, delivering high intelligence at cost and latency similar to o1-mini."
   },
@@ -1880,6 +1883,9 @@
   "o4-mini": {
     "description": "o4-mini is the latest compact model in our o-series. It is optimized for fast and efficient reasoning, demonstrating high performance and efficiency in coding and visual tasks."
   },
+  "o4-mini-deep-research": {
+    "description": "o4-mini-deep-research is our faster, more affordable deep research model, well suited for handling complex multi-step research tasks. It can search for and synthesize information from the internet, and it can also access and leverage your own data through MCP connectors."
+  },
   "open-codestral-mamba": {
     "description": "Codestral Mamba is a Mamba 2 language model focused on code generation, providing strong support for advanced code and reasoning tasks."
   },
@@ -1871,6 +1871,9 @@
   "o3": {
     "description": "o3 is a powerful all-round model that performs excellently across many domains. It sets a new benchmark for math, science, coding, and visual reasoning tasks. It is also skilled at technical writing and instruction following. Users can apply it to analyze text, code, and images and to solve complex multi-step problems."
   },
+  "o3-deep-research": {
+    "description": "o3-deep-research is our most advanced deep research model, designed for handling complex multi-step research tasks. It can search and synthesize information from the internet, and it can also access and leverage your own data through MCP connectors."
+  },
   "o3-mini": {
     "description": "o3-mini is our latest compact reasoning model, delivering high intelligence at the same cost and latency targets as o1-mini."
   },
@@ -1880,6 +1883,9 @@
   "o4-mini": {
     "description": "o4-mini is the latest compact model in our o-series. It is optimized for fast and effective reasoning, showing extremely high efficiency and performance in coding and visual tasks."
   },
+  "o4-mini-deep-research": {
+    "description": "o4-mini-deep-research is our faster, more affordable deep research model, well suited for handling complex multi-step research tasks. It can search and synthesize information from the internet, and it can also access and leverage your own data through MCP connectors."
+  },
   "open-codestral-mamba": {
     "description": "Codestral Mamba is a Mamba 2 language model focused on code generation, providing strong support for advanced code and reasoning tasks."
   },
@@ -1871,6 +1871,9 @@
   "o3": {
     "description": "o3 is a powerful all-round model that performs excellently across many domains. It sets a new benchmark for math, science, coding, and visual reasoning tasks. It is also skilled at technical writing and instruction following. Users can apply it to analyze text, code, and images and to solve complex multi-step problems."
   },
+  "o3-deep-research": {
+    "description": "o3-deep-research is our most advanced deep research model, designed for handling complex multi-step research tasks. It can search and synthesize information from the web, and it can also access and leverage your own data through MCP connectors."
+  },
   "o3-mini": {
     "description": "o3-mini is our latest compact reasoning model, delivering high intelligence at the same cost and latency targets as o1-mini."
   },
@@ -1880,6 +1883,9 @@
   "o4-mini": {
     "description": "o4-mini is the latest compact model in our o-series. It is optimized for fast and effective reasoning, showing extremely high efficiency and performance in coding and visual tasks."
   },
+  "o4-mini-deep-research": {
+    "description": "o4-mini-deep-research is our faster, more affordable deep research model, well suited for handling complex multi-step research tasks. It can search and synthesize information from the web, and it can also access and leverage your own data through MCP connectors."
+  },
   "open-codestral-mamba": {
     "description": "Codestral Mamba is a Mamba 2 language model focused on code generation, providing strong support for advanced code and reasoning tasks."
   },
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.96.12",
+  "version": "1.96.14",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -135,8 +135,7 @@
     "@codesandbox/sandpack-react": "^2.20.0",
     "@cyntler/react-doc-viewer": "^1.17.0",
     "@electric-sql/pglite": "0.2.17",
-    "@google-cloud/vertexai": "^1.10.0",
-    "@google/generative-ai": "^0.24.1",
+    "@google/genai": "^1.6.0",
     "@huggingface/inference": "^2.8.1",
     "@icons-pack/react-simple-icons": "9.6.0",
    "@khmyznikov/pwa-install": "0.3.9",
@@ -1,9 +1,10 @@
 // @vitest-environment edge-runtime
-import { FunctionDeclarationsTool } from '@google/generative-ai';
+import { GenerateContentResponse, Tool } from '@google/genai';
 import OpenAI from 'openai';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

 import { OpenAIChatMessage } from '@/libs/model-runtime';
+import { ChatStreamPayload } from '@/types/openai/chat';
 import * as imageToBase64Module from '@/utils/imageToBase64';

 import * as debugStreamModule from '../utils/debugStream';
@@ -22,9 +23,8 @@ beforeEach(() => {
   instance = new LobeGoogleAI({ apiKey: 'test' });

   // Use vi.spyOn to mock the chat.completions.create method
-  vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-    generateContentStream: vi.fn().mockResolvedValue(new ReadableStream()),
-  } as any);
+  const mockStreamData = (async function* (): AsyncGenerator<GenerateContentResponse> {})();
+  vi.spyOn(instance['client'].models, 'generateContentStream').mockResolvedValue(mockStreamData);
 });

 afterEach(() => {
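Note how the mock changes shape with the SDK: on the unified client, `generateContentStream` resolves to an async iterable of `GenerateContentResponse` chunks rather than a `{ stream }` wrapper object, so an async generator stands in for it. A standalone sketch of the pattern (the bare client here is illustrative; the suite spies on `instance['client']`):

```ts
import { GenerateContentResponse, GoogleGenAI } from '@google/genai';
import { vi } from 'vitest';

const client = new GoogleGenAI({ apiKey: 'test' });

// An empty async generator satisfies AsyncGenerator<GenerateContentResponse>,
// which is what the real generateContentStream resolves to.
const emptyStream = (async function* (): AsyncGenerator<GenerateContentResponse> {})();

vi.spyOn(client.models, 'generateContentStream').mockResolvedValue(emptyStream);
```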
@@ -60,9 +60,9 @@ describe('LobeGoogleAI', () => {
           controller.close();
         },
       });
-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockResolvedValueOnce(mockStream),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockResolvedValue(
+        mockStream as any,
+      );

       const result = await instance.chat({
         messages: [{ content: 'Hello', role: 'user' }],
@@ -208,9 +208,9 @@ describe('LobeGoogleAI', () => {
         },
       });

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockResolvedValueOnce(mockStream),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockResolvedValue(
+        mockStream as any,
+      );
     });

     it('should call debugStream in DEBUG mode', async () => {
@@ -224,9 +224,9 @@ describe('LobeGoogleAI', () => {
           controller.close();
         },
       });
-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockResolvedValueOnce(mockStream),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockResolvedValue(
+        mockStream as any,
+      );
       const debugStreamSpy = vi
         .spyOn(debugStreamModule, 'debugStream')
         .mockImplementation(() => Promise.resolve());
@@ -250,9 +250,7 @@ describe('LobeGoogleAI', () => {

       const apiError = new Error(message);

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(apiError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);

       try {
         await instance.chat({
@@ -271,9 +269,7 @@ describe('LobeGoogleAI', () => {

       const apiError = new Error(message);

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(apiError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);

       try {
         await instance.chat({
@@ -292,9 +288,7 @@ describe('LobeGoogleAI', () => {

       const apiError = new Error(message);

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(apiError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);

       try {
         await instance.chat({
@@ -326,9 +320,7 @@ describe('LobeGoogleAI', () => {

       const apiError = new Error(message);

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(apiError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);

       try {
         await instance.chat({
@@ -354,9 +346,7 @@ describe('LobeGoogleAI', () => {
       const apiError = new Error('Error message');

       // Use vi.spyOn to mock the chat.completions.create method
-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(apiError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);

       // Act
       try {
@@ -392,9 +382,7 @@ describe('LobeGoogleAI', () => {
       };
       const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(apiError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);

       // Act
       try {
@@ -418,9 +406,9 @@ describe('LobeGoogleAI', () => {
       // Arrange
       const genericError = new Error('Generic Error');

-      vi.spyOn(instance['client'], 'getGenerativeModel').mockReturnValue({
-        generateContentStream: vi.fn().mockRejectedValue(genericError),
-      } as any);
+      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(
+        genericError,
+      );

       // Act
       try {
@@ -642,11 +630,11 @@ describe('LobeGoogleAI', () => {
       const googleTools = instance['buildGoogleTools'](tools);

       expect(googleTools).toHaveLength(1);
-      expect((googleTools![0] as FunctionDeclarationsTool).functionDeclarations![0]).toEqual({
+      expect((googleTools![0] as Tool).functionDeclarations![0]).toEqual({
         name: 'testTool',
         description: 'A test tool',
         parameters: {
-          type: 'object',
+          type: 'OBJECT',
           properties: {
             param1: { type: 'string' },
             param2: { type: 'number' },
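The expected `type` flips from `'object'` to `'OBJECT'` because `@google/genai` models schema types as an uppercase enum (imported in the runtime below as `Type as SchemaType`), while the legacy SDK's `SchemaType` used lowercase JSON-Schema strings. A quick illustration:

```ts
import { Type } from '@google/genai';

// Enum members serialize to uppercase strings.
console.log(Type.OBJECT); // 'OBJECT'
console.log(Type.STRING); // 'STRING'
```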
@@ -655,6 +643,75 @@ describe('LobeGoogleAI', () => {
         },
       });
     });
+
+    it('should also add tools when tool_calls exists', () => {
+      const tools: OpenAI.ChatCompletionTool[] = [
+        {
+          function: {
+            name: 'testTool',
+            description: 'A test tool',
+            parameters: {
+              type: 'object',
+              properties: {
+                param1: { type: 'string' },
+                param2: { type: 'number' },
+              },
+              required: ['param1'],
+            },
+          },
+          type: 'function',
+        },
+      ];
+
+      const payload: ChatStreamPayload = {
+        messages: [
+          {
+            role: 'user',
+            content: '',
+            tool_calls: [
+              { function: { name: 'some_func', arguments: '' }, id: 'func_1', type: 'function' },
+            ],
+          },
+        ],
+        model: 'gemini-2.5-flash-preview-04-17',
+        temperature: 1,
+      };
+
+      const googleTools = instance['buildGoogleTools'](tools, payload);
+
+      expect(googleTools).toHaveLength(1);
+      expect((googleTools![0] as Tool).functionDeclarations![0]).toEqual({
+        name: 'testTool',
+        description: 'A test tool',
+        parameters: {
+          type: 'OBJECT',
+          properties: {
+            param1: { type: 'string' },
+            param2: { type: 'number' },
+          },
+          required: ['param1'],
+        },
+      });
+    });
+
+    it('should handle googleSearch', () => {
+      const payload: ChatStreamPayload = {
+        messages: [
+          {
+            role: 'user',
+            content: '',
+          },
+        ],
+        model: 'gemini-2.5-flash-preview-04-17',
+        temperature: 1,
+        enabledSearch: true,
+      };
+
+      const googleTools = instance['buildGoogleTools'](undefined, payload);
+
+      expect(googleTools).toHaveLength(1);
+      expect(googleTools![0] as Tool).toEqual({ googleSearch: {} });
+    });
   });

   describe('convertOAIMessagesToGoogleMessage', () => {
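The new `googleSearch` test pins down how search grounding is requested in the unified SDK: a bare `{ googleSearch: {} }` entry in the tools array, with no per-tool configuration. A minimal sketch of the equivalent request (model id and prompt are illustrative):

```ts
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({ apiKey: process.env.GOOGLE_API_KEY });

const response = await ai.models.generateContent({
  // Grounding with Google Search is toggled purely by the tool's presence.
  config: { tools: [{ googleSearch: {} }] },
  contents: 'What is the latest stable Gemini model?',
  model: 'gemini-2.0-flash', // illustrative
});

console.log(response.text);
```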
@@ -1,14 +1,13 @@
-import type { VertexAI } from '@google-cloud/vertexai';
 import {
   Content,
-  FunctionCallPart,
   FunctionDeclaration,
+  GenerateContentConfig,
   Tool as GoogleFunctionCallTool,
-  GoogleGenerativeAI,
-  GoogleSearchRetrievalTool,
+  GoogleGenAI,
   Part,
-  SchemaType,
-} from '@google/generative-ai';
+  Type as SchemaType,
+  ThinkingConfig,
+} from '@google/genai';

 import { imageUrlToBase64 } from '@/utils/imageToBase64';
 import { safeParseJSON } from '@/utils/safeParseJSON';
@@ -77,16 +76,11 @@ const DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com';
 interface LobeGoogleAIParams {
   apiKey?: string;
   baseURL?: string;
-  client?: GoogleGenerativeAI | VertexAI;
+  client?: GoogleGenAI;
   id?: string;
   isVertexAi?: boolean;
 }

-interface GoogleAIThinkingConfig {
-  includeThoughts?: boolean;
-  thinkingBudget?: number;
-}
-
 const isAbortError = (error: Error): boolean => {
   const message = error.message.toLowerCase();
   return (
@@ -99,7 +93,7 @@ const isAbortError = (error: Error): boolean => {
 };

 export class LobeGoogleAI implements LobeRuntimeAI {
-  private client: GoogleGenerativeAI;
+  private client: GoogleGenAI;
   private isVertexAi: boolean;
   baseURL?: string;
   apiKey?: string;
@@ -108,9 +102,10 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   constructor({ apiKey, baseURL, client, isVertexAi, id }: LobeGoogleAIParams = {}) {
     if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);

-    this.client = new GoogleGenerativeAI(apiKey);
+    const httpOptions = baseURL ? { baseUrl: baseURL } : undefined;
+
     this.apiKey = apiKey;
-    this.client = client ? (client as GoogleGenerativeAI) : new GoogleGenerativeAI(apiKey);
+    this.client = client ? client : new GoogleGenAI({ apiKey, httpOptions });
     this.baseURL = client ? undefined : baseURL || DEFAULT_BASE_URL;
     this.isVertexAi = isVertexAi || false;

@@ -122,7 +117,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     const payload = this.buildPayload(rawPayload);
     const { model, thinkingBudget } = payload;

-    const thinkingConfig: GoogleAIThinkingConfig = {
+    const thinkingConfig: ThinkingConfig = {
       includeThoughts:
         !!thinkingBudget ||
         (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
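`ThinkingConfig` is now the SDK's own type rather than the hand-rolled `GoogleAIThinkingConfig` interface deleted above. A minimal sketch of the two fields the runtime sets, with illustrative values:

```ts
import { ThinkingConfig } from '@google/genai';

const thinkingConfig: ThinkingConfig = {
  includeThoughts: true, // surface thought summaries in the stream
  thinkingBudget: 1024, // cap tokens spent on reasoning (model-dependent)
};
```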
@@ -155,8 +150,6 @@

     const contents = await this.buildGoogleMessages(payload.messages);

-    const inputStartAt = Date.now();
-
     const controller = new AbortController();
     const originalSignal = options?.signal;
@@ -170,57 +163,50 @@
       }
     }

-    const geminiStreamResult = await this.client
-      .getGenerativeModel(
+    const config: GenerateContentConfig = {
+      abortSignal: originalSignal,
+      maxOutputTokens: payload.max_tokens,
+      responseModalities: modelsWithModalities.has(model) ? ['Text', 'Image'] : undefined,
+      // avoid wide sensitive words
+      // refs: https://github.com/lobehub/lobe-chat/pull/1418
+      safetySettings: [
         {
-          generationConfig: {
-            maxOutputTokens: payload.max_tokens,
-            // @ts-expect-error - Google SDK 0.24.0 doesn't have this property for now with
-            response_modalities: modelsWithModalities.has(model) ? ['Text', 'Image'] : undefined,
-            temperature: payload.temperature,
-            topP: payload.top_p,
-            ...(modelsDisableInstuction.has(model) || model.toLowerCase().includes('learnlm')
-              ? {}
-              : { thinkingConfig }),
-          },
-          model,
-          // avoid wide sensitive words
-          // refs: https://github.com/lobehub/lobe-chat/pull/1418
-          safetySettings: [
-            {
-              category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-              threshold: getThreshold(model),
-            },
-            {
-              category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-              threshold: getThreshold(model),
-            },
-            {
-              category: HarmCategory.HARM_CATEGORY_HARASSMENT,
-              threshold: getThreshold(model),
-            },
-            {
-              category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
-              threshold: getThreshold(model),
-            },
-          ],
+          category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+          threshold: getThreshold(model),
+        },
+        {
+          category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+          threshold: getThreshold(model),
         },
-        { apiVersion: 'v1beta', baseUrl: this.baseURL },
-      )
-      .generateContentStream(
         {
-          contents,
-          systemInstruction: modelsDisableInstuction.has(model)
-            ? undefined
-            : (payload.system as string),
-          tools: this.buildGoogleTools(payload.tools, payload),
+          category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+          threshold: getThreshold(model),
         },
         {
-          signal: controller.signal,
+          category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+          threshold: getThreshold(model),
         },
-      );
+      ],
+      systemInstruction: modelsDisableInstuction.has(model)
+        ? undefined
+        : (payload.system as string),
+      temperature: payload.temperature,
+      thinkingConfig:
+        modelsDisableInstuction.has(model) || model.toLowerCase().includes('learnlm')
+          ? undefined
+          : thinkingConfig,
+      tools: this.buildGoogleTools(payload.tools, payload),
+      topP: payload.top_p,
+    };
+
+    const inputStartAt = Date.now();
+    const geminiStreamResponse = await this.client.models.generateContentStream({
+      config,
+      contents,
+      model,
+    });

-    const googleStream = this.createEnhancedStream(geminiStreamResult.stream, controller.signal);
+    const googleStream = this.createEnhancedStream(geminiStreamResponse, controller.signal);
     const [prod, useForDebug] = googleStream.tee();

     const key = this.isVertexAi
@@ -405,7 +391,7 @@
     const content = message.content as string | UserMessageContentPart[];
     if (!!message.tool_calls) {
       return {
-        parts: message.tool_calls.map<FunctionCallPart>((tool) => ({
+        parts: message.tool_calls.map<Part>((tool) => ({
           functionCall: {
             args: safeParseJSON(tool.function.arguments)!,
             name: tool.function.name,
@@ -527,7 +513,7 @@
       return this.buildFunctionDeclarations(tools);
     }
     if (payload?.enabledSearch) {
-      return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
+      return [{ googleSearch: {} }];
     }

     return this.buildFunctionDeclarations(tools);