@lobehub/chat 1.96.8 → 1.96.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.96.9](https://github.com/lobehub/lobe-chat/compare/v1.96.8...v1.96.9)
+
+<sup>Released on **2025-06-23**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Google Gemini tools declarations.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Google Gemini tools declarations, closes [#8256](https://github.com/lobehub/lobe-chat/issues/8256) ([08f5d73](https://github.com/lobehub/lobe-chat/commit/08f5d73))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.96.8](https://github.com/lobehub/lobe-chat/compare/v1.96.7...v1.96.8)
 
 <sup>Released on **2025-06-23**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Google Gemini tools declarations."
+      ]
+    },
+    "date": "2025-06-23",
+    "version": "1.96.9"
+  },
   {
     "children": {
       "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.96.8",
+  "version": "1.96.9",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -560,6 +560,58 @@ describe('LobeGoogleAI', () => {
         },
       ]);
     });
+
+    it('should correctly convert function response message', async () => {
+      const messages: OpenAIChatMessage[] = [
+        {
+          content: '',
+          role: 'assistant',
+          tool_calls: [
+            {
+              id: 'call_1',
+              function: {
+                name: 'get_current_weather',
+                arguments: JSON.stringify({ location: 'London', unit: 'celsius' }),
+              },
+              type: 'function',
+            },
+          ],
+        },
+        {
+          content: '{"success":true,"data":{"temperature":"14°C"}}',
+          name: 'get_current_weather',
+          role: 'tool',
+          tool_call_id: 'call_1',
+        },
+      ];
+
+      const contents = await instance['buildGoogleMessages'](messages);
+      expect(contents).toHaveLength(2);
+      expect(contents).toEqual([
+        {
+          parts: [
+            {
+              functionCall: {
+                args: { location: 'London', unit: 'celsius' },
+                name: 'get_current_weather',
+              },
+            },
+          ],
+          role: 'model',
+        },
+        {
+          parts: [
+            {
+              functionResponse: {
+                name: 'get_current_weather',
+                response: { result: '{"success":true,"data":{"temperature":"14°C"}}' },
+              },
+            },
+          ],
+          role: 'user',
+        },
+      ]);
+    });
   });
 
   describe('buildGoogleTools', () => {
@@ -690,7 +742,7 @@ describe('LobeGoogleAI', () => {
 
       const converted = await instance['convertOAIMessagesToGoogleMessage'](message);
       expect(converted).toEqual({
-        role: 'function',
+        role: 'model',
         parts: [
           {
             functionCall: {
@@ -353,6 +353,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
       system: system_message?.content,
     };
   }
+
   private convertContentToGooglePart = async (
     content: UserMessageContentPart,
   ): Promise<Part | undefined> => {
@@ -399,6 +400,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
   private convertOAIMessagesToGoogleMessage = async (
     message: OpenAIChatMessage,
+    toolCallNameMap?: Map<string, string>,
   ): Promise<Content> => {
     const content = message.content as string | UserMessageContentPart[];
     if (!!message.tool_calls) {
@@ -409,10 +411,28 @@ export class LobeGoogleAI implements LobeRuntimeAI {
             name: tool.function.name,
           },
         })),
-        role: 'function',
+        role: 'model',
       };
     }
 
+    // Convert the tool_call result into a functionResponse part
+    if (message.role === 'tool' && toolCallNameMap && message.tool_call_id) {
+      const functionName = toolCallNameMap.get(message.tool_call_id);
+      if (functionName) {
+        return {
+          parts: [
+            {
+              functionResponse: {
+                name: functionName,
+                response: { result: message.content },
+              },
+            },
+          ],
+          role: 'user',
+        };
+      }
+    }
+
     const getParts = async () => {
       if (typeof content === 'string') return [{ text: content }];
 
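The added branch above is the core of the conversion fix: an OpenAI-style `tool` message is resolved through `toolCallNameMap` by its `tool_call_id` and re-emitted as a Gemini `functionResponse` part on a `user`-role turn. A minimal standalone sketch of that mapping, with simplified stand-ins for the Google GenAI SDK's `Content`/`Part` types (the helper name is ours, not the library's):

```ts
// Simplified stand-ins for the SDK's Part/Content shapes (assumption for illustration).
interface Part {
  functionResponse?: { name: string; response: { result: unknown } };
  text?: string;
}
interface Content {
  parts: Part[];
  role: 'model' | 'user';
}

// Hypothetical helper mirroring the new branch: resolve the function name by
// tool_call_id, then wrap the tool result as a functionResponse part.
const toolMessageToGoogleContent = (
  msg: { content: string; role: 'tool'; tool_call_id: string },
  toolCallNameMap: Map<string, string>,
): Content | undefined => {
  const name = toolCallNameMap.get(msg.tool_call_id);
  if (!name) return undefined; // unknown id: caller falls through to plain-text handling
  return {
    // Gemini pairs a model-turn functionCall with a user-turn functionResponse
    parts: [{ functionResponse: { name, response: { result: msg.content } } }],
    role: 'user',
  };
};
```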
@@ -430,9 +450,20 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
   // convert messages from the OpenAI format to Google GenAI SDK
   private buildGoogleMessages = async (messages: OpenAIChatMessage[]): Promise<Content[]> => {
+    const toolCallNameMap = new Map<string, string>();
+    messages.forEach((message) => {
+      if (message.role === 'assistant' && message.tool_calls) {
+        message.tool_calls.forEach((toolCall) => {
+          if (toolCall.type === 'function') {
+            toolCallNameMap.set(toolCall.id, toolCall.function.name);
+          }
+        });
+      }
+    });
+
     const pools = messages
       .filter((message) => message.role !== 'function')
-      .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg));
+      .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg, toolCallNameMap));
 
     return Promise.all(pools);
   };
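`buildGoogleMessages` therefore works in two passes: the first walks the history and records every assistant `tool_call` id against its function name; the second converts each message with that map in hand, so a `tool` result can be paired with the call that produced it. A short usage sketch under the same simplified shapes as above:

```ts
// First pass over an OpenAI-format history (message shapes assumed for illustration).
const assistantTurn = {
  role: 'assistant' as const,
  tool_calls: [
    { function: { arguments: '{}', name: 'get_current_weather' }, id: 'call_1', type: 'function' as const },
  ],
};
const toolTurn = { content: '{"temperature":"14°C"}', role: 'tool' as const, tool_call_id: 'call_1' };

const toolCallNameMap = new Map<string, string>();
for (const call of assistantTurn.tool_calls) {
  if (call.type === 'function') toolCallNameMap.set(call.id, call.function.name);
}

// Second pass: the tool result resolves to a functionResponse on the user turn.
const converted = toolMessageToGoogleContent(toolTurn, toolCallNameMap);
console.log(converted?.role); // "user"
console.log(converted?.parts[0].functionResponse?.name); // "get_current_weather"
```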
@@ -493,12 +524,18 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   ): GoogleFunctionCallTool[] | undefined {
     // For now, Tools (e.g. googleSearch) cannot be used together with other FunctionCall declarations
     if (payload?.messages?.some((m) => m.tool_calls?.length)) {
-      return; // If the message history already contains function calling, do not inject any Tools
+      return this.buildFunctionDeclarations(tools);
     }
     if (payload?.enabledSearch) {
       return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
     }
 
+    return this.buildFunctionDeclarations(tools);
+  }
+
+  private buildFunctionDeclarations(
+    tools: ChatCompletionTool[] | undefined,
+  ): GoogleFunctionCallTool[] | undefined {
     if (!tools || tools.length === 0) return;
 
     return [
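This is the hunk that closes #8256. Previously, as soon as the history contained a tool call, `buildGoogleTools` returned `undefined`, so Gemini never received the function declarations again and multi-turn tool use broke; the declarations are now rebuilt through the extracted `buildFunctionDeclarations` helper on both paths, while `googleSearch` remains mutually exclusive with function calling. A hedged sketch of the corrected selection logic (type shapes simplified, not the SDK's exact definitions):

```ts
type FunctionDeclaration = { description?: string; name: string; parameters?: object };
type GoogleTool = { functionDeclarations: FunctionDeclaration[] } | { googleSearch: object };

const buildGoogleTools = (
  declarations: FunctionDeclaration[] | undefined,
  opts: { enabledSearch?: boolean; historyHasToolCalls?: boolean } = {},
): GoogleTool[] | undefined => {
  const functionTools = declarations?.length
    ? [{ functionDeclarations: declarations }]
    : undefined;

  // Before the fix this branch returned `undefined`, silently dropping the
  // declarations after the first function call in the conversation (#8256).
  if (opts.historyHasToolCalls) return functionTools;

  // googleSearch still cannot be combined with function declarations.
  if (opts.enabledSearch) return [{ googleSearch: {} }];

  return functionTools;
};

// Follow-up turn with prior tool calls: declarations are still sent.
console.log(buildGoogleTools([{ name: 'get_current_weather' }], { historyHasToolCalls: true }));
```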
@@ -7,7 +7,7 @@ import { GoogleGenerativeAIStream } from './google-ai';
 
 describe('GoogleGenerativeAIStream', () => {
   it('should transform Google Generative AI stream to protocol stream', async () => {
-    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
 
     const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
       ({
@@ -59,7 +59,7 @@ describe('GoogleGenerativeAIStream', () => {
       // tool call
       'id: chat_1\n',
       'event: tool_calls\n',
-      `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0","index":0,"type":"function"}]\n\n`,
+      `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0_abcd1234","index":0,"type":"function"}]\n\n`,
 
       // text
       'id: chat_1\n',
@@ -30,7 +30,7 @@ describe('OllamaStream', () => {
     });
 
     const protocolStream = OllamaStream(mockOllamaStream);
-
+
     const decoder = new TextDecoder();
     const chunks = [];
 
@@ -62,7 +62,7 @@ describe('OllamaStream', () => {
         'id: chat_2',
         'event: stop',
         `data: "finished"\n`,
-      ].map((line) => `${line}\n`)
+      ].map((line) => `${line}\n`),
     );
   });
 
@@ -116,7 +116,7 @@ describe('OllamaStream', () => {
   });
 
   it('tools use', async () => {
-    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
 
     const mockOllamaStream = new ReadableStream<ChatResponse>({
       start(controller) {
@@ -178,7 +178,7 @@ describe('OllamaStream', () => {
       [
         'id: chat_1',
         'event: tool_calls',
-        `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0","index":0,"type":"function"}]\n`,
+        `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n`,
         'id: chat_1',
         'event: stop',
         `data: "finished"\n`,
@@ -1,5 +1,6 @@
 import { CitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message';
 import { safeParseJSON } from '@/utils/safeParseJSON';
+import { nanoid } from '@/utils/uuid';
 
 import { AgentRuntimeErrorType } from '../../error';
 import { parseToolCalls } from '../../helpers';
@@ -98,7 +99,7 @@ export interface StreamProtocolToolCallChunk {
 }
 
 export const generateToolCallId = (index: number, functionName?: string) =>
-  `${functionName || 'unknown_tool_call'}_${index}`;
+  `${functionName || 'unknown_tool_call'}_${index}_${nanoid()}`;
 
 const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
   for await (const response of stream) {
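Appending `nanoid()` makes each generated tool-call id unique even when the same function is called at the same stream index on a later turn, which is what lets a `tool` result map back to exactly one `functionCall` in the new conversion path. It also explains the test updates above and below: each stream now consumes two mocked `nanoid` values, one for the chat id and one for the tool-call id suffix. A small illustration (assuming `@/utils/uuid`'s `nanoid` behaves like the standard nanoid):

```ts
import { customAlphabet } from 'nanoid';

// Stand-in for '@/utils/uuid' (assumption): any random suffix breaks the
// collision between identical (functionName, index) pairs.
const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz0123456789', 8);

const generateToolCallId = (index: number, functionName?: string) =>
  `${functionName || 'unknown_tool_call'}_${index}_${nanoid()}`;

// The old format, 'get_current_weather_0', was identical across turns;
// the new ids differ on every call:
console.log(generateToolCallId(0, 'get_current_weather')); // e.g. get_current_weather_0_k3j9x2ab
console.log(generateToolCallId(0, 'get_current_weather')); // e.g. get_current_weather_0_7mq0zt4c
```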
@@ -137,7 +137,7 @@ describe('VertexAIStream', () => {
   });
 
   it('tool_calls', async () => {
-    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+    vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
 
     const rawChunks = [
       {
@@ -227,7 +227,7 @@ describe('VertexAIStream', () => {
       // text
       'id: chat_1\n',
       'event: tool_calls\n',
-      `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0","index":0,"type":"function"}]\n\n`,
+      `data: [{"function":{"arguments":"{\\"city\\":\\"杭州\\"}","name":"realtime-weather____fetchCurrentWeather"},"id":"realtime-weather____fetchCurrentWeather_0_abcd1234","index":0,"type":"function"}]\n\n`,
       'id: chat_1\n',
       'event: stop\n',
       'data: "STOP"\n\n',