@lobehub/chat 1.96.13 → 1.96.14

This diff compares publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
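In this release the Google provider runtime moves from the `@google/generative-ai` SDK to `@google/genai`: the `GoogleGenerativeAI` client becomes `GoogleGenAI`, the per-model `generationConfig` options are folded into a single `GenerateContentConfig`, and streaming is invoked through `client.models.generateContentStream`. A minimal sketch of the call shape after the migration, using only APIs visible in the diff below (the model name, prompt, and option values are placeholders):

```ts
import { GoogleGenAI } from '@google/genai';

// The base URL now travels via httpOptions on the client constructor
// rather than a per-model options object. The apiKey value is a placeholder.
const client = new GoogleGenAI({
  apiKey: process.env.GOOGLE_API_KEY!,
  httpOptions: { baseUrl: 'https://generativelanguage.googleapis.com' },
});

// Generation options, safety settings, system instruction, and tools all
// live in one GenerateContentConfig passed alongside model and contents.
const stream = await client.models.generateContentStream({
  config: { maxOutputTokens: 1024, temperature: 1, topP: 0.95 },
  contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
  model: 'gemini-2.5-flash-preview-04-17',
});

// The response is itself async-iterable; there is no `.stream` property to
// unwrap, and `text` is a property rather than a method on each chunk.
for await (const chunk of stream) {
  process.stdout.write(chunk.text ?? '');
}
```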
@@ -1,14 +1,13 @@
- import type { VertexAI } from '@google-cloud/vertexai';
  import {
  Content,
- FunctionCallPart,
  FunctionDeclaration,
+ GenerateContentConfig,
  Tool as GoogleFunctionCallTool,
- GoogleGenerativeAI,
- GoogleSearchRetrievalTool,
+ GoogleGenAI,
  Part,
- SchemaType,
- } from '@google/generative-ai';
+ Type as SchemaType,
+ ThinkingConfig,
+ } from '@google/genai';
 
  import { imageUrlToBase64 } from '@/utils/imageToBase64';
  import { safeParseJSON } from '@/utils/safeParseJSON';
@@ -77,16 +76,11 @@ const DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com';
  interface LobeGoogleAIParams {
  apiKey?: string;
  baseURL?: string;
- client?: GoogleGenerativeAI | VertexAI;
+ client?: GoogleGenAI;
  id?: string;
  isVertexAi?: boolean;
  }
 
- interface GoogleAIThinkingConfig {
- includeThoughts?: boolean;
- thinkingBudget?: number;
- }
-
  const isAbortError = (error: Error): boolean => {
  const message = error.message.toLowerCase();
  return (
@@ -99,7 +93,7 @@ const isAbortError = (error: Error): boolean => {
  };
 
  export class LobeGoogleAI implements LobeRuntimeAI {
- private client: GoogleGenerativeAI;
+ private client: GoogleGenAI;
  private isVertexAi: boolean;
  baseURL?: string;
  apiKey?: string;
@@ -108,9 +102,10 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  constructor({ apiKey, baseURL, client, isVertexAi, id }: LobeGoogleAIParams = {}) {
  if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
- this.client = new GoogleGenerativeAI(apiKey);
+ const httpOptions = baseURL ? { baseUrl: baseURL } : undefined;
+
  this.apiKey = apiKey;
- this.client = client ? (client as GoogleGenerativeAI) : new GoogleGenerativeAI(apiKey);
+ this.client = client ? client : new GoogleGenAI({ apiKey, httpOptions });
  this.baseURL = client ? undefined : baseURL || DEFAULT_BASE_URL;
  this.isVertexAi = isVertexAi || false;
 
@@ -122,7 +117,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  const payload = this.buildPayload(rawPayload);
  const { model, thinkingBudget } = payload;
 
- const thinkingConfig: GoogleAIThinkingConfig = {
+ const thinkingConfig: ThinkingConfig = {
  includeThoughts:
  !!thinkingBudget ||
  (!thinkingBudget && model && (model.includes('-2.5-') || model.includes('thinking')))
@@ -155,8 +150,6 @@ export class LobeGoogleAI implements LobeRuntimeAI {
 
  const contents = await this.buildGoogleMessages(payload.messages);
 
- const inputStartAt = Date.now();
-
  const controller = new AbortController();
  const originalSignal = options?.signal;
 
@@ -170,57 +163,50 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  }
  }
 
- const geminiStreamResult = await this.client
- .getGenerativeModel(
+ const config: GenerateContentConfig = {
+ abortSignal: originalSignal,
+ maxOutputTokens: payload.max_tokens,
+ responseModalities: modelsWithModalities.has(model) ? ['Text', 'Image'] : undefined,
+ // avoid wide sensitive words
+ // refs: https://github.com/lobehub/lobe-chat/pull/1418
+ safetySettings: [
  {
- generationConfig: {
- maxOutputTokens: payload.max_tokens,
- // @ts-expect-error - Google SDK 0.24.0 doesn't have this property for now with
- response_modalities: modelsWithModalities.has(model) ? ['Text', 'Image'] : undefined,
- temperature: payload.temperature,
- topP: payload.top_p,
- ...(modelsDisableInstuction.has(model) || model.toLowerCase().includes('learnlm')
- ? {}
- : { thinkingConfig }),
- },
- model,
- // avoid wide sensitive words
- // refs: https://github.com/lobehub/lobe-chat/pull/1418
- safetySettings: [
- {
- category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
- threshold: getThreshold(model),
- },
- {
- category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
- threshold: getThreshold(model),
- },
- {
- category: HarmCategory.HARM_CATEGORY_HARASSMENT,
- threshold: getThreshold(model),
- },
- {
- category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
- threshold: getThreshold(model),
- },
- ],
+ category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+ threshold: getThreshold(model),
+ },
+ {
+ category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+ threshold: getThreshold(model),
  },
- { apiVersion: 'v1beta', baseUrl: this.baseURL },
- )
- .generateContentStream(
  {
- contents,
- systemInstruction: modelsDisableInstuction.has(model)
- ? undefined
- : (payload.system as string),
- tools: this.buildGoogleTools(payload.tools, payload),
+ category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+ threshold: getThreshold(model),
  },
  {
- signal: controller.signal,
+ category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+ threshold: getThreshold(model),
  },
- );
+ ],
+ systemInstruction: modelsDisableInstuction.has(model)
+ ? undefined
+ : (payload.system as string),
+ temperature: payload.temperature,
+ thinkingConfig:
+ modelsDisableInstuction.has(model) || model.toLowerCase().includes('learnlm')
+ ? undefined
+ : thinkingConfig,
+ tools: this.buildGoogleTools(payload.tools, payload),
+ topP: payload.top_p,
+ };
+
+ const inputStartAt = Date.now();
+ const geminiStreamResponse = await this.client.models.generateContentStream({
+ config,
+ contents,
+ model,
+ });
 
- const googleStream = this.createEnhancedStream(geminiStreamResult.stream, controller.signal);
+ const googleStream = this.createEnhancedStream(geminiStreamResponse, controller.signal);
  const [prod, useForDebug] = googleStream.tee();
 
  const key = this.isVertexAi
@@ -405,7 +391,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  const content = message.content as string | UserMessageContentPart[];
  if (!!message.tool_calls) {
  return {
- parts: message.tool_calls.map<FunctionCallPart>((tool) => ({
+ parts: message.tool_calls.map<Part>((tool) => ({
  functionCall: {
  args: safeParseJSON(tool.function.arguments)!,
  name: tool.function.name,
@@ -527,7 +513,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
  return this.buildFunctionDeclarations(tools);
  }
  if (payload?.enabledSearch) {
- return [{ googleSearch: {} } as GoogleSearchRetrievalTool];
+ return [{ googleSearch: {} }];
  }
 
  return this.buildFunctionDeclarations(tools);
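The test-file changes that follow track the same SDK swap: in `@google/genai`, `GenerateContentResponse` exposes `text` and `functionCalls` as plain accessor properties rather than methods, so the stream mocks shrink to object literals. A minimal sketch of the updated mock factory, taken directly from the diff below:

```ts
import type { GenerateContentResponse } from '@google/genai';

// Old SDK shape: { text: () => text, functionCalls: () => functionCalls }.
// The new SDK reads these as properties, so a plain object literal is enough;
// the double cast sidesteps the class's other members in tests.
const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
  ({ text, functionCalls }) as unknown as GenerateContentResponse;
```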
@@ -1,4 +1,4 @@
- import { EnhancedGenerateContentResponse } from '@google/generative-ai';
+ import { GenerateContentResponse } from '@google/genai';
  import { describe, expect, it, vi } from 'vitest';
 
  import * as uuidModule from '@/utils/uuid';
@@ -11,10 +11,9 @@ describe('GoogleGenerativeAIStream', () => {
 
  const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
  ({
- text: () => text,
- functionCall: () => functionCalls?.[0],
- functionCalls: () => functionCalls,
- }) as EnhancedGenerateContentResponse;
+ text: text,
+ functionCalls: functionCalls,
+ }) as unknown as GenerateContentResponse;
 
  const mockGoogleStream = new ReadableStream({
  start(controller) {
@@ -114,12 +113,6 @@ describe('GoogleGenerativeAIStream', () => {
  },
  modelVersion: 'gemini-2.0-flash-exp',
  };
- const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
- ({
- text: () => text,
- functionCall: () => functionCalls?.[0],
- functionCalls: () => functionCalls,
- }) as EnhancedGenerateContentResponse;
 
  const mockGoogleStream = new ReadableStream({
  start(controller) {
@@ -209,7 +202,7 @@ describe('GoogleGenerativeAIStream', () => {
  ],
  },
  ],
- text: () => '234',
+ text: '234',
  usageMetadata: {
  promptTokenCount: 20,
  totalTokenCount: 20,
@@ -218,7 +211,7 @@ describe('GoogleGenerativeAIStream', () => {
  modelVersion: 'gemini-2.0-flash-exp-image-generation',
  },
  {
- text: () => '567890\n',
+ text: '567890\n',
  candidates: [
  {
  content: { parts: [{ text: '567890\n' }], role: 'model' },
@@ -299,7 +292,7 @@ describe('GoogleGenerativeAIStream', () => {
  ],
  },
  ],
- text: () => '234',
+ text: '234',
  usageMetadata: {
  promptTokenCount: 19,
  candidatesTokenCount: 3,
@@ -307,10 +300,10 @@ describe('GoogleGenerativeAIStream', () => {
  promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
  thoughtsTokenCount: 100,
  },
- modelVersion: 'gemini-2.0-flash-exp-image-generation',
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
  },
  {
- text: () => '567890\n',
+ text: '567890\n',
  candidates: [
  {
  content: { parts: [{ text: '567890\n' }], role: 'model' },
@@ -331,7 +324,7 @@ describe('GoogleGenerativeAIStream', () => {
  candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
  thoughtsTokenCount: 100,
  },
- modelVersion: 'gemini-2.0-flash-exp-image-generation',
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
  },
  ];
 
@@ -375,4 +368,410 @@ describe('GoogleGenerativeAIStream', () => {
  ].map((i) => i + '\n'),
  );
  });
+
+ it('should handle thought candidate part', async () => {
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+ const data = [
+ {
+ candidates: [
+ {
+ content: {
+ parts: [{ text: '**Understanding the Conditional Logic**\n\n', thought: true }],
+ role: 'model',
+ },
+ index: 0,
+ },
+ ],
+ text: '**Understanding the Conditional Logic**\n\n',
+ usageMetadata: {
+ promptTokenCount: 38,
+ candidatesTokenCount: 7,
+ totalTokenCount: 301,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
+ thoughtsTokenCount: 256,
+ },
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
+ },
+ {
+ candidates: [
+ {
+ content: {
+ parts: [{ text: '**Finalizing Interpretation**\n\n', thought: true }],
+ role: 'model',
+ },
+ index: 0,
+ },
+ ],
+ text: '**Finalizing Interpretation**\n\n',
+ usageMetadata: {
+ promptTokenCount: 38,
+ candidatesTokenCount: 13,
+ totalTokenCount: 355,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
+ thoughtsTokenCount: 304,
+ },
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
+ },
+ {
+ candidates: [
+ {
+ content: {
+ parts: [{ text: '简单来说,' }],
+ role: 'model',
+ },
+ index: 0,
+ },
+ ],
+ text: '简单来说,',
+ usageMetadata: {
+ promptTokenCount: 38,
+ candidatesTokenCount: 16,
+ totalTokenCount: 358,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
+ thoughtsTokenCount: 304,
+ },
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
+ },
+ {
+ candidates: [
+ {
+ content: { parts: [{ text: '文本内容。' }], role: 'model' },
+ finishReason: 'STOP',
+ index: 0,
+ },
+ ],
+ text: '文本内容。',
+ usageMetadata: {
+ promptTokenCount: 38,
+ candidatesTokenCount: 19,
+ totalTokenCount: 361,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
+ thoughtsTokenCount: 304,
+ },
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
+ },
+ ];
+
+ const mockGoogleStream = new ReadableStream({
+ start(controller) {
+ data.forEach((item) => {
+ controller.enqueue(item);
+ });
+
+ controller.close();
+ },
+ });
+
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
+
+ const decoder = new TextDecoder();
+ const chunks = [];
+
+ // @ts-ignore
+ for await (const chunk of protocolStream) {
+ chunks.push(decoder.decode(chunk, { stream: true }));
+ }
+
+ expect(chunks).toEqual(
+ [
+ 'id: chat_1',
+ 'event: reasoning',
+ 'data: "**Understanding the Conditional Logic**\\n\\n"\n',
+
+ 'id: chat_1',
+ 'event: reasoning',
+ `data: "**Finalizing Interpretation**\\n\\n"\n`,
+
+ 'id: chat_1',
+ 'event: text',
+ `data: "简单来说,"\n`,
+
+ 'id: chat_1',
+ 'event: text',
+ `data: "文本内容。"\n`,
+ // stop
+ 'id: chat_1',
+ 'event: stop',
+ `data: "STOP"\n`,
+ // usage
+ 'id: chat_1',
+ 'event: usage',
+ `data: {"inputTextTokens":38,"outputReasoningTokens":304,"outputTextTokens":19,"totalInputTokens":38,"totalOutputTokens":323,"totalTokens":361}\n`,
+ ].map((i) => i + '\n'),
+ );
+ });
+
+ it('should return undefined data without text', async () => {
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+ const data = [
+ {
+ candidates: [
+ {
+ content: { parts: [{ text: '234' }], role: 'model' },
+ safetyRatings: [
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
+ ],
+ },
+ ],
+ text: '234',
+ usageMetadata: {
+ promptTokenCount: 19,
+ candidatesTokenCount: 3,
+ totalTokenCount: 122,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
+ thoughtsTokenCount: 100,
+ },
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
+ },
+ {
+ text: '',
+ candidates: [
+ {
+ content: { parts: [{ text: '' }], role: 'model' },
+ safetyRatings: [
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
+ ],
+ },
+ ],
+ usageMetadata: {
+ promptTokenCount: 19,
+ candidatesTokenCount: 3,
+ totalTokenCount: 122,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
+ candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 3 }],
+ thoughtsTokenCount: 100,
+ },
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
+ },
+ {
+ text: '567890\n',
+ candidates: [
+ {
+ content: { parts: [{ text: '567890\n' }], role: 'model' },
+ finishReason: 'STOP',
+ safetyRatings: [
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
+ ],
+ },
+ ],
+ usageMetadata: {
+ promptTokenCount: 19,
+ candidatesTokenCount: 11,
+ totalTokenCount: 131,
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
+ candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
+ thoughtsTokenCount: 100,
+ },
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
+ },
+ ];
+
+ const mockGoogleStream = new ReadableStream({
+ start(controller) {
+ data.forEach((item) => {
+ controller.enqueue(item);
+ });
+
+ controller.close();
+ },
+ });
+
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
+
+ const decoder = new TextDecoder();
+ const chunks = [];
+
+ // @ts-ignore
+ for await (const chunk of protocolStream) {
+ chunks.push(decoder.decode(chunk, { stream: true }));
+ }
+
+ expect(chunks).toEqual(
+ [
+ 'id: chat_1',
+ 'event: text',
+ 'data: "234"\n',
+
+ 'id: chat_1',
+ 'event: text',
+ 'data: ""\n',
+
+ 'id: chat_1',
+ 'event: text',
+ `data: "567890\\n"\n`,
+ // stop
+ 'id: chat_1',
+ 'event: stop',
+ `data: "STOP"\n`,
+ // usage
+ 'id: chat_1',
+ 'event: usage',
+ `data: {"inputTextTokens":19,"outputReasoningTokens":100,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":111,"totalTokens":131}\n`,
+ ].map((i) => i + '\n'),
+ );
+ });
+
+ it('should handle groundingMetadata', async () => {
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
+
+ const data = [
+ {
+ text: '123',
+ candidates: [
+ {
+ content: {
+ parts: [
+ {
+ text: '123',
+ },
+ ],
+ role: 'model',
+ },
+ index: 0,
+ groundingMetadata: {},
+ },
+ ],
+ usageMetadata: {
+ promptTokenCount: 9,
+ candidatesTokenCount: 18,
+ totalTokenCount: 27,
+ promptTokensDetails: [
+ {
+ modality: 'TEXT',
+ tokenCount: 9,
+ },
+ ],
+ },
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
+ },
+ {
+ text: '45678',
+ candidates: [
+ {
+ content: {
+ parts: [
+ {
+ text: '45678',
+ },
+ ],
+ role: 'model',
+ },
+ finishReason: 'STOP',
+ index: 0,
+ groundingMetadata: {
+ searchEntryPoint: {
+ renderedContent: 'content\n',
+ },
+ groundingChunks: [
+ {
+ web: {
+ uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXG1234545',
+ title: 'npmjs.com',
+ },
+ },
+ {
+ web: {
+ uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXE9288334',
+ title: 'google.dev',
+ },
+ },
+ ],
+ groundingSupports: [
+ {
+ segment: {
+ startIndex: 63,
+ endIndex: 67,
+ text: '1。',
+ },
+ groundingChunkIndices: [0],
+ confidenceScores: [1],
+ },
+ {
+ segment: {
+ startIndex: 69,
+ endIndex: 187,
+ text: 'SDK。',
+ },
+ groundingChunkIndices: [1],
+ confidenceScores: [1],
+ },
+ ],
+ webSearchQueries: ['sdk latest version'],
+ },
+ },
+ ],
+ usageMetadata: {
+ promptTokenCount: 9,
+ candidatesTokenCount: 122,
+ totalTokenCount: 131,
+ promptTokensDetails: [
+ {
+ modality: 'TEXT',
+ tokenCount: 9,
+ },
+ ],
+ },
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
+ },
+ ];
+
+ const mockGoogleStream = new ReadableStream({
+ start(controller) {
+ data.forEach((item) => {
+ controller.enqueue(item);
+ });
+
+ controller.close();
+ },
+ });
+
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
+
+ const decoder = new TextDecoder();
+ const chunks = [];
+
+ // @ts-ignore
+ for await (const chunk of protocolStream) {
+ chunks.push(decoder.decode(chunk, { stream: true }));
+ }
+
+ expect(chunks).toEqual(
+ [
+ 'id: chat_1',
+ 'event: text',
+ 'data: "123"\n',
+
+ 'id: chat_1',
+ 'event: grounding',
+ 'data: {}\n',
+
+ 'id: chat_1',
+ 'event: text',
+ 'data: "45678"\n',
+
+ 'id: chat_1',
+ 'event: grounding',
+ `data: {\"citations\":[{\"favicon\":\"npmjs.com\",\"title\":\"npmjs.com\",\"url\":\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXG1234545\"},{\"favicon\":\"google.dev\",\"title\":\"google.dev\",\"url\":\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXE9288334\"}],\"searchQueries\":[\"sdk latest version\"]}\n`,
+ // stop
+ 'id: chat_1',
+ 'event: stop',
+ `data: "STOP"\n`,
+ // usage
+ 'id: chat_1',
+ 'event: usage',
+ `data: {"inputTextTokens":9,"outputTextTokens":122,"totalInputTokens":9,"totalOutputTokens":122,"totalTokens":131}\n`,
+ ].map((i) => i + '\n'),
+ );
+ });
  });