@ai-sdk/google 2.0.0-alpha.9 → 2.0.0-beta.10

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
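The headline API change in this range: the `useSearchGrounding` and `dynamicRetrievalConfig` provider options are removed, and search grounding plus URL context become provider-defined tools exposed on `provider.tools`; `thinkingConfig` gains an `includeThoughts` flag, and the provider now ships an image model. Below is a minimal migration sketch against the new build, not part of the package itself: the `generateText` call shape comes from the AI SDK core `ai` package and the `gemini-2.0-flash` model id is only an illustrative placeholder, neither of which appears in this diff.

```ts
// Minimal migration sketch. Assumptions (not in this diff): the `ai` package's
// generateText API and the 'gemini-2.0-flash' model id are illustrative only.
import { generateText } from 'ai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';

const google = createGoogleGenerativeAI(); // reads the API key from the environment

// alpha.9 (removed): search grounding was a provider option.
//   providerOptions: {
//     google: {
//       useSearchGrounding: true,
//       dynamicRetrievalConfig: { mode: 'MODE_DYNAMIC' },
//     },
//   }

// beta.10: search grounding and URL context are provider-defined tools.
const result = await generateText({
  model: google('gemini-2.0-flash'),
  prompt: 'Summarize the latest AI SDK release notes.',
  tools: {
    // Tool keys must match the declared tool names: "google_search" and "url_context".
    google_search: google.tools.googleSearch({
      mode: 'MODE_UNSPECIFIED', // defaults defined by the tool's inputSchema in this diff
      dynamicThreshold: 1,
    }),
    url_context: google.tools.urlContext({}),
  },
  providerOptions: {
    google: {
      // thinkingConfig now also accepts includeThoughts (the new getArgs check
      // emits a warning when it is used outside the Vertex provider).
      thinkingConfig: { thinkingBudget: 1024, includeThoughts: false },
    },
  },
});

// Grounding, URL-context, and raw usage metadata now surface under providerMetadata.google.
console.log(result.providerMetadata?.google?.groundingMetadata);
console.log(result.providerMetadata?.google?.urlContextMetadata);
```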
package/dist/index.mjs CHANGED
@@ -1,9 +1,6 @@
  // src/google-provider.ts
  import {
- NoSuchModelError
- } from "@ai-sdk/provider";
- import {
- generateId,
+ generateId as generateId2,
  loadApiKey,
  withoutTrailingSlash
  } from "@ai-sdk/provider-utils";
@@ -19,11 +16,11 @@ import {
  postJsonToApi,
  resolve
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z3 } from "zod/v4";
 
  // src/google-error.ts
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- import { z } from "zod";
+ import { z } from "zod/v4";
  var googleErrorDataSchema = z.object({
  error: z.object({
  code: z.number().nullable(),
@@ -37,7 +34,7 @@ var googleFailedResponseHandler = createJsonErrorResponseHandler({
  });
 
  // src/google-generative-ai-embedding-options.ts
- import { z as z2 } from "zod";
+ import { z as z2 } from "zod/v4";
  var googleGenerativeAIEmbeddingProviderOptions = z2.object({
  /**
  * Optional. Optional reduced dimension for the output embedding.
@@ -141,11 +138,12 @@ import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler,
  createJsonResponseHandler as createJsonResponseHandler2,
+ generateId,
  parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2,
  resolve as resolve2
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z7 } from "zod/v4";
 
  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -241,20 +239,20 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
  return result;
  }
  function isEmptyObjectSchema(jsonSchema) {
- return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+ return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
  }
 
  // src/convert-to-google-generative-ai-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- import {
- convertToBase64
- } from "@ai-sdk/provider-utils";
- function convertToGoogleGenerativeAIMessages(prompt) {
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
+ function convertToGoogleGenerativeAIMessages(prompt, options) {
+ var _a;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
+ const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
@@ -328,7 +326,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return {
  functionCall: {
  name: part.toolName,
- args: part.args
+ args: part.input
  }
  };
  }
@@ -346,7 +344,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  name: part.toolName,
  response: {
  name: part.toolName,
- content: part.result
+ content: part.output.value
  }
  }
  }))
@@ -355,8 +353,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  }
  }
+ if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+ const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+ contents[0].parts.unshift({ text: systemText + "\n\n" });
+ }
  return {
- systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+ systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
  contents
  };
  }
@@ -367,22 +369,12 @@ function getModelPath(modelId) {
  }
 
  // src/google-generative-ai-options.ts
- import { z as z4 } from "zod";
- var dynamicRetrievalConfig = z4.object({
- /**
- * The mode of the predictor to be used in dynamic retrieval.
- */
- mode: z4.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
- /**
- * The threshold to be used in dynamic retrieval. If not set, a system default
- * value is used.
- */
- dynamicThreshold: z4.number().optional()
- });
+ import { z as z4 } from "zod/v4";
  var googleGenerativeAIProviderOptions = z4.object({
  responseModalities: z4.array(z4.enum(["TEXT", "IMAGE"])).optional(),
  thinkingConfig: z4.object({
- thinkingBudget: z4.number().optional()
+ thinkingBudget: z4.number().optional(),
+ includeThoughts: z4.boolean().optional()
  }).optional(),
  /**
  Optional.
@@ -435,21 +427,7 @@ var googleGenerativeAIProviderOptions = z4.object({
  *
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
  */
- audioTimestamp: z4.boolean().optional(),
- /**
- Optional. When enabled, the model will use Google search to ground the response.
-
- @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
- */
- useSearchGrounding: z4.boolean().optional(),
- /**
- Optional. Specifies the dynamic retrieval configuration.
-
- @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
-
- @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
- */
- dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+ audioTimestamp: z4.boolean().optional()
  });
 
  // src/google-prepare-tools.ts
@@ -459,8 +437,6 @@ import {
  function prepareTools({
  tools,
  toolChoice,
- useSearchGrounding,
- dynamicRetrievalConfig: dynamicRetrievalConfig2,
  modelId
  }) {
  var _a;
@@ -468,28 +444,76 @@ function prepareTools({
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
- if (useSearchGrounding) {
+ if (tools == null) {
+ return { tools: void 0, toolConfig: void 0, toolWarnings };
+ }
+ const hasFunctionTools = tools.some((tool) => tool.type === "function");
+ const hasProviderDefinedTools = tools.some(
+ (tool) => tool.type === "provider-defined"
+ );
+ if (hasFunctionTools && hasProviderDefinedTools) {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tools.find((tool) => tool.type === "function"),
+ details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+ });
+ }
+ if (hasProviderDefinedTools) {
+ const googleTools2 = {};
+ const providerDefinedTools = tools.filter(
+ (tool) => tool.type === "provider-defined"
+ );
+ providerDefinedTools.forEach((tool) => {
+ switch (tool.id) {
+ case "google.google_search":
+ if (isGemini2) {
+ googleTools2.googleSearch = {};
+ } else if (supportsDynamicRetrieval) {
+ googleTools2.googleSearchRetrieval = {
+ dynamicRetrievalConfig: {
+ mode: tool.args.mode,
+ dynamicThreshold: tool.args.dynamicThreshold
+ }
+ };
+ } else {
+ googleTools2.googleSearchRetrieval = {};
+ }
+ break;
+ case "google.url_context":
+ if (isGemini2) {
+ googleTools2.urlContext = {};
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool,
+ details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ });
  return {
- tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
- },
+ tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
  toolConfig: void 0,
  toolWarnings
  };
  }
- if (tools == null) {
- return { tools: void 0, toolConfig: void 0, toolWarnings };
- }
  const functionDeclarations = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- functionDeclarations.push({
- name: tool.name,
- description: (_a = tool.description) != null ? _a : "",
- parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
- });
+ switch (tool.type) {
+ case "function":
+ functionDeclarations.push({
+ name: tool.name,
+ description: (_a = tool.description) != null ? _a : "",
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+ });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -566,12 +590,72 @@ function mapGoogleGenerativeAIFinishReason({
  }
  }
 
+ // src/tool/google-search.ts
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z5 } from "zod/v4";
+ var groundingChunkSchema = z5.object({
+ web: z5.object({ uri: z5.string(), title: z5.string() }).nullish(),
+ retrievedContext: z5.object({ uri: z5.string(), title: z5.string() }).nullish()
+ });
+ var groundingMetadataSchema = z5.object({
+ webSearchQueries: z5.array(z5.string()).nullish(),
+ retrievalQueries: z5.array(z5.string()).nullish(),
+ searchEntryPoint: z5.object({ renderedContent: z5.string() }).nullish(),
+ groundingChunks: z5.array(groundingChunkSchema).nullish(),
+ groundingSupports: z5.array(
+ z5.object({
+ segment: z5.object({
+ startIndex: z5.number().nullish(),
+ endIndex: z5.number().nullish(),
+ text: z5.string().nullish()
+ }),
+ segment_text: z5.string().nullish(),
+ groundingChunkIndices: z5.array(z5.number()).nullish(),
+ supportChunkIndices: z5.array(z5.number()).nullish(),
+ confidenceScores: z5.array(z5.number()).nullish(),
+ confidenceScore: z5.array(z5.number()).nullish()
+ })
+ ).nullish(),
+ retrievalMetadata: z5.union([
+ z5.object({
+ webDynamicRetrievalScore: z5.number()
+ }),
+ z5.object({})
+ ]).nullish()
+ });
+ var googleSearch = createProviderDefinedToolFactory({
+ id: "google.google_search",
+ name: "google_search",
+ inputSchema: z5.object({
+ mode: z5.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+ dynamicThreshold: z5.number().default(1)
+ })
+ });
+
+ // src/tool/url-context.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+ import { z as z6 } from "zod/v4";
+ var urlMetadataSchema = z6.object({
+ retrievedUrl: z6.string(),
+ urlRetrievalStatus: z6.string()
+ });
+ var urlContextMetadataSchema = z6.object({
+ urlMetadata: z6.array(urlMetadataSchema)
+ });
+ var urlContext = createProviderDefinedToolFactory2({
+ id: "google.url_context",
+ name: "url_context",
+ inputSchema: z6.object({})
+ });
+
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
+ var _a;
  this.modelId = modelId;
  this.config = config;
+ this.generateId = (_a = config.generateId) != null ? _a : generateId;
  }
  get provider() {
  return this.config.provider;
@@ -602,16 +686,24 @@ var GoogleGenerativeAILanguageModel = class {
  providerOptions,
  schema: googleGenerativeAIProviderOptions
  });
- const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
+ if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ warnings.push({
+ type: "other",
+ message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ });
+ }
+ const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+ const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+ prompt,
+ { isGemmaModel }
+ );
  const {
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
- dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
  modelId: this.modelId
  });
  return {
@@ -640,9 +732,9 @@ var GoogleGenerativeAILanguageModel = class {
  thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
  },
  contents,
- systemInstruction,
+ systemInstruction: isGemmaModel ? void 0 : systemInstruction,
  safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
  },
@@ -650,7 +742,7 @@ var GoogleGenerativeAILanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
  const mergedHeaders = combineHeaders2(
@@ -675,16 +767,20 @@ var GoogleGenerativeAILanguageModel = class {
  const candidate = response.candidates[0];
  const content = [];
  const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ const usageMetadata = response.usageMetadata;
  for (const part of parts) {
- if ("text" in part && part.text.length > 0) {
- content.push({ type: "text", text: part.text });
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ content.push({ type: "reasoning", text: part.text });
+ } else {
+ content.push({ type: "text", text: part.text });
+ }
  } else if ("functionCall" in part) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: this.config.generateId(),
  toolName: part.functionCall.name,
- args: JSON.stringify(part.functionCall.args)
+ input: JSON.stringify(part.functionCall.args)
  });
  } else if ("inlineData" in part) {
  content.push({
@@ -701,7 +797,6 @@ var GoogleGenerativeAILanguageModel = class {
  for (const source of sources) {
  content.push(source);
  }
- const usageMetadata = response.usageMetadata;
  return {
  content,
  finishReason: mapGoogleGenerativeAIFinishReason({
@@ -719,7 +814,9 @@ var GoogleGenerativeAILanguageModel = class {
  providerMetadata: {
  google: {
  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
+ usageMetadata: usageMetadata != null ? usageMetadata : null
  }
  },
  request: { body },
@@ -755,8 +852,12 @@ var GoogleGenerativeAILanguageModel = class {
  totalTokens: void 0
  };
  let providerMetadata = void 0;
- const generateId2 = this.config.generateId;
+ const generateId3 = this.config.generateId;
  let hasToolCalls = false;
+ let currentTextBlockId = null;
+ let currentReasoningBlockId = null;
+ let blockCounter = 0;
+ const emittedSourceUrls = /* @__PURE__ */ new Set();
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -764,7 +865,10 @@ var GoogleGenerativeAILanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
@@ -783,10 +887,64 @@ var GoogleGenerativeAILanguageModel = class {
  return;
  }
  const content = candidate.content;
+ const sources = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
+ generateId: generateId3
+ });
+ if (sources != null) {
+ for (const source of sources) {
+ if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+ emittedSourceUrls.add(source.url);
+ controller.enqueue(source);
+ }
+ }
+ }
  if (content != null) {
- const deltaText = getTextFromParts(content.parts);
- if (deltaText != null) {
- controller.enqueue(deltaText);
+ const parts = (_g = content.parts) != null ? _g : [];
+ for (const part of parts) {
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ currentTextBlockId = null;
+ }
+ if (currentReasoningBlockId === null) {
+ currentReasoningBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "reasoning-start",
+ id: currentReasoningBlockId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: currentReasoningBlockId,
+ delta: part.text
+ });
+ } else {
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ currentReasoningBlockId = null;
+ }
+ if (currentTextBlockId === null) {
+ currentTextBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "text-start",
+ id: currentTextBlockId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: currentTextBlockId,
+ delta: part.text
+ });
+ }
+ }
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
@@ -800,23 +958,29 @@ var GoogleGenerativeAILanguageModel = class {
  }
  const toolCallDeltas = getToolCallsFromParts({
  parts: content.parts,
- generateId: generateId2
+ generateId: generateId3
  });
  if (toolCallDeltas != null) {
  for (const toolCall of toolCallDeltas) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
+ type: "tool-input-start",
+ id: toolCall.toolCallId,
+ toolName: toolCall.toolName
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: toolCall.args
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.toolCallId
  });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: toolCall.toolCallId,
  toolName: toolCall.toolName,
- args: toolCall.args
+ input: toolCall.args
  });
  hasToolCalls = true;
  }
@@ -827,22 +991,31 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_g = extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId: generateId2
- })) != null ? _g : [];
- for (const source of sources) {
- controller.enqueue(source);
- }
  providerMetadata = {
  google: {
  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
  }
  };
+ if (usageMetadata != null) {
+ providerMetadata.google.usageMetadata = usageMetadata;
+ }
  }
  },
  flush(controller) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ }
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -859,26 +1032,18 @@ var GoogleGenerativeAILanguageModel = class {
  };
  }
  function getToolCallsFromParts({
  parts,
- generateId: generateId2
+ generateId: generateId3
  }) {
  const functionCallParts = parts == null ? void 0 : parts.filter(
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: generateId2(),
+ toolCallId: generateId3(),
  toolName: part.functionCall.name,
  args: JSON.stringify(part.functionCall.args)
  }));
  }
- function getTextFromParts(parts) {
- const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : {
- type: "text",
- text: textParts.map((part) => part.text).join("")
- };
- }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
@@ -886,7 +1051,7 @@ function getInlineDataParts(parts) {
  }
  function extractSources({
  groundingMetadata,
- generateId: generateId2
+ generateId: generateId3
  }) {
  var _a;
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -894,101 +1059,196 @@ function extractSources({
  ).map((chunk) => ({
  type: "source",
  sourceType: "url",
- id: generateId2(),
+ id: generateId3(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = z5.object({
- role: z5.string(),
- parts: z5.array(
- z5.union([
- z5.object({
- text: z5.string()
- }),
- z5.object({
- functionCall: z5.object({
- name: z5.string(),
- args: z5.unknown()
+ var contentSchema = z7.object({
+ parts: z7.array(
+ z7.union([
+ // note: order matters since text can be fully empty
+ z7.object({
+ functionCall: z7.object({
+ name: z7.string(),
+ args: z7.unknown()
  })
  }),
- z5.object({
- inlineData: z5.object({
- mimeType: z5.string(),
- data: z5.string()
+ z7.object({
+ inlineData: z7.object({
+ mimeType: z7.string(),
+ data: z7.string()
  })
+ }),
+ z7.object({
+ text: z7.string().nullish(),
+ thought: z7.boolean().nullish()
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = z5.object({
- web: z5.object({ uri: z5.string(), title: z5.string() }).nullish(),
- retrievedContext: z5.object({ uri: z5.string(), title: z5.string() }).nullish()
- });
- var groundingMetadataSchema = z5.object({
- webSearchQueries: z5.array(z5.string()).nullish(),
- retrievalQueries: z5.array(z5.string()).nullish(),
- searchEntryPoint: z5.object({ renderedContent: z5.string() }).nullish(),
- groundingChunks: z5.array(groundingChunkSchema).nullish(),
- groundingSupports: z5.array(
- z5.object({
- segment: z5.object({
- startIndex: z5.number().nullish(),
- endIndex: z5.number().nullish(),
- text: z5.string().nullish()
- }),
- segment_text: z5.string().nullish(),
- groundingChunkIndices: z5.array(z5.number()).nullish(),
- supportChunkIndices: z5.array(z5.number()).nullish(),
- confidenceScores: z5.array(z5.number()).nullish(),
- confidenceScore: z5.array(z5.number()).nullish()
- })
- ).nullish(),
- retrievalMetadata: z5.union([
- z5.object({
- webDynamicRetrievalScore: z5.number()
- }),
- z5.object({})
- ]).nullish()
- });
- var safetyRatingSchema = z5.object({
- category: z5.string().nullish(),
- probability: z5.string().nullish(),
- probabilityScore: z5.number().nullish(),
- severity: z5.string().nullish(),
- severityScore: z5.number().nullish(),
- blocked: z5.boolean().nullish()
+ var safetyRatingSchema = z7.object({
+ category: z7.string().nullish(),
+ probability: z7.string().nullish(),
+ probabilityScore: z7.number().nullish(),
+ severity: z7.string().nullish(),
+ severityScore: z7.number().nullish(),
+ blocked: z7.boolean().nullish()
  });
- var usageSchema = z5.object({
- cachedContentTokenCount: z5.number().nullish(),
- thoughtsTokenCount: z5.number().nullish(),
- promptTokenCount: z5.number().nullish(),
- candidatesTokenCount: z5.number().nullish(),
- totalTokenCount: z5.number().nullish()
+ var usageSchema = z7.object({
+ cachedContentTokenCount: z7.number().nullish(),
+ thoughtsTokenCount: z7.number().nullish(),
+ promptTokenCount: z7.number().nullish(),
+ candidatesTokenCount: z7.number().nullish(),
+ totalTokenCount: z7.number().nullish()
  });
- var responseSchema = z5.object({
- candidates: z5.array(
- z5.object({
- content: contentSchema.nullish().or(z5.object({}).strict()),
- finishReason: z5.string().nullish(),
- safetyRatings: z5.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ var responseSchema = z7.object({
+ candidates: z7.array(
+ z7.object({
+ content: contentSchema.nullish().or(z7.object({}).strict()),
+ finishReason: z7.string().nullish(),
+ safetyRatings: z7.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ),
  usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = z5.object({
- candidates: z5.array(
- z5.object({
+ var chunkSchema = z7.object({
+ candidates: z7.array(
+ z7.object({
  content: contentSchema.nullish(),
- finishReason: z5.string().nullish(),
- safetyRatings: z5.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ finishReason: z7.string().nullish(),
+ safetyRatings: z7.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ).nullish(),
  usageMetadata: usageSchema.nullish()
  });
 
+ // src/google-tools.ts
+ var googleTools = {
+ /**
+ * Creates a Google search tool that gives Google direct access to real-time web content.
+ * Must have name "google_search".
+ */
+ googleSearch,
+ /**
+ * Creates a URL context tool that gives Google direct access to real-time web content.
+ * Must have name "url_context".
+ */
+ urlContext
+ };
+
+ // src/google-generative-ai-image-model.ts
+ import {
+ combineHeaders as combineHeaders3,
+ createJsonResponseHandler as createJsonResponseHandler3,
+ parseProviderOptions as parseProviderOptions3,
+ postJsonToApi as postJsonToApi3,
+ resolve as resolve3
+ } from "@ai-sdk/provider-utils";
+ import { z as z8 } from "zod/v4";
+ var GoogleGenerativeAIImageModel = class {
+ constructor(modelId, settings, config) {
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ this.specificationVersion = "v2";
+ }
+ get maxImagesPerCall() {
+ var _a;
+ return (_a = this.settings.maxImagesPerCall) != null ? _a : 4;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const {
+ prompt,
+ n = 1,
+ size = "1024x1024",
+ aspectRatio = "1:1",
+ seed,
+ providerOptions,
+ headers,
+ abortSignal
+ } = options;
+ const warnings = [];
+ if (size != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "size",
+ details: "This model does not support the `size` option. Use `aspectRatio` instead."
+ });
+ }
+ if (seed != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "seed",
+ details: "This model does not support the `seed` option through this provider."
+ });
+ }
+ const googleOptions = await parseProviderOptions3({
+ provider: "google",
+ providerOptions,
+ schema: googleImageProviderOptionsSchema
+ });
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const parameters = {
+ sampleCount: n
+ };
+ if (aspectRatio != null) {
+ parameters.aspectRatio = aspectRatio;
+ }
+ if (googleOptions) {
+ Object.assign(parameters, googleOptions);
+ }
+ const body = {
+ instances: [{ prompt }],
+ parameters
+ };
+ const { responseHeaders, value: response } = await postJsonToApi3({
+ url: `${this.config.baseURL}/models/${this.modelId}:predict`,
+ headers: combineHeaders3(await resolve3(this.config.headers), headers),
+ body,
+ failedResponseHandler: googleFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler3(
+ googleImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response.predictions.map(
+ (p) => p.bytesBase64Encoded
+ ),
+ warnings: warnings != null ? warnings : [],
+ providerMetadata: {
+ google: {
+ images: response.predictions.map((prediction) => ({
+ // Add any prediction-specific metadata here
+ }))
+ }
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders
+ }
+ };
+ }
+ };
+ var googleImageResponseSchema = z8.object({
+ predictions: z8.array(z8.object({ bytesBase64Encoded: z8.string() })).default([])
+ });
+ var googleImageProviderOptionsSchema = z8.object({
+ personGeneration: z8.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
+ aspectRatio: z8.enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
+ });
+
  // src/google-provider.ts
  function createGoogleGenerativeAI(options = {}) {
  var _a;
@@ -1007,7 +1267,7 @@ function createGoogleGenerativeAI(options = {}) {
  provider: "google.generative-ai",
  baseURL,
  headers: getHeaders,
- generateId: (_a2 = options.generateId) != null ? _a2 : generateId,
+ generateId: (_a2 = options.generateId) != null ? _a2 : generateId2,
  supportedUrls: () => ({
  "*": [
  // Only allow requests to the Google Generative Language "files" endpoint
@@ -1024,6 +1284,12 @@ function createGoogleGenerativeAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
+ const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
+ provider: "google.generative-ai",
+ baseURL,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
  const provider = function(modelId) {
  if (new.target) {
  throw new Error(
@@ -1038,9 +1304,9 @@ function createGoogleGenerativeAI(options = {}) {
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
- provider.imageModel = (modelId) => {
- throw new NoSuchModelError({ modelId, modelType: "imageModel" });
- };
+ provider.image = createImageModel;
+ provider.imageModel = createImageModel;
+ provider.tools = googleTools;
  return provider;
  }
  var google = createGoogleGenerativeAI();
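The other sizable addition in this range is GoogleGenerativeAIImageModel: the old `imageModel` stub that threw NoSuchModelError is replaced by a real implementation backed by the `:predict` endpoint and exposed as `provider.image` / `provider.imageModel`. Below is a minimal usage sketch, assuming the `ai` package's `experimental_generateImage` helper and an illustrative `imagen-3.0-generate-002` model id, neither of which is defined in this diff.

```ts
// Sketch only. Assumptions (not in this diff): experimental_generateImage from
// the `ai` package, and 'imagen-3.0-generate-002' as a placeholder model id.
import { experimental_generateImage as generateImage } from 'ai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';

const google = createGoogleGenerativeAI();

const { images } = await generateImage({
  model: google.image('imagen-3.0-generate-002', { maxImagesPerCall: 2 }),
  prompt: 'A watercolor lighthouse at dawn',
  n: 2,
  aspectRatio: '16:9', // forwarded to parameters.aspectRatio by doGenerate
  providerOptions: {
    // Validated against googleImageProviderOptionsSchema in the new model code.
    google: { personGeneration: 'dont_allow' },
  },
});

// The predict response's bytesBase64Encoded strings come back as the generated images.
console.log(images.length);
```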